function_name stringlengths 3 35 | file_path stringlengths 28 76 | focal_code stringlengths 385 27.9k | file_content stringlengths 869 132k | language stringclasses 1 value | function_component dict | metadata dict |
|---|---|---|---|---|---|---|
graham_scan | Rust-master/src/geometry/graham_scan.rs | pub fn graham_scan(mut points: Vec<Point>) -> Vec<Point> {
if points.len() <= 2 {
return vec![];
}
let min_point = points.iter().min_by(point_min).unwrap().clone();
points.retain(|p| p != &min_point);
if points.is_empty() {
// edge case where all the points are the same
return vec![];
}
let point_cmp = |a: &Point, b: &Point| -> Ordering {
// Sort points in counter-clockwise direction relative to the min point. We can this by
// checking the orientation of consecutive vectors (min_point, a) and (a, b).
let orientation = min_point.consecutive_orientation(a, b);
if orientation < 0.0 {
Ordering::Greater
} else if orientation > 0.0 {
Ordering::Less
} else {
let a_dist = min_point.euclidean_distance(a);
let b_dist = min_point.euclidean_distance(b);
// When two points have the same relative angle to the min point, we should only
// include the further point in the convex hull. We sort further points into a lower
// index, and in the algorithm, remove all consecutive points with the same relative
// angle.
b_dist.partial_cmp(&a_dist).unwrap()
}
};
points.sort_by(point_cmp);
let mut convex_hull: Vec<Point> = vec![];
// We always add the min_point, and the first two points in the sorted vec.
convex_hull.push(min_point.clone());
convex_hull.push(points[0].clone());
let mut top = 1;
for point in points.iter().skip(1) {
if min_point.consecutive_orientation(point, &convex_hull[top]) == 0.0 {
// Remove consecutive points with the same angle. We make sure include the furthest
// point in the convex hull in the sort comparator.
continue;
}
loop {
// In this loop, we remove points that we determine are no longer part of the convex
// hull.
if top <= 1 {
break;
}
// If there is a segment(i+1, i+2) turns right relative to segment(i, i+1), point(i+1)
// is not part of the convex hull.
let orientation =
convex_hull[top - 1].consecutive_orientation(&convex_hull[top], point);
if orientation <= 0.0 {
top -= 1;
convex_hull.pop();
} else {
break;
}
}
convex_hull.push(point.clone());
top += 1;
}
if convex_hull.len() <= 2 {
return vec![];
}
convex_hull
} | use crate::geometry::Point;
use std::cmp::Ordering;
fn point_min(a: &&Point, b: &&Point) -> Ordering {
// Find the bottom-most point. In the case of a tie, find the left-most.
if a.y == b.y {
a.x.partial_cmp(&b.x).unwrap()
} else {
a.y.partial_cmp(&b.y).unwrap()
}
}
// Returns a Vec of Points that make up the convex hull of `points`. Returns an empty Vec if there
// is no convex hull.
pub fn graham_scan(mut points: Vec<Point>) -> Vec<Point> {
if points.len() <= 2 {
return vec![];
}
let min_point = points.iter().min_by(point_min).unwrap().clone();
points.retain(|p| p != &min_point);
if points.is_empty() {
// edge case where all the points are the same
return vec![];
}
let point_cmp = |a: &Point, b: &Point| -> Ordering {
// Sort points in counter-clockwise direction relative to the min point. We can this by
// checking the orientation of consecutive vectors (min_point, a) and (a, b).
let orientation = min_point.consecutive_orientation(a, b);
if orientation < 0.0 {
Ordering::Greater
} else if orientation > 0.0 {
Ordering::Less
} else {
let a_dist = min_point.euclidean_distance(a);
let b_dist = min_point.euclidean_distance(b);
// When two points have the same relative angle to the min point, we should only
// include the further point in the convex hull. We sort further points into a lower
// index, and in the algorithm, remove all consecutive points with the same relative
// angle.
b_dist.partial_cmp(&a_dist).unwrap()
}
};
points.sort_by(point_cmp);
let mut convex_hull: Vec<Point> = vec![];
// We always add the min_point, and the first two points in the sorted vec.
convex_hull.push(min_point.clone());
convex_hull.push(points[0].clone());
let mut top = 1;
for point in points.iter().skip(1) {
if min_point.consecutive_orientation(point, &convex_hull[top]) == 0.0 {
// Remove consecutive points with the same angle. We make sure include the furthest
// point in the convex hull in the sort comparator.
continue;
}
loop {
// In this loop, we remove points that we determine are no longer part of the convex
// hull.
if top <= 1 {
break;
}
// If there is a segment(i+1, i+2) turns right relative to segment(i, i+1), point(i+1)
// is not part of the convex hull.
let orientation =
convex_hull[top - 1].consecutive_orientation(&convex_hull[top], point);
if orientation <= 0.0 {
top -= 1;
convex_hull.pop();
} else {
break;
}
}
convex_hull.push(point.clone());
top += 1;
}
if convex_hull.len() <= 2 {
return vec![];
}
convex_hull
}
#[cfg(test)]
mod tests {
use super::graham_scan;
use super::Point;
fn test_graham(convex_hull: Vec<Point>, others: Vec<Point>) {
let mut points = convex_hull.clone();
points.append(&mut others.clone());
let graham = graham_scan(points);
for point in convex_hull {
assert!(graham.contains(&point));
}
for point in others {
assert!(!graham.contains(&point));
}
}
#[test]
fn too_few_points() {
test_graham(vec![], vec![]);
test_graham(vec![], vec![Point::new(0.0, 0.0)]);
}
#[test]
fn duplicate_point() {
let p = Point::new(0.0, 0.0);
test_graham(vec![], vec![p.clone(), p.clone(), p.clone(), p.clone(), p]);
}
#[test]
fn points_same_line() {
let p1 = Point::new(1.0, 0.0);
let p2 = Point::new(2.0, 0.0);
let p3 = Point::new(3.0, 0.0);
let p4 = Point::new(4.0, 0.0);
let p5 = Point::new(5.0, 0.0);
// let p6 = Point::new(1.0, 1.0);
test_graham(vec![], vec![p1, p2, p3, p4, p5]);
}
#[test]
fn triangle() {
let p1 = Point::new(1.0, 1.0);
let p2 = Point::new(2.0, 1.0);
let p3 = Point::new(1.5, 2.0);
let points = vec![p1, p2, p3];
test_graham(points, vec![]);
}
#[test]
fn rectangle() {
let p1 = Point::new(1.0, 1.0);
let p2 = Point::new(2.0, 1.0);
let p3 = Point::new(2.0, 2.0);
let p4 = Point::new(1.0, 2.0);
let points = vec![p1, p2, p3, p4];
test_graham(points, vec![]);
}
#[test]
fn triangle_with_points_in_middle() {
let p1 = Point::new(1.0, 1.0);
let p2 = Point::new(2.0, 1.0);
let p3 = Point::new(1.5, 2.0);
let p4 = Point::new(1.5, 1.5);
let p5 = Point::new(1.2, 1.3);
let p6 = Point::new(1.8, 1.2);
let p7 = Point::new(1.5, 1.9);
let hull = vec![p1, p2, p3];
let others = vec![p4, p5, p6, p7];
test_graham(hull, others);
}
#[test]
fn rectangle_with_points_in_middle() {
let p1 = Point::new(1.0, 1.0);
let p2 = Point::new(2.0, 1.0);
let p3 = Point::new(2.0, 2.0);
let p4 = Point::new(1.0, 2.0);
let p5 = Point::new(1.5, 1.5);
let p6 = Point::new(1.2, 1.3);
let p7 = Point::new(1.8, 1.2);
let p8 = Point::new(1.9, 1.7);
let p9 = Point::new(1.4, 1.9);
let hull = vec![p1, p2, p3, p4];
let others = vec![p5, p6, p7, p8, p9];
test_graham(hull, others);
}
#[test]
fn star() {
// A single stroke star shape (kind of). Only the tips(p1-5) are part of the convex hull. The
// other points would create angles >180 degrees if they were part of the polygon.
let p1 = Point::new(-5.0, 6.0);
let p2 = Point::new(-11.0, 0.0);
let p3 = Point::new(-9.0, -8.0);
let p4 = Point::new(4.0, 4.0);
let p5 = Point::new(6.0, -7.0);
let p6 = Point::new(-7.0, -2.0);
let p7 = Point::new(-2.0, -4.0);
let p8 = Point::new(0.0, 1.0);
let p9 = Point::new(1.0, 0.0);
let p10 = Point::new(-6.0, 1.0);
let hull = vec![p1, p2, p3, p4, p5];
let others = vec![p6, p7, p8, p9, p10];
test_graham(hull, others);
}
#[test]
fn rectangle_with_points_on_same_line() {
let p1 = Point::new(1.0, 1.0);
let p2 = Point::new(2.0, 1.0);
let p3 = Point::new(2.0, 2.0);
let p4 = Point::new(1.0, 2.0);
let p5 = Point::new(1.5, 1.0);
let p6 = Point::new(1.0, 1.5);
let p7 = Point::new(2.0, 1.5);
let p8 = Point::new(1.5, 2.0);
let hull = vec![p1, p2, p3, p4];
let others = vec![p5, p6, p7, p8];
test_graham(hull, others);
}
}
| rust | {
"argument_definitions": [
{
"definitions": [
"pub struct Vec<T, #[unstable(feature = \"allocator_api\", issue = \"32838\")] A: Allocator = Global> {\n buf: RawVec<T, A>,\n len: usize,\n}",
"pub struct Point {\n pub x: f64,\n pub y: f64,\n}"
],
"name": "points",
"type": "Vec<Point>"
}
],
"end_line": 82,
"name": "graham_scan",
"signature": "pub fn graham_scan(mut points: Vec<Point>) -> Vec<Point>",
"start_line": 15
} | {
"class_name": "",
"class_signature": ""
} |
ramer_douglas_peucker | Rust-master/src/geometry/ramer_douglas_peucker.rs | pub fn ramer_douglas_peucker(points: &[Point], epsilon: f64) -> Vec<Point> {
if points.len() < 3 {
return points.to_vec();
}
let mut dmax = 0.0;
let mut index = 0;
let end = points.len() - 1;
for i in 1..end {
let d = perpendicular_distance(&points[i], &points[0], &points[end]);
if d > dmax {
index = i;
dmax = d;
}
}
if dmax > epsilon {
let mut results = ramer_douglas_peucker(&points[..=index], epsilon);
results.pop();
results.extend(ramer_douglas_peucker(&points[index..], epsilon));
results
} else {
vec![points[0].clone(), points[end].clone()]
}
} | use crate::geometry::Point;
pub fn ramer_douglas_peucker(points: &[Point], epsilon: f64) -> Vec<Point> {
if points.len() < 3 {
return points.to_vec();
}
let mut dmax = 0.0;
let mut index = 0;
let end = points.len() - 1;
for i in 1..end {
let d = perpendicular_distance(&points[i], &points[0], &points[end]);
if d > dmax {
index = i;
dmax = d;
}
}
if dmax > epsilon {
let mut results = ramer_douglas_peucker(&points[..=index], epsilon);
results.pop();
results.extend(ramer_douglas_peucker(&points[index..], epsilon));
results
} else {
vec![points[0].clone(), points[end].clone()]
}
}
fn perpendicular_distance(p: &Point, a: &Point, b: &Point) -> f64 {
let num = (b.y - a.y) * p.x - (b.x - a.x) * p.y + b.x * a.y - b.y * a.x;
let den = a.euclidean_distance(b);
num.abs() / den
}
#[cfg(test)]
mod tests {
use super::*;
macro_rules! test_perpendicular_distance {
($($name:ident: $test_case:expr,)*) => {
$(
#[test]
fn $name() {
let (p, a, b, expected) = $test_case;
assert_eq!(perpendicular_distance(&p, &a, &b), expected);
assert_eq!(perpendicular_distance(&p, &b, &a), expected);
}
)*
};
}
test_perpendicular_distance! {
basic: (Point::new(4.0, 0.0), Point::new(0.0, 0.0), Point::new(0.0, 3.0), 4.0),
basic_shifted_1: (Point::new(4.0, 1.0), Point::new(0.0, 1.0), Point::new(0.0, 4.0), 4.0),
basic_shifted_2: (Point::new(2.0, 1.0), Point::new(-2.0, 1.0), Point::new(-2.0, 4.0), 4.0),
}
#[test]
fn test_ramer_douglas_peucker_polygon() {
let a = Point::new(0.0, 0.0);
let b = Point::new(1.0, 0.0);
let c = Point::new(2.0, 0.0);
let d = Point::new(2.0, 1.0);
let e = Point::new(2.0, 2.0);
let f = Point::new(1.0, 2.0);
let g = Point::new(0.0, 2.0);
let h = Point::new(0.0, 1.0);
let polygon = vec![
a.clone(),
b,
c.clone(),
d,
e.clone(),
f,
g.clone(),
h.clone(),
];
let epsilon = 0.7;
let result = ramer_douglas_peucker(&polygon, epsilon);
assert_eq!(result, vec![a, c, e, g, h]);
}
#[test]
fn test_ramer_douglas_peucker_polygonal_chain() {
let a = Point::new(0., 0.);
let b = Point::new(2., 0.5);
let c = Point::new(3., 3.);
let d = Point::new(6., 3.);
let e = Point::new(8., 4.);
let points = vec![a.clone(), b, c, d, e.clone()];
let epsilon = 3.; // The epsilon is quite large, so the result will be a single line
let result = ramer_douglas_peucker(&points, epsilon);
assert_eq!(result, vec![a, e]);
}
#[test]
fn test_less_than_three_points() {
let a = Point::new(0., 0.);
let b = Point::new(1., 1.);
let epsilon = 0.1;
assert_eq!(ramer_douglas_peucker(&[], epsilon), vec![]);
assert_eq!(
ramer_douglas_peucker(&[a.clone()], epsilon),
vec![a.clone()]
);
assert_eq!(
ramer_douglas_peucker(&[a.clone(), b.clone()], epsilon),
vec![a, b]
);
}
}
| rust | {
"argument_definitions": [
{
"definitions": [
"pub struct Point {\n pub x: f64,\n pub y: f64,\n}"
],
"name": "points",
"type": "&[Point]"
}
],
"end_line": 27,
"name": "ramer_douglas_peucker",
"signature": "pub fn ramer_douglas_peucker(points: &[Point], epsilon: f64) -> Vec<Point>",
"start_line": 3
} | {
"class_name": "",
"class_signature": ""
} |
k_means | Rust-master/src/machine_learning/k_means.rs | pub fn k_means(data_points: Vec<(f64, f64)>, n_clusters: usize, max_iter: i32) -> Option<Vec<u32>> {
if data_points.len() < n_clusters {
return None;
}
let mut centroids: Vec<(f64, f64)> = Vec::new();
let mut labels: Vec<u32> = vec![0; data_points.len()];
for _ in 0..n_clusters {
let x: f64 = random::<f64>();
let y: f64 = random::<f64>();
centroids.push((x, y));
}
let mut count_iter: i32 = 0;
while count_iter < max_iter {
let mut new_centroids_position: Vec<(f64, f64)> = vec![(0.0, 0.0); n_clusters];
let mut new_centroids_num: Vec<u32> = vec![0; n_clusters];
for (i, d) in data_points.iter().enumerate() {
let nearest_cluster = find_nearest(d, ¢roids);
labels[i] = nearest_cluster;
new_centroids_position[nearest_cluster as usize].0 += d.0;
new_centroids_position[nearest_cluster as usize].1 += d.1;
new_centroids_num[nearest_cluster as usize] += 1;
}
for i in 0..centroids.len() {
if new_centroids_num[i] == 0 {
continue;
}
let new_x: f64 = new_centroids_position[i].0 / new_centroids_num[i] as f64;
let new_y: f64 = new_centroids_position[i].1 / new_centroids_num[i] as f64;
centroids[i] = (new_x, new_y);
}
count_iter += 1;
}
Some(labels)
} | use rand::random;
fn get_distance(p1: &(f64, f64), p2: &(f64, f64)) -> f64 {
let dx: f64 = p1.0 - p2.0;
let dy: f64 = p1.1 - p2.1;
((dx * dx) + (dy * dy)).sqrt()
}
fn find_nearest(data_point: &(f64, f64), centroids: &[(f64, f64)]) -> u32 {
let mut cluster: u32 = 0;
for (i, c) in centroids.iter().enumerate() {
let d1 = get_distance(data_point, c);
let d2 = get_distance(data_point, ¢roids[cluster as usize]);
if d1 < d2 {
cluster = i as u32;
}
}
cluster
}
pub fn k_means(data_points: Vec<(f64, f64)>, n_clusters: usize, max_iter: i32) -> Option<Vec<u32>> {
if data_points.len() < n_clusters {
return None;
}
let mut centroids: Vec<(f64, f64)> = Vec::new();
let mut labels: Vec<u32> = vec![0; data_points.len()];
for _ in 0..n_clusters {
let x: f64 = random::<f64>();
let y: f64 = random::<f64>();
centroids.push((x, y));
}
let mut count_iter: i32 = 0;
while count_iter < max_iter {
let mut new_centroids_position: Vec<(f64, f64)> = vec![(0.0, 0.0); n_clusters];
let mut new_centroids_num: Vec<u32> = vec![0; n_clusters];
for (i, d) in data_points.iter().enumerate() {
let nearest_cluster = find_nearest(d, ¢roids);
labels[i] = nearest_cluster;
new_centroids_position[nearest_cluster as usize].0 += d.0;
new_centroids_position[nearest_cluster as usize].1 += d.1;
new_centroids_num[nearest_cluster as usize] += 1;
}
for i in 0..centroids.len() {
if new_centroids_num[i] == 0 {
continue;
}
let new_x: f64 = new_centroids_position[i].0 / new_centroids_num[i] as f64;
let new_y: f64 = new_centroids_position[i].1 / new_centroids_num[i] as f64;
centroids[i] = (new_x, new_y);
}
count_iter += 1;
}
Some(labels)
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn test_k_means() {
let mut data_points: Vec<(f64, f64)> = vec![];
let n_points: usize = 1000;
for _ in 0..n_points {
let x: f64 = random::<f64>() * 100.0;
let y: f64 = random::<f64>() * 100.0;
data_points.push((x, y));
}
println!("{:?}", k_means(data_points, 10, 100).unwrap_or_default());
}
}
| rust | {
"argument_definitions": [
{
"definitions": [
"pub struct Vec<T, #[unstable(feature = \"allocator_api\", issue = \"32838\")] A: Allocator = Global> {\n buf: RawVec<T, A>,\n len: usize,\n}"
],
"name": "data_points",
"type": "Vec<(f64, f64"
}
],
"end_line": 70,
"name": "k_means",
"signature": "pub fn k_means(data_points: Vec<(f64, f64)>, n_clusters: usize, max_iter: i32) -> Option<Vec<u32>>",
"start_line": 25
} | {
"class_name": "",
"class_signature": ""
} |
cholesky | Rust-master/src/machine_learning/cholesky.rs | pub fn cholesky(mat: Vec<f64>, n: usize) -> Vec<f64> {
if (mat.is_empty()) || (n == 0) {
return vec![];
}
let mut res = vec![0.0; mat.len()];
for i in 0..n {
for j in 0..=i {
let mut s = 0.0;
for k in 0..j {
s += res[i * n + k] * res[j * n + k];
}
let value = if i == j {
let diag_value = mat[i * n + i] - s;
if diag_value.is_nan() {
0.0
} else {
diag_value.sqrt()
}
} else {
let off_diag_value = 1.0 / res[j * n + j] * (mat[i * n + j] - s);
if off_diag_value.is_nan() {
0.0
} else {
off_diag_value
}
};
res[i * n + j] = value;
}
}
res
} | pub fn cholesky(mat: Vec<f64>, n: usize) -> Vec<f64> {
if (mat.is_empty()) || (n == 0) {
return vec![];
}
let mut res = vec![0.0; mat.len()];
for i in 0..n {
for j in 0..=i {
let mut s = 0.0;
for k in 0..j {
s += res[i * n + k] * res[j * n + k];
}
let value = if i == j {
let diag_value = mat[i * n + i] - s;
if diag_value.is_nan() {
0.0
} else {
diag_value.sqrt()
}
} else {
let off_diag_value = 1.0 / res[j * n + j] * (mat[i * n + j] - s);
if off_diag_value.is_nan() {
0.0
} else {
off_diag_value
}
};
res[i * n + j] = value;
}
}
res
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_cholesky() {
// Test case 1
let mat1 = vec![25.0, 15.0, -5.0, 15.0, 18.0, 0.0, -5.0, 0.0, 11.0];
let res1 = cholesky(mat1, 3);
// The expected Cholesky decomposition values
#[allow(clippy::useless_vec)]
let expected1 = vec![5.0, 0.0, 0.0, 3.0, 3.0, 0.0, -1.0, 1.0, 3.0];
assert!(res1
.iter()
.zip(expected1.iter())
.all(|(a, b)| (a - b).abs() < 1e-6));
}
fn transpose_matrix(mat: &[f64], n: usize) -> Vec<f64> {
(0..n)
.flat_map(|i| (0..n).map(move |j| mat[j * n + i]))
.collect()
}
fn matrix_multiply(mat1: &[f64], mat2: &[f64], n: usize) -> Vec<f64> {
(0..n)
.flat_map(|i| {
(0..n).map(move |j| {
(0..n).fold(0.0, |acc, k| acc + mat1[i * n + k] * mat2[k * n + j])
})
})
.collect()
}
#[test]
fn test_matrix_operations() {
// Test case 1: Transposition
let mat1 = vec![1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0];
let transposed_mat1 = transpose_matrix(&mat1, 3);
let expected_transposed_mat1 = vec![1.0, 4.0, 7.0, 2.0, 5.0, 8.0, 3.0, 6.0, 9.0];
assert_eq!(transposed_mat1, expected_transposed_mat1);
// Test case 2: Matrix multiplication
let mat2 = vec![1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0];
let mat3 = vec![9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0];
let multiplied_mat = matrix_multiply(&mat2, &mat3, 3);
let expected_multiplied_mat = vec![30.0, 24.0, 18.0, 84.0, 69.0, 54.0, 138.0, 114.0, 90.0];
assert_eq!(multiplied_mat, expected_multiplied_mat);
}
#[test]
fn empty_matrix() {
let mat = vec![];
let res = cholesky(mat, 0);
assert_eq!(res, vec![]);
}
#[test]
fn matrix_with_all_zeros() {
let mat3 = vec![0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0];
let res3 = cholesky(mat3, 3);
let expected3 = vec![0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0];
assert_eq!(res3, expected3);
}
}
| rust | {
"argument_definitions": [
{
"definitions": [
"pub struct Vec<T, #[unstable(feature = \"allocator_api\", issue = \"32838\")] A: Allocator = Global> {\n buf: RawVec<T, A>,\n len: usize,\n}"
],
"name": "mat",
"type": "Vec<f64>"
}
],
"end_line": 31,
"name": "cholesky",
"signature": "pub fn cholesky(mat: Vec<f64>, n: usize) -> Vec<f64>",
"start_line": 1
} | {
"class_name": "",
"class_signature": ""
} |
knapsack | Rust-master/src/dynamic_programming/knapsack.rs | pub fn knapsack(capacity: usize, items: Vec<Item>) -> KnapsackSolution {
let num_items = items.len();
let item_weights: Vec<usize> = items.iter().map(|item| item.weight).collect();
let item_values: Vec<usize> = items.iter().map(|item| item.value).collect();
let knapsack_matrix = generate_knapsack_matrix(capacity, &item_weights, &item_values);
let items_included =
retrieve_knapsack_items(&item_weights, &knapsack_matrix, num_items, capacity);
let total_weight = items_included
.iter()
.map(|&index| item_weights[index - 1])
.sum();
KnapsackSolution {
optimal_profit: knapsack_matrix[num_items][capacity],
total_weight,
item_indices: items_included,
}
} | //! This module provides functionality to solve the knapsack problem using dynamic programming.
//! It includes structures for items and solutions, and functions to compute the optimal solution.
use std::cmp::Ordering;
/// Represents an item with a weight and a value.
#[derive(Debug, PartialEq, Eq)]
pub struct Item {
weight: usize,
value: usize,
}
/// Represents the solution to the knapsack problem.
#[derive(Debug, PartialEq, Eq)]
pub struct KnapsackSolution {
/// The optimal profit obtained.
optimal_profit: usize,
/// The total weight of items included in the solution.
total_weight: usize,
/// The indices of items included in the solution. Indices might not be unique.
item_indices: Vec<usize>,
}
/// Solves the knapsack problem and returns the optimal profit, total weight, and indices of items included.
///
/// # Arguments:
/// * `capacity` - The maximum weight capacity of the knapsack.
/// * `items` - A vector of `Item` structs, each representing an item with weight and value.
///
/// # Returns:
/// A `KnapsackSolution` struct containing:
/// - `optimal_profit` - The maximum profit achievable with the given capacity and items.
/// - `total_weight` - The total weight of items included in the solution.
/// - `item_indices` - Indices of items included in the solution. Indices might not be unique.
///
/// # Note:
/// The indices of items in the solution might not be unique.
/// This function assumes that `items` is non-empty.
///
/// # Complexity:
/// - Time complexity: O(num_items * capacity)
/// - Space complexity: O(num_items * capacity)
///
/// where `num_items` is the number of items and `capacity` is the knapsack capacity.
pub fn knapsack(capacity: usize, items: Vec<Item>) -> KnapsackSolution {
let num_items = items.len();
let item_weights: Vec<usize> = items.iter().map(|item| item.weight).collect();
let item_values: Vec<usize> = items.iter().map(|item| item.value).collect();
let knapsack_matrix = generate_knapsack_matrix(capacity, &item_weights, &item_values);
let items_included =
retrieve_knapsack_items(&item_weights, &knapsack_matrix, num_items, capacity);
let total_weight = items_included
.iter()
.map(|&index| item_weights[index - 1])
.sum();
KnapsackSolution {
optimal_profit: knapsack_matrix[num_items][capacity],
total_weight,
item_indices: items_included,
}
}
/// Generates the knapsack matrix (`num_items`, `capacity`) with maximum values.
///
/// # Arguments:
/// * `capacity` - knapsack capacity
/// * `item_weights` - weights of each item
/// * `item_values` - values of each item
fn generate_knapsack_matrix(
capacity: usize,
item_weights: &[usize],
item_values: &[usize],
) -> Vec<Vec<usize>> {
let num_items = item_weights.len();
(0..=num_items).fold(
vec![vec![0; capacity + 1]; num_items + 1],
|mut matrix, item_index| {
(0..=capacity).for_each(|current_capacity| {
matrix[item_index][current_capacity] = if item_index == 0 || current_capacity == 0 {
0
} else if item_weights[item_index - 1] <= current_capacity {
usize::max(
item_values[item_index - 1]
+ matrix[item_index - 1]
[current_capacity - item_weights[item_index - 1]],
matrix[item_index - 1][current_capacity],
)
} else {
matrix[item_index - 1][current_capacity]
};
});
matrix
},
)
}
/// Retrieves the indices of items included in the optimal knapsack solution.
///
/// # Arguments:
/// * `item_weights` - weights of each item
/// * `knapsack_matrix` - knapsack matrix with maximum values
/// * `item_index` - number of items to consider (initially the total number of items)
/// * `remaining_capacity` - remaining capacity of the knapsack
///
/// # Returns
/// A vector of item indices included in the optimal solution. The indices might not be unique.
fn retrieve_knapsack_items(
item_weights: &[usize],
knapsack_matrix: &[Vec<usize>],
item_index: usize,
remaining_capacity: usize,
) -> Vec<usize> {
match item_index {
0 => vec![],
_ => {
let current_value = knapsack_matrix[item_index][remaining_capacity];
let previous_value = knapsack_matrix[item_index - 1][remaining_capacity];
match current_value.cmp(&previous_value) {
Ordering::Greater => {
let mut knap = retrieve_knapsack_items(
item_weights,
knapsack_matrix,
item_index - 1,
remaining_capacity - item_weights[item_index - 1],
);
knap.push(item_index);
knap
}
Ordering::Equal | Ordering::Less => retrieve_knapsack_items(
item_weights,
knapsack_matrix,
item_index - 1,
remaining_capacity,
),
}
}
}
}
#[cfg(test)]
mod tests {
use super::*;
macro_rules! knapsack_tests {
($($name:ident: $test_case:expr,)*) => {
$(
#[test]
fn $name() {
let (capacity, items, expected) = $test_case;
assert_eq!(expected, knapsack(capacity, items));
}
)*
}
}
knapsack_tests! {
test_basic_knapsack_small: (
165,
vec![
Item { weight: 23, value: 92 },
Item { weight: 31, value: 57 },
Item { weight: 29, value: 49 },
Item { weight: 44, value: 68 },
Item { weight: 53, value: 60 },
Item { weight: 38, value: 43 },
Item { weight: 63, value: 67 },
Item { weight: 85, value: 84 },
Item { weight: 89, value: 87 },
Item { weight: 82, value: 72 }
],
KnapsackSolution {
optimal_profit: 309,
total_weight: 165,
item_indices: vec![1, 2, 3, 4, 6]
}
),
test_basic_knapsack_tiny: (
26,
vec![
Item { weight: 12, value: 24 },
Item { weight: 7, value: 13 },
Item { weight: 11, value: 23 },
Item { weight: 8, value: 15 },
Item { weight: 9, value: 16 }
],
KnapsackSolution {
optimal_profit: 51,
total_weight: 26,
item_indices: vec![2, 3, 4]
}
),
test_basic_knapsack_medium: (
190,
vec![
Item { weight: 56, value: 50 },
Item { weight: 59, value: 50 },
Item { weight: 80, value: 64 },
Item { weight: 64, value: 46 },
Item { weight: 75, value: 50 },
Item { weight: 17, value: 5 }
],
KnapsackSolution {
optimal_profit: 150,
total_weight: 190,
item_indices: vec![1, 2, 5]
}
),
test_diverse_weights_values_small: (
50,
vec![
Item { weight: 31, value: 70 },
Item { weight: 10, value: 20 },
Item { weight: 20, value: 39 },
Item { weight: 19, value: 37 },
Item { weight: 4, value: 7 },
Item { weight: 3, value: 5 },
Item { weight: 6, value: 10 }
],
KnapsackSolution {
optimal_profit: 107,
total_weight: 50,
item_indices: vec![1, 4]
}
),
test_diverse_weights_values_medium: (
104,
vec![
Item { weight: 25, value: 350 },
Item { weight: 35, value: 400 },
Item { weight: 45, value: 450 },
Item { weight: 5, value: 20 },
Item { weight: 25, value: 70 },
Item { weight: 3, value: 8 },
Item { weight: 2, value: 5 },
Item { weight: 2, value: 5 }
],
KnapsackSolution {
optimal_profit: 900,
total_weight: 104,
item_indices: vec![1, 3, 4, 5, 7, 8]
}
),
test_high_value_items: (
170,
vec![
Item { weight: 41, value: 442 },
Item { weight: 50, value: 525 },
Item { weight: 49, value: 511 },
Item { weight: 59, value: 593 },
Item { weight: 55, value: 546 },
Item { weight: 57, value: 564 },
Item { weight: 60, value: 617 }
],
KnapsackSolution {
optimal_profit: 1735,
total_weight: 169,
item_indices: vec![2, 4, 7]
}
),
test_large_knapsack: (
750,
vec![
Item { weight: 70, value: 135 },
Item { weight: 73, value: 139 },
Item { weight: 77, value: 149 },
Item { weight: 80, value: 150 },
Item { weight: 82, value: 156 },
Item { weight: 87, value: 163 },
Item { weight: 90, value: 173 },
Item { weight: 94, value: 184 },
Item { weight: 98, value: 192 },
Item { weight: 106, value: 201 },
Item { weight: 110, value: 210 },
Item { weight: 113, value: 214 },
Item { weight: 115, value: 221 },
Item { weight: 118, value: 229 },
Item { weight: 120, value: 240 }
],
KnapsackSolution {
optimal_profit: 1458,
total_weight: 749,
item_indices: vec![1, 3, 5, 7, 8, 9, 14, 15]
}
),
test_zero_capacity: (
0,
vec![
Item { weight: 1, value: 1 },
Item { weight: 2, value: 2 },
Item { weight: 3, value: 3 }
],
KnapsackSolution {
optimal_profit: 0,
total_weight: 0,
item_indices: vec![]
}
),
test_very_small_capacity: (
1,
vec![
Item { weight: 10, value: 1 },
Item { weight: 20, value: 2 },
Item { weight: 30, value: 3 }
],
KnapsackSolution {
optimal_profit: 0,
total_weight: 0,
item_indices: vec![]
}
),
test_no_items: (
1,
vec![],
KnapsackSolution {
optimal_profit: 0,
total_weight: 0,
item_indices: vec![]
}
),
test_item_too_heavy: (
1,
vec![
Item { weight: 2, value: 100 }
],
KnapsackSolution {
optimal_profit: 0,
total_weight: 0,
item_indices: vec![]
}
),
test_greedy_algorithm_does_not_work: (
10,
vec![
Item { weight: 10, value: 15 },
Item { weight: 6, value: 7 },
Item { weight: 4, value: 9 }
],
KnapsackSolution {
optimal_profit: 16,
total_weight: 10,
item_indices: vec![2, 3]
}
),
test_greedy_algorithm_does_not_work_weight_smaller_than_capacity: (
10,
vec![
Item { weight: 10, value: 15 },
Item { weight: 1, value: 9 },
Item { weight: 2, value: 7 }
],
KnapsackSolution {
optimal_profit: 16,
total_weight: 3,
item_indices: vec![2, 3]
}
),
}
}
| rust | {
"argument_definitions": [
{
"definitions": [
"pub struct Vec<T, #[unstable(feature = \"allocator_api\", issue = \"32838\")] A: Allocator = Global> {\n buf: RawVec<T, A>,\n len: usize,\n}",
"pub struct Item {\n weight: usize,\n value: usize,\n}"
],
"name": "items",
"type": "Vec<Item>"
}
],
"end_line": 64,
"name": "knapsack",
"signature": "pub fn knapsack(capacity: usize, items: Vec<Item>) -> KnapsackSolution",
"start_line": 45
} | {
"class_name": "",
"class_signature": ""
} |
minimum_cost_path | Rust-master/src/dynamic_programming/minimum_cost_path.rs | pub fn minimum_cost_path(matrix: Vec<Vec<usize>>) -> Result<usize, MatrixError> {
// Check if the matrix is rectangular
if !matrix.iter().all(|row| row.len() == matrix[0].len()) {
return Err(MatrixError::NonRectangularMatrix);
}
// Check if the matrix is empty or contains empty rows
if matrix.is_empty() || matrix.iter().all(|row| row.is_empty()) {
return Err(MatrixError::EmptyMatrix);
}
// Initialize the first row of the cost vector
let mut cost = matrix[0]
.iter()
.scan(0, |acc, &val| {
*acc += val;
Some(*acc)
})
.collect::<Vec<_>>();
// Process each row from the second to the last
for row in matrix.iter().skip(1) {
// Update the first element of cost for this row
cost[0] += row[0];
// Update the rest of the elements in the current row of cost
for col in 1..matrix[0].len() {
cost[col] = row[col] + min(cost[col - 1], cost[col]);
}
}
// The last element in cost contains the minimum path cost to the bottom-right corner
Ok(cost[matrix[0].len() - 1])
} | use std::cmp::min;
/// Represents possible errors that can occur when calculating the minimum cost path in a matrix.
#[derive(Debug, PartialEq, Eq)]
pub enum MatrixError {
/// Error indicating that the matrix is empty or has empty rows.
EmptyMatrix,
/// Error indicating that the matrix is not rectangular in shape.
NonRectangularMatrix,
}
/// Computes the minimum cost path from the top-left to the bottom-right
/// corner of a matrix, where movement is restricted to right and down directions.
///
/// # Arguments
///
/// * `matrix` - A 2D vector of positive integers, where each element represents
/// the cost to step on that cell.
///
/// # Returns
///
/// * `Ok(usize)` - The minimum path cost to reach the bottom-right corner from
/// the top-left corner of the matrix.
/// * `Err(MatrixError)` - An error if the matrix is empty or improperly formatted.
///
/// # Complexity
///
/// * Time complexity: `O(m * n)`, where `m` is the number of rows
/// and `n` is the number of columns in the input matrix.
/// * Space complexity: `O(n)`, as only a single row of cumulative costs
/// is stored at any time.
pub fn minimum_cost_path(matrix: Vec<Vec<usize>>) -> Result<usize, MatrixError> {
// Check if the matrix is rectangular
if !matrix.iter().all(|row| row.len() == matrix[0].len()) {
return Err(MatrixError::NonRectangularMatrix);
}
// Check if the matrix is empty or contains empty rows
if matrix.is_empty() || matrix.iter().all(|row| row.is_empty()) {
return Err(MatrixError::EmptyMatrix);
}
// Initialize the first row of the cost vector
let mut cost = matrix[0]
.iter()
.scan(0, |acc, &val| {
*acc += val;
Some(*acc)
})
.collect::<Vec<_>>();
// Process each row from the second to the last
for row in matrix.iter().skip(1) {
// Update the first element of cost for this row
cost[0] += row[0];
// Update the rest of the elements in the current row of cost
for col in 1..matrix[0].len() {
cost[col] = row[col] + min(cost[col - 1], cost[col]);
}
}
// The last element in cost contains the minimum path cost to the bottom-right corner
Ok(cost[matrix[0].len() - 1])
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Runs `minimum_cost_path` on `matrix` and compares against `expected`.
    fn check(matrix: Vec<Vec<usize>>, expected: Result<usize, MatrixError>) {
        assert_eq!(minimum_cost_path(matrix), expected);
    }

    #[test]
    fn basic() {
        check(vec![vec![2, 1, 4], vec![2, 1, 3], vec![3, 2, 1]], Ok(7));
    }

    #[test]
    fn single_element() {
        check(vec![vec![5]], Ok(5));
    }

    #[test]
    fn single_row() {
        check(vec![vec![1, 3, 2, 1, 5]], Ok(12));
    }

    #[test]
    fn single_column() {
        check(vec![vec![1], vec![3], vec![2], vec![1], vec![5]], Ok(12));
    }

    #[test]
    fn large_matrix() {
        check(
            vec![
                vec![1, 3, 1, 5],
                vec![2, 1, 4, 2],
                vec![3, 2, 1, 3],
                vec![4, 3, 2, 1],
            ],
            Ok(10),
        );
    }

    #[test]
    fn uniform_matrix() {
        check(vec![vec![1, 1, 1], vec![1, 1, 1], vec![1, 1, 1]], Ok(5));
    }

    #[test]
    fn increasing_values() {
        check(vec![vec![1, 2, 3], vec![4, 5, 6], vec![7, 8, 9]], Ok(21));
    }

    #[test]
    fn high_cost_path() {
        check(vec![vec![1, 100, 1], vec![1, 100, 1], vec![1, 1, 1]], Ok(5));
    }

    #[test]
    fn complex_matrix() {
        check(
            vec![
                vec![5, 9, 6, 8],
                vec![1, 4, 7, 3],
                vec![2, 1, 8, 2],
                vec![3, 6, 9, 4],
            ],
            Ok(23),
        );
    }

    #[test]
    fn empty_matrix() {
        check(vec![], Err(MatrixError::EmptyMatrix));
    }

    #[test]
    fn empty_row() {
        check(vec![vec![], vec![], vec![]], Err(MatrixError::EmptyMatrix));
    }

    #[test]
    fn non_rectangular() {
        check(
            vec![vec![1, 2, 3], vec![4, 5], vec![6, 7, 8]],
            Err(MatrixError::NonRectangularMatrix),
        );
    }
}
| rust | {
"argument_definitions": [
{
"definitions": [
"pub struct Vec<T, #[unstable(feature = \"allocator_api\", issue = \"32838\")] A: Allocator = Global> {\n buf: RawVec<T, A>,\n len: usize,\n}",
"pub struct Vec<T, #[unstable(feature = \"allocator_api\", issue = \"32838\")] A: Allocator = Global> {\n buf: RawVec<T, A>,\n len: usize,\n}"
],
"name": "matrix",
"type": "Vec<Vec<usize>>"
}
],
"end_line": 65,
"name": "minimum_cost_path",
"signature": "pub fn minimum_cost_path(matrix: Vec<Vec<usize>>) -> Result<usize, MatrixError>",
"start_line": 32
} | {
"class_name": "",
"class_signature": ""
} |
fractional_knapsack | Rust-master/src/dynamic_programming/fractional_knapsack.rs | pub fn fractional_knapsack(mut capacity: f64, weights: Vec<f64>, values: Vec<f64>) -> f64 {
    // Pair each weight with its value/weight ratio (value density).
    let mut items: Vec<(f64, f64)> = weights
        .iter()
        .zip(values.iter())
        .map(|(&w, &v)| (w, v / w))
        .collect();
    // Greedy strategy: take items in decreasing order of value density.
    // Panics on a NaN ratio, which the test-suite treats as a contract.
    items.sort_unstable_by(|a, b| b.1.partial_cmp(&a.1).expect("Encountered NaN"));
    // Accumulated value of the knapsack.
    let mut knapsack_value: f64 = 0.0;
    for (weight, ratio) in items {
        if weight < capacity {
            // The whole item fits; consume its weight.
            capacity -= weight;
            knapsack_value += weight * ratio;
        } else {
            // Only a fraction fits; fill the remaining capacity and stop.
            knapsack_value += capacity * ratio;
            break;
        }
    }
    knapsack_value
} | pub fn fractional_knapsack(mut capacity: f64, weights: Vec<f64>, values: Vec<f64>) -> f64 {
    // Pair each weight with its value/weight ratio (value density).
    let mut items: Vec<(f64, f64)> = weights
        .iter()
        .zip(values.iter())
        .map(|(&w, &v)| (w, v / w))
        .collect();
    // Greedy strategy: take items in decreasing order of value density.
    // Panics on a NaN ratio, which the test-suite treats as a contract.
    items.sort_unstable_by(|a, b| b.1.partial_cmp(&a.1).expect("Encountered NaN"));
    // Accumulated value of the knapsack.
    let mut knapsack_value: f64 = 0.0;
    for (weight, ratio) in items {
        if weight < capacity {
            // The whole item fits; consume its weight.
            capacity -= weight;
            knapsack_value += weight * ratio;
        } else {
            // Only a fraction fits; fill the remaining capacity and stop.
            knapsack_value += capacity * ratio;
            break;
        }
    }
    knapsack_value
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Asserts that the greedy solution for the given instance equals `expected`.
    fn check(capacity: f64, weights: Vec<f64>, values: Vec<f64>, expected: f64) {
        assert_eq!(fractional_knapsack(capacity, weights, values), expected);
    }

    #[test]
    fn test() {
        check(50.0, vec![10.0, 20.0, 30.0], vec![60.0, 100.0, 120.0], 240.0);
    }

    #[test]
    fn test2() {
        check(
            60.0,
            vec![40.0, 10.0, 20.0, 24.0],
            vec![280.0, 100.0, 120.0, 120.0],
            440.0,
        );
    }

    #[test]
    fn test3() {
        check(50.0, vec![20.0, 50.0, 30.0], vec![60.0, 100.0, 120.0], 180.0);
    }

    #[test]
    fn test4() {
        check(
            60.0,
            vec![5.0, 10.0, 15.0, 22.0, 25.0],
            vec![30.0, 40.0, 45.0, 77.0, 90.0],
            230.0,
        );
    }

    #[test]
    fn test5() {
        // Only a third of the single item fits; compare with 2-decimal rounding.
        let result = fractional_knapsack(10.0, vec![30.0], vec![500.0]);
        assert_eq!(format!("{result:.2}"), String::from("166.67"));
    }

    #[test]
    fn test6() {
        check(
            36.0,
            vec![10.0, 10.0, 10.0, 4.0, 2.0],
            vec![25.0, 25.0, 25.0, 6.0, 2.0],
            83.0,
        );
    }

    #[test]
    #[should_panic]
    fn test_nan() {
        // The 2nd value is NaN: sorting by ratio must panic.
        check(
            36.0,
            vec![10.0, 10.0, 10.0, 4.0, 2.0],
            vec![25.0, f64::NAN, 25.0, 6.0, 2.0],
            83.0,
        );
    }
}
| rust | {
"argument_definitions": [
{
"definitions": [
"pub struct Vec<T, #[unstable(feature = \"allocator_api\", issue = \"32838\")] A: Allocator = Global> {\n buf: RawVec<T, A>,\n len: usize,\n}"
],
"name": "weights",
"type": "Vec<f64>"
},
{
"definitions": [
"pub struct Vec<T, #[unstable(feature = \"allocator_api\", issue = \"32838\")] A: Allocator = Global> {\n buf: RawVec<T, A>,\n len: usize,\n}"
],
"name": "values",
"type": "Vec<f64>"
}
],
"end_line": 32,
"name": "fractional_knapsack",
"signature": "pub fn fractional_knapsack(mut capacity: f64, weights: Vec<f64>, values: Vec<f64>) -> f64",
"start_line": 1
} | {
"class_name": "",
"class_signature": ""
} |
dutch_national_flag_sort | Rust-master/src/sorting/dutch_national_flag_sort.rs | pub fn dutch_national_flag_sort(mut sequence: Vec<Colors>) -> Vec<Colors> {
    // Takes ownership of `sequence`; it is partitioned in place and returned.
    let length = sequence.len();
    if length <= 1 {
        return sequence; // 0/1-element sequences are already sorted
    }
    // Classic three-way partition: everything left of `low` is Red,
    // between `low` and `mid` is White, right of `high` is Blue.
    let mut low = 0;
    let mut mid = 0;
    let mut high = length - 1;
    while mid <= high {
        match sequence[mid] {
            Red => {
                sequence.swap(low, mid);
                low += 1;
                mid += 1;
            }
            White => {
                mid += 1;
            }
            Blue => {
                sequence.swap(mid, high);
                // Guard against `usize` underflow when the unsorted window has
                // shrunk to index 0 (e.g. an all-Blue input such as [Blue, Blue]).
                if high == 0 {
                    break;
                }
                high -= 1;
            }
        }
    }
    sequence
} | /*
A Rust implementation of the Dutch National Flag sorting algorithm.
Reference implementation: https://github.com/TheAlgorithms/Python/blob/master/sorts/dutch_national_flag_sort.py
More info: https://en.wikipedia.org/wiki/Dutch_national_flag_problem
*/
// `Debug` added so the type can be used with `assert_eq!` and diagnostics.
#[derive(Debug, PartialOrd, PartialEq, Eq)]
pub enum Colors {
    Red, // \
    White, // | Define the three colors of the Dutch Flag: 🇳🇱
    Blue, // /
}
use Colors::*;
// Algorithm implementation
/// Sorts `sequence` into the Red < White < Blue order using Dijkstra's
/// three-way partition, in O(n) time and O(1) extra space.
///
/// Takes ownership of `sequence`, partitions it in place, and returns it.
pub fn dutch_national_flag_sort(mut sequence: Vec<Colors>) -> Vec<Colors> {
    let length = sequence.len();
    if length <= 1 {
        return sequence; // 0/1-element sequences are already sorted
    }
    // Invariant: [0, low) is Red, [low, mid) is White, (high, len) is Blue;
    // [mid, high] is still unclassified.
    let mut low = 0;
    let mut mid = 0;
    let mut high = length - 1;
    while mid <= high {
        match sequence[mid] {
            Red => {
                sequence.swap(low, mid);
                low += 1;
                mid += 1;
            }
            White => {
                mid += 1;
            }
            Blue => {
                sequence.swap(mid, high);
                // Guard against `usize` underflow when the unsorted window has
                // shrunk to index 0 (e.g. an all-Blue input such as [Blue, Blue]);
                // without this, `high -= 1` panics in debug builds.
                if high == 0 {
                    break;
                }
                high -= 1;
            }
        }
    }
    sequence
}
#[cfg(test)]
mod tests {
    use super::super::is_sorted;
    use super::*;

    /// Sorts `input` and checks the result is ordered Red <= White <= Blue.
    fn assert_sorts(input: Vec<Colors>) {
        let sorted = dutch_national_flag_sort(input);
        assert!(is_sorted(&sorted))
    }

    #[test]
    fn random_array() {
        assert_sorts(vec![
            Red, Blue, White, White, Blue, Blue, Red, Red, White, Blue, White, Red, White, Blue,
        ]);
    }

    #[test]
    fn sorted_array() {
        assert_sorts(vec![
            Red, Red, Red, Red, Red, White, White, White, White, White, Blue, Blue, Blue, Blue,
        ]);
    }
}
| rust | {
"argument_definitions": [
{
"definitions": [
"pub struct Vec<T, #[unstable(feature = \"allocator_api\", issue = \"32838\")] A: Allocator = Global> {\n buf: RawVec<T, A>,\n len: usize,\n}",
"pub enum Colors {\n Red, // \\\n White, // | Define the three colors of the Dutch Flag: 🇳🇱\n Blue, // /\n}"
],
"name": "sequence",
"type": "Vec<Colors>"
}
],
"end_line": 43,
"name": "dutch_national_flag_sort",
"signature": "pub fn dutch_national_flag_sort(mut sequence: Vec<Colors>) -> Vec<Colors>",
"start_line": 17
} | {
"class_name": "",
"class_signature": ""
} |
dijkstra | Rust-master/src/graph/dijkstra.rs | pub fn dijkstra(
graph: &Graph<V, E>,
start: V,
) -> BTreeMap<V, Option<(V, E)>> {
let mut ans = BTreeMap::new();
let mut prio = BTreeSet::new();
// start is the special case that doesn't have a predecessor
ans.insert(start, None);
for (new, weight) in &graph[&start] {
ans.insert(*new, Some((start, *weight)));
prio.insert((*weight, *new));
}
while let Some((path_weight, vertex)) = prio.pop_first() {
for (next, weight) in &graph[&vertex] {
let new_weight = path_weight + *weight;
match ans.get(next) {
// if ans[next] is a lower dist than the alternative one, we do nothing
Some(Some((_, dist_next))) if new_weight >= *dist_next => {}
// if ans[next] is None then next is start and so the distance won't be changed, it won't be added again in prio
Some(None) => {}
// the new path is shorter, either new was not in ans or it was farther
_ => {
if let Some(Some((_, prev_weight))) =
ans.insert(*next, Some((vertex, new_weight)))
{
prio.remove(&(prev_weight, *next));
}
prio.insert((new_weight, *next));
}
}
}
}
ans
} | use std::collections::{BTreeMap, BTreeSet};
use std::ops::Add;
type Graph<V, E> = BTreeMap<V, BTreeMap<V, E>>;

// Runs Dijkstra's algorithm on a positively-weighted directed graph, starting
// from `start`.
//
// Returns, for every reachable vertex, its predecessor on a shortest path
// together with the total distance from `start`. The start vertex itself is
// present in the map with value `None`, since it has no predecessor.
//
// Time: O(E * logV). Every edge is traversed once, and each traversal may
// insert/remove one entry in the BTreeSet-based priority queue.
// Space: O(V) for the result map and the queue.
pub fn dijkstra<V: Ord + Copy, E: Ord + Copy + Add<Output = E>>(
    graph: &Graph<V, E>,
    start: V,
) -> BTreeMap<V, Option<(V, E)>> {
    let mut shortest: BTreeMap<V, Option<(V, E)>> = BTreeMap::new();
    let mut queue = BTreeSet::new();

    // The start vertex is reachable by definition and has no predecessor.
    shortest.insert(start, None);
    // Seed the queue with the direct neighbours of the start vertex.
    for (&neighbour, &weight) in &graph[&start] {
        shortest.insert(neighbour, Some((start, weight)));
        queue.insert((weight, neighbour));
    }

    // Repeatedly settle the pending vertex with the smallest distance.
    while let Some((distance, vertex)) = queue.pop_first() {
        for (&neighbour, &weight) in &graph[&vertex] {
            let candidate = distance + weight;
            match shortest.get(&neighbour) {
                // The recorded path is already at least as short: keep it.
                Some(Some((_, best))) if candidate >= *best => {}
                // `neighbour` is the start vertex: its distance never changes.
                Some(None) => {}
                // Strictly better (or first) path to `neighbour` found.
                _ => {
                    if let Some(Some((_, stale))) =
                        shortest.insert(neighbour, Some((vertex, candidate)))
                    {
                        // Drop the queue entry for the superseded path.
                        queue.remove(&(stale, neighbour));
                    }
                    queue.insert((candidate, neighbour));
                }
            }
        }
    }

    shortest
}
#[cfg(test)]
mod tests {
use super::{dijkstra, Graph};
use std::collections::BTreeMap;
fn add_edge<V: Ord + Copy, E: Ord>(graph: &mut Graph<V, E>, v1: V, v2: V, c: E) {
graph.entry(v1).or_default().insert(v2, c);
graph.entry(v2).or_default();
}
#[test]
fn single_vertex() {
let mut graph: Graph<usize, usize> = BTreeMap::new();
graph.insert(0, BTreeMap::new());
let mut dists = BTreeMap::new();
dists.insert(0, None);
assert_eq!(dijkstra(&graph, 0), dists);
}
#[test]
fn single_edge() {
let mut graph = BTreeMap::new();
add_edge(&mut graph, 0, 1, 2);
let mut dists_0 = BTreeMap::new();
dists_0.insert(0, None);
dists_0.insert(1, Some((0, 2)));
assert_eq!(dijkstra(&graph, 0), dists_0);
let mut dists_1 = BTreeMap::new();
dists_1.insert(1, None);
assert_eq!(dijkstra(&graph, 1), dists_1);
}
#[test]
fn tree_1() {
let mut graph = BTreeMap::new();
let mut dists = BTreeMap::new();
dists.insert(1, None);
for i in 1..100 {
add_edge(&mut graph, i, i * 2, i * 2);
add_edge(&mut graph, i, i * 2 + 1, i * 2 + 1);
match dists[&i] {
Some((_, d)) => {
dists.insert(i * 2, Some((i, d + i * 2)));
dists.insert(i * 2 + 1, Some((i, d + i * 2 + 1)));
}
None => {
dists.insert(i * 2, Some((i, i * 2)));
dists.insert(i * 2 + 1, Some((i, i * 2 + 1)));
}
}
}
assert_eq!(dijkstra(&graph, 1), dists);
}
#[test]
fn graph_1() {
let mut graph = BTreeMap::new();
add_edge(&mut graph, 'a', 'c', 12);
add_edge(&mut graph, 'a', 'd', 60);
add_edge(&mut graph, 'b', 'a', 10);
add_edge(&mut graph, 'c', 'b', 20);
add_edge(&mut graph, 'c', 'd', 32);
add_edge(&mut graph, 'e', 'a', 7);
let mut dists_a = BTreeMap::new();
dists_a.insert('a', None);
dists_a.insert('c', Some(('a', 12)));
dists_a.insert('d', Some(('c', 44)));
dists_a.insert('b', Some(('c', 32)));
assert_eq!(dijkstra(&graph, 'a'), dists_a);
let mut dists_b = BTreeMap::new();
dists_b.insert('b', None);
dists_b.insert('a', Some(('b', 10)));
dists_b.insert('c', Some(('a', 22)));
dists_b.insert('d', Some(('c', 54)));
assert_eq!(dijkstra(&graph, 'b'), dists_b);
let mut dists_c = BTreeMap::new();
dists_c.insert('c', None);
dists_c.insert('b', Some(('c', 20)));
dists_c.insert('d', Some(('c', 32)));
dists_c.insert('a', Some(('b', 30)));
assert_eq!(dijkstra(&graph, 'c'), dists_c);
let mut dists_d = BTreeMap::new();
dists_d.insert('d', None);
assert_eq!(dijkstra(&graph, 'd'), dists_d);
let mut dists_e = BTreeMap::new();
dists_e.insert('e', None);
dists_e.insert('a', Some(('e', 7)));
dists_e.insert('c', Some(('a', 19)));
dists_e.insert('d', Some(('c', 51)));
dists_e.insert('b', Some(('c', 39)));
assert_eq!(dijkstra(&graph, 'e'), dists_e);
}
}
| rust | {
"argument_definitions": [
{
"definitions": [
") -> BTreeMap<V, Option<(V, E)>> {\n let mut ans = BTreeMap::new();\n let mut prio = BTreeSet::new();\n\n // start is the special case that doesn't have a predecessor\n ans.insert(start, None);\n\n for (new, weight) in &graph[&start] {\n ans.insert(*new, Some((start, *weight)));\n prio.insert((*weight, *new));\n }\n\n while let Some((path_weight, vertex)) = prio.pop_first() {\n for (next, weight) in &graph[&vertex] {\n let new_weight = path_weight + *weight;\n match ans.get(next) {\n // if ans[next] is a lower dist than the alternative one, we do nothing\n Some(Some((_, dist_next))) if new_weight >= *dist_next => {}\n // if ans[next] is None then next is start and so the distance won't be changed, it won't be added again in prio\n Some(None) => {}\n // the new path is shorter, either new was not in ans or it was farther\n _ => {\n if let Some(Some((_, prev_weight))) =\n ans.insert(*next, Some((vertex, new_weight)))\n {\n prio.remove(&(prev_weight, *next));\n }\n prio.insert((new_weight, *next));\n }\n }\n }\n }\n\n ans\n}"
],
"name": "graph",
"type": "&Graph<V, E>"
}
],
"end_line": 52,
"name": "dijkstra",
"signature": "pub fn dijkstra(\n graph: &Graph<V, E>,\n start: V,\n) -> BTreeMap<V, Option<(V, E)>>",
"start_line": 15
} | {
"class_name": "",
"class_signature": ""
} |
astar | Rust-master/src/graph/astar.rs | pub fn astar(
graph: &Graph<V, E>,
start: V,
target: V,
heuristic: impl Fn(V) -> E,
) -> Option<(E, Vec<V>)> {
// traversal front
let mut queue = BinaryHeap::new();
// maps each node to its predecessor in the final path
let mut previous = BTreeMap::new();
// weights[v] is the accumulated weight from start to v
let mut weights = BTreeMap::new();
// initialize traversal
weights.insert(start, E::zero());
queue.push(Candidate {
estimated_weight: heuristic(start),
real_weight: E::zero(),
state: start,
});
while let Some(Candidate {
real_weight,
state: current,
..
}) = queue.pop()
{
if current == target {
break;
}
for (&next, &weight) in &graph[¤t] {
let real_weight = real_weight + weight;
if weights
.get(&next)
.is_none_or(|&weight| real_weight < weight)
{
// current allows us to reach next with lower weight (or at all)
// add next to the front
let estimated_weight = real_weight + heuristic(next);
weights.insert(next, real_weight);
queue.push(Candidate {
estimated_weight,
real_weight,
state: next,
});
previous.insert(next, current);
}
}
}
let weight = if let Some(&weight) = weights.get(&target) {
weight
} else {
// we did not reach target from start
return None;
};
// build path in reverse
let mut current = target;
let mut path = vec![current];
while current != start {
let prev = previous
.get(¤t)
.copied()
.expect("We reached the target, but are unable to reconsistute the path");
current = prev;
path.push(current);
}
path.reverse();
Some((weight, path))
} | use std::{
collections::{BTreeMap, BinaryHeap},
ops::Add,
};
use num_traits::Zero;
type Graph<V, E> = BTreeMap<V, BTreeMap<V, E>>;
/// A search-front entry for A*: vertex `state` reached with accumulated path
/// weight `real_weight`, prioritised in the heap by `estimated_weight`
/// (real weight plus the heuristic estimate to the target).
#[derive(Clone, Debug, Eq, PartialEq)]
struct Candidate<V, E> {
    estimated_weight: E, // priority key used by the BinaryHeap
    real_weight: E,      // weight of the path from start to `state`
    state: V,
}
impl<V: Ord + Copy, E: Ord + Copy> PartialOrd for Candidate<V, E> {
    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
        // Note the inverted order; we want nodes with lesser weight to have
        // higher priority
        Some(self.cmp(other))
    }
}
impl<V: Ord + Copy, E: Ord + Copy> Ord for Candidate<V, E> {
    // NOTE(review): comparing only `estimated_weight` means two candidates can
    // compare `Equal` while the derived `PartialEq` (all fields) says they
    // differ, which technically violates the Ord/PartialEq consistency
    // contract. Harmless for `BinaryHeap` usage, but worth confirming upstream.
    fn cmp(&self, other: &Self) -> std::cmp::Ordering {
        // Note the inverted order; we want nodes with lesser weight to have
        // higher priority
        other.estimated_weight.cmp(&self.estimated_weight)
    }
}
/// A* shortest-path search from `start` to `target` on a directed,
/// non-negatively weighted graph.
///
/// For the returned weight to be optimal, `heuristic(v)` must never
/// overestimate the remaining distance from `v` to `target` (admissible)
/// — TODO confirm this is documented at the call sites.
///
/// # Returns
///
/// `Some((total_weight, path))` with `path` running from `start` to `target`,
/// or `None` when `target` is unreachable from `start`.
pub fn astar<V: Ord + Copy, E: Ord + Copy + Add<Output = E> + Zero>(
    graph: &Graph<V, E>,
    start: V,
    target: V,
    heuristic: impl Fn(V) -> E,
) -> Option<(E, Vec<V>)> {
    // traversal front
    let mut queue = BinaryHeap::new();
    // maps each node to its predecessor in the final path
    let mut previous = BTreeMap::new();
    // weights[v] is the accumulated weight from start to v
    let mut weights = BTreeMap::new();
    // initialize traversal
    weights.insert(start, E::zero());
    queue.push(Candidate {
        estimated_weight: heuristic(start),
        real_weight: E::zero(),
        state: start,
    });
    while let Some(Candidate {
        real_weight,
        state: current,
        ..
    }) = queue.pop()
    {
        // Stop as soon as the target is popped; with an admissible heuristic
        // its recorded weight is final at this point.
        if current == target {
            break;
        }
        for (&next, &weight) in &graph[&current] {
            let real_weight = real_weight + weight;
            // Stale queue entries are never removed ("lazy deletion"); they are
            // filtered here because they cannot beat the recorded weight.
            // NOTE(review): `Option::is_none_or` requires Rust 1.82+.
            if weights
                .get(&next)
                .is_none_or(|&weight| real_weight < weight)
            {
                // current allows us to reach next with lower weight (or at all)
                // add next to the front
                let estimated_weight = real_weight + heuristic(next);
                weights.insert(next, real_weight);
                queue.push(Candidate {
                    estimated_weight,
                    real_weight,
                    state: next,
                });
                previous.insert(next, current);
            }
        }
    }
    let weight = if let Some(&weight) = weights.get(&target) {
        weight
    } else {
        // we did not reach target from start
        return None;
    };
    // build path in reverse, following predecessor links from the target.
    // NOTE(review): "reconsistute" typo in the panic message — fix upstream.
    let mut current = target;
    let mut path = vec![current];
    while current != start {
        let prev = previous
            .get(&current)
            .copied()
            .expect("We reached the target, but are unable to reconsistute the path");
        current = prev;
        path.push(current);
    }
    path.reverse();
    Some((weight, path))
}
#[cfg(test)]
mod tests {
use super::{astar, Graph};
use num_traits::Zero;
use std::collections::BTreeMap;
// the null heuristic make A* equivalent to Dijkstra
fn null_heuristic<V, E: Zero>(_v: V) -> E {
E::zero()
}
fn add_edge<V: Ord + Copy, E: Ord>(graph: &mut Graph<V, E>, v1: V, v2: V, c: E) {
graph.entry(v1).or_default().insert(v2, c);
graph.entry(v2).or_default();
}
#[test]
fn single_vertex() {
let mut graph: Graph<usize, usize> = BTreeMap::new();
graph.insert(0, BTreeMap::new());
assert_eq!(astar(&graph, 0, 0, null_heuristic), Some((0, vec![0])));
assert_eq!(astar(&graph, 0, 1, null_heuristic), None);
}
#[test]
fn single_edge() {
let mut graph = BTreeMap::new();
add_edge(&mut graph, 0, 1, 2);
assert_eq!(astar(&graph, 0, 1, null_heuristic), Some((2, vec![0, 1])));
assert_eq!(astar(&graph, 1, 0, null_heuristic), None);
}
#[test]
fn graph_1() {
let mut graph = BTreeMap::new();
add_edge(&mut graph, 'a', 'c', 12);
add_edge(&mut graph, 'a', 'd', 60);
add_edge(&mut graph, 'b', 'a', 10);
add_edge(&mut graph, 'c', 'b', 20);
add_edge(&mut graph, 'c', 'd', 32);
add_edge(&mut graph, 'e', 'a', 7);
// from a
assert_eq!(
astar(&graph, 'a', 'a', null_heuristic),
Some((0, vec!['a']))
);
assert_eq!(
astar(&graph, 'a', 'b', null_heuristic),
Some((32, vec!['a', 'c', 'b']))
);
assert_eq!(
astar(&graph, 'a', 'c', null_heuristic),
Some((12, vec!['a', 'c']))
);
assert_eq!(
astar(&graph, 'a', 'd', null_heuristic),
Some((12 + 32, vec!['a', 'c', 'd']))
);
assert_eq!(astar(&graph, 'a', 'e', null_heuristic), None);
// from b
assert_eq!(
astar(&graph, 'b', 'a', null_heuristic),
Some((10, vec!['b', 'a']))
);
assert_eq!(
astar(&graph, 'b', 'b', null_heuristic),
Some((0, vec!['b']))
);
assert_eq!(
astar(&graph, 'b', 'c', null_heuristic),
Some((10 + 12, vec!['b', 'a', 'c']))
);
assert_eq!(
astar(&graph, 'b', 'd', null_heuristic),
Some((10 + 12 + 32, vec!['b', 'a', 'c', 'd']))
);
assert_eq!(astar(&graph, 'b', 'e', null_heuristic), None);
// from c
assert_eq!(
astar(&graph, 'c', 'a', null_heuristic),
Some((20 + 10, vec!['c', 'b', 'a']))
);
assert_eq!(
astar(&graph, 'c', 'b', null_heuristic),
Some((20, vec!['c', 'b']))
);
assert_eq!(
astar(&graph, 'c', 'c', null_heuristic),
Some((0, vec!['c']))
);
assert_eq!(
astar(&graph, 'c', 'd', null_heuristic),
Some((32, vec!['c', 'd']))
);
assert_eq!(astar(&graph, 'c', 'e', null_heuristic), None);
// from d
assert_eq!(astar(&graph, 'd', 'a', null_heuristic), None);
assert_eq!(astar(&graph, 'd', 'b', null_heuristic), None);
assert_eq!(astar(&graph, 'd', 'c', null_heuristic), None);
assert_eq!(
astar(&graph, 'd', 'd', null_heuristic),
Some((0, vec!['d']))
);
assert_eq!(astar(&graph, 'd', 'e', null_heuristic), None);
// from e
assert_eq!(
astar(&graph, 'e', 'a', null_heuristic),
Some((7, vec!['e', 'a']))
);
assert_eq!(
astar(&graph, 'e', 'b', null_heuristic),
Some((7 + 12 + 20, vec!['e', 'a', 'c', 'b']))
);
assert_eq!(
astar(&graph, 'e', 'c', null_heuristic),
Some((7 + 12, vec!['e', 'a', 'c']))
);
assert_eq!(
astar(&graph, 'e', 'd', null_heuristic),
Some((7 + 12 + 32, vec!['e', 'a', 'c', 'd']))
);
assert_eq!(
astar(&graph, 'e', 'e', null_heuristic),
Some((0, vec!['e']))
);
}
#[test]
fn test_heuristic() {
// make a grid
let mut graph = BTreeMap::new();
let rows = 100;
let cols = 100;
for row in 0..rows {
for col in 0..cols {
add_edge(&mut graph, (row, col), (row + 1, col), 1);
add_edge(&mut graph, (row, col), (row, col + 1), 1);
add_edge(&mut graph, (row, col), (row + 1, col + 1), 1);
add_edge(&mut graph, (row + 1, col), (row, col), 1);
add_edge(&mut graph, (row + 1, col + 1), (row, col), 1);
}
}
// Dijkstra would explore most of the 101 × 101 nodes
// the heuristic should allow exploring only about 200 nodes
let now = std::time::Instant::now();
let res = astar(&graph, (0, 0), (100, 90), |(i, j)| 100 - i + 90 - j);
assert!(now.elapsed() < std::time::Duration::from_millis(10));
let (weight, path) = res.unwrap();
assert_eq!(weight, 100);
assert_eq!(path.len(), 101);
}
}
| rust | {
"argument_definitions": [
{
"definitions": [
"struct Candidate<V, E> {\n estimated_weight: E,\n real_weight: E,\n state: V,\n}"
],
"name": "graph",
"type": "&Graph<V, E>"
}
],
"end_line": 99,
"name": "astar",
"signature": "pub fn astar(\n graph: &Graph<V, E>,\n start: V,\n target: V,\n heuristic: impl Fn(V) -> E,\n) -> Option<(E, Vec<V>)>",
"start_line": 33
} | {
"class_name": "",
"class_signature": ""
} |
ford_fulkerson | Rust-master/src/graph/ford_fulkerson.rs | pub fn ford_fulkerson(
graph: &[Vec<usize>],
source: usize,
sink: usize,
) -> Result<usize, FordFulkersonError> {
validate_ford_fulkerson_input(graph, source, sink)?;
let mut residual_graph = graph.to_owned();
let mut parent = vec![usize::MAX; graph.len()];
let mut max_flow = 0;
while bfs(&residual_graph, source, sink, &mut parent) {
let mut path_flow = usize::MAX;
let mut previous_vertex = sink;
while previous_vertex != source {
let current_vertex = parent[previous_vertex];
path_flow = path_flow.min(residual_graph[current_vertex][previous_vertex]);
previous_vertex = current_vertex;
}
previous_vertex = sink;
while previous_vertex != source {
let current_vertex = parent[previous_vertex];
residual_graph[current_vertex][previous_vertex] -= path_flow;
residual_graph[previous_vertex][current_vertex] += path_flow;
previous_vertex = current_vertex;
}
max_flow += path_flow;
}
Ok(max_flow)
} | //! The Ford-Fulkerson algorithm is a widely used algorithm to solve the maximum flow problem in a flow network.
//!
//! The maximum flow problem involves determining the maximum amount of flow that can be sent from a source vertex to a sink vertex
//! in a directed weighted graph, subject to capacity constraints on the edges.
use std::collections::VecDeque;
/// Enum representing the possible errors that can occur when running the Ford-Fulkerson algorithm.
#[derive(Debug, PartialEq)]
pub enum FordFulkersonError {
    /// The adjacency matrix has no vertices.
    EmptyGraph,
    /// The adjacency matrix is not square.
    ImproperGraph,
    /// The source vertex index is not a valid row of the matrix.
    SourceOutOfBounds,
    /// The sink vertex index is not a valid row of the matrix.
    SinkOutOfBounds,
}
/// Breadth-First Search on the residual graph, looking for an augmenting
/// path from the source vertex `source` to the sink vertex `sink`.
///
/// # Arguments
///
/// * `graph` - Residual graph as an adjacency matrix of remaining capacities.
/// * `source` - The source vertex.
/// * `sink` - The sink vertex.
/// * `parent` - Filled in with each visited vertex's predecessor, so the
///   caller can reconstruct the augmenting path.
///
/// # Returns
///
/// Returns `true` if an augmenting path is found from `source` to `sink`, `false` otherwise.
fn bfs(graph: &[Vec<usize>], source: usize, sink: usize, parent: &mut [usize]) -> bool {
    let mut seen = vec![false; graph.len()];
    seen[source] = true;
    parent[source] = usize::MAX; // the source has no predecessor

    let mut frontier = VecDeque::new();
    frontier.push_back(source);
    while let Some(vertex) = frontier.pop_front() {
        for (next, &capacity) in graph[vertex].iter().enumerate() {
            // Only follow edges with remaining capacity to unvisited vertices.
            if capacity > 0 && !seen[next] {
                seen[next] = true;
                parent[next] = vertex;
                if next == sink {
                    return true; // early exit: augmenting path found
                }
                frontier.push_back(next);
            }
        }
    }
    false
}
/// Checks that `graph`, `source`, and `sink` form a valid Ford-Fulkerson input.
///
/// The graph must be non-empty and square (an adjacency matrix where each row
/// has as many entries as there are rows), and both `source` and `sink` must
/// be valid vertex indices.
///
/// # Arguments
///
/// * `graph` - The flow network represented as an adjacency matrix.
/// * `source` - The source vertex.
/// * `sink` - The sink vertex.
///
/// # Returns
///
/// `Ok(())` when the input is usable, otherwise the matching
/// `FordFulkersonError`.
fn validate_ford_fulkerson_input(
    graph: &[Vec<usize>],
    source: usize,
    sink: usize,
) -> Result<(), FordFulkersonError> {
    let vertex_count = graph.len();
    if vertex_count == 0 {
        return Err(FordFulkersonError::EmptyGraph);
    }
    if !graph.iter().all(|row| row.len() == vertex_count) {
        return Err(FordFulkersonError::ImproperGraph);
    }
    if source >= vertex_count {
        return Err(FordFulkersonError::SourceOutOfBounds);
    }
    if sink >= vertex_count {
        return Err(FordFulkersonError::SinkOutOfBounds);
    }
    Ok(())
}
/// Computes the maximum flow from `source` to `sink` in a flow network given
/// as a capacity adjacency matrix, repeatedly augmenting along BFS-discovered
/// paths (the Edmonds-Karp realisation of the Ford-Fulkerson method).
///
/// # Arguments
///
/// * `graph` - The flow network represented as an adjacency matrix of capacities.
/// * `source` - The source vertex.
/// * `sink` - The sink vertex.
///
/// # Returns
///
/// The value of the maximum flow, or a `FordFulkersonError` for invalid input.
pub fn ford_fulkerson(
    graph: &[Vec<usize>],
    source: usize,
    sink: usize,
) -> Result<usize, FordFulkersonError> {
    validate_ford_fulkerson_input(graph, source, sink)?;

    let mut residual_graph = graph.to_owned();
    let mut parent = vec![usize::MAX; graph.len()];
    let mut max_flow = 0;

    // Keep augmenting while BFS can reach the sink through positive capacities.
    while bfs(&residual_graph, source, sink, &mut parent) {
        // Bottleneck capacity along the discovered path (walked sink -> source).
        let mut path_flow = usize::MAX;
        let mut vertex = sink;
        while vertex != source {
            let prev = parent[vertex];
            path_flow = path_flow.min(residual_graph[prev][vertex]);
            vertex = prev;
        }

        // Push the bottleneck flow: decrease forward edges, increase back edges.
        let mut vertex = sink;
        while vertex != source {
            let prev = parent[vertex];
            residual_graph[prev][vertex] -= path_flow;
            residual_graph[vertex][prev] += path_flow;
            vertex = prev;
        }

        max_flow += path_flow;
    }

    Ok(max_flow)
}
#[cfg(test)]
mod tests {
use super::*;
macro_rules! test_max_flow {
($($name:ident: $tc:expr,)* ) => {
$(
#[test]
fn $name() {
let (graph, source, sink, expected_result) = $tc;
assert_eq!(ford_fulkerson(&graph, source, sink), expected_result);
}
)*
};
}
test_max_flow! {
test_empty_graph: (
vec![],
0,
0,
Err(FordFulkersonError::EmptyGraph),
),
test_source_out_of_bound: (
vec![
vec![0, 8, 0, 0, 3, 0],
vec![0, 0, 9, 0, 0, 0],
vec![0, 0, 0, 0, 7, 2],
vec![0, 0, 0, 0, 0, 5],
vec![0, 0, 7, 4, 0, 0],
vec![0, 0, 0, 0, 0, 0],
],
6,
5,
Err(FordFulkersonError::SourceOutOfBounds),
),
test_sink_out_of_bound: (
vec![
vec![0, 8, 0, 0, 3, 0],
vec![0, 0, 9, 0, 0, 0],
vec![0, 0, 0, 0, 7, 2],
vec![0, 0, 0, 0, 0, 5],
vec![0, 0, 7, 4, 0, 0],
vec![0, 0, 0, 0, 0, 0],
],
0,
6,
Err(FordFulkersonError::SinkOutOfBounds),
),
test_improper_graph: (
vec![
vec![0, 8],
vec![0],
],
0,
1,
Err(FordFulkersonError::ImproperGraph),
),
test_graph_with_small_flow: (
vec![
vec![0, 8, 0, 0, 3, 0],
vec![0, 0, 9, 0, 0, 0],
vec![0, 0, 0, 0, 7, 2],
vec![0, 0, 0, 0, 0, 5],
vec![0, 0, 7, 4, 0, 0],
vec![0, 0, 0, 0, 0, 0],
],
0,
5,
Ok(6),
),
test_graph_with_medium_flow: (
vec![
vec![0, 10, 0, 10, 0, 0],
vec![0, 0, 4, 2, 8, 0],
vec![0, 0, 0, 0, 0, 10],
vec![0, 0, 0, 0, 9, 0],
vec![0, 0, 6, 0, 0, 10],
vec![0, 0, 0, 0, 0, 0],
],
0,
5,
Ok(19),
),
test_graph_with_large_flow: (
vec![
vec![0, 12, 0, 13, 0, 0],
vec![0, 0, 10, 0, 0, 0],
vec![0, 0, 0, 13, 3, 15],
vec![0, 0, 7, 0, 15, 0],
vec![0, 0, 6, 0, 0, 17],
vec![0, 0, 0, 0, 0, 0],
],
0,
5,
Ok(23),
),
test_complex_graph: (
vec![
vec![0, 16, 13, 0, 0, 0],
vec![0, 0, 10, 12, 0, 0],
vec![0, 4, 0, 0, 14, 0],
vec![0, 0, 9, 0, 0, 20],
vec![0, 0, 0, 7, 0, 4],
vec![0, 0, 0, 0, 0, 0],
],
0,
5,
Ok(23),
),
test_disconnected_graph: (
vec![
vec![0, 0, 0, 0],
vec![0, 0, 0, 1],
vec![0, 0, 0, 1],
vec![0, 0, 0, 0],
],
0,
3,
Ok(0),
),
test_unconnected_sink: (
vec![
vec![0, 4, 0, 3, 0, 0],
vec![0, 0, 4, 0, 8, 0],
vec![0, 0, 0, 3, 0, 2],
vec![0, 0, 0, 0, 6, 0],
vec![0, 0, 6, 0, 0, 6],
vec![0, 0, 0, 0, 0, 0],
],
0,
5,
Ok(7),
),
test_no_edges: (
vec![
vec![0, 0, 0],
vec![0, 0, 0],
vec![0, 0, 0],
],
0,
2,
Ok(0),
),
test_single_vertex: (
vec![
vec![0],
],
0,
0,
Ok(0),
),
test_self_loop: (
vec![
vec![10, 0],
vec![0, 0],
],
0,
1,
Ok(0),
),
test_same_source_sink: (
vec![
vec![0, 10, 10],
vec![0, 0, 10],
vec![0, 0, 0],
],
0,
0,
Ok(0),
),
}
}
| rust | {
"argument_definitions": [
{
"definitions": [
"pub struct Vec<T, #[unstable(feature = \"allocator_api\", issue = \"32838\")] A: Allocator = Global> {\n buf: RawVec<T, A>,\n len: usize,\n}"
],
"name": "graph",
"type": "&[Vec<usize>]"
}
],
"end_line": 140,
"name": "ford_fulkerson",
"signature": "pub fn ford_fulkerson(\n graph: &[Vec<usize>],\n source: usize,\n sink: usize,\n) -> Result<usize, FordFulkersonError>",
"start_line": 107
} | {
"class_name": "",
"class_signature": ""
} |
breadth_first_search | Rust-master/src/graph/breadth_first_search.rs | pub fn breadth_first_search(graph: &Graph, root: Node, target: Node) -> Option<Vec<u32>> {
let mut visited: HashSet<Node> = HashSet::new();
let mut history: Vec<u32> = Vec::new();
let mut queue = VecDeque::new();
visited.insert(root);
queue.push_back(root);
while let Some(currentnode) = queue.pop_front() {
history.push(currentnode.value());
// If we reach the goal, return our travel history.
if currentnode == target {
return Some(history);
}
// Check the neighboring nodes for any that we've not visited yet.
for neighbor in currentnode.neighbors(graph) {
if visited.insert(neighbor) {
queue.push_back(neighbor);
}
}
}
// All nodes were visited, yet the target was not found.
None
} | use std::collections::HashSet;
use std::collections::VecDeque;
/// Perform a breadth-first search on Graph `graph`.
///
/// # Parameters
///
/// - `graph`: The graph to search.
/// - `root`: The starting node of the graph from which to begin searching.
/// - `target`: The target node for the search.
///
/// # Returns
///
/// `Some(history)` — the values of all nodes visited, in visit order — when
/// `target` is reachable from `root`.
///
/// `None` when the whole component is explored without finding `target`.
pub fn breadth_first_search(graph: &Graph, root: Node, target: Node) -> Option<Vec<u32>> {
    let mut seen: HashSet<Node> = HashSet::new();
    let mut history: Vec<u32> = Vec::new();
    let mut frontier = VecDeque::new();

    seen.insert(root);
    frontier.push_back(root);

    while let Some(node) = frontier.pop_front() {
        history.push(node.value());

        if node == target {
            // Found the target: report every node visited on the way.
            return Some(history);
        }

        // Enqueue unvisited neighbours; `insert` returns false for repeats.
        for neighbor in node.neighbors(graph) {
            if seen.insert(neighbor) {
                frontier.push_back(neighbor);
            }
        }
    }

    // Frontier exhausted without reaching the target.
    None
}
// Data Structures
#[derive(Copy, Clone, PartialEq, Eq, Hash)]
pub struct Node(u32);
#[derive(Copy, Clone, PartialEq, Eq, Hash)]
pub struct Edge(u32, u32);
#[derive(Clone)]
pub struct Graph {
#[allow(dead_code)]
nodes: Vec<Node>,
edges: Vec<Edge>,
}
impl Graph {
pub fn new(nodes: Vec<Node>, edges: Vec<Edge>) -> Self {
Graph { nodes, edges }
}
}
impl From<u32> for Node {
fn from(item: u32) -> Self {
Node(item)
}
}
impl Node {
pub fn value(&self) -> u32 {
self.0
}
pub fn neighbors(&self, graph: &Graph) -> Vec<Node> {
graph
.edges
.iter()
.filter(|e| e.0 == self.0)
.map(|e| e.1.into())
.collect()
}
}
impl From<(u32, u32)> for Edge {
fn from(item: (u32, u32)) -> Self {
Edge(item.0, item.1)
}
}
#[cfg(test)]
mod tests {
use super::*;
/* Example graph #1:
*
* (1) <--- Root
* / \
* (2) (3)
* / | | \
* (4) (5) (6) (7)
* |
* (8)
*/
fn graph1() -> Graph {
let nodes = vec![1, 2, 3, 4, 5, 6, 7];
let edges = vec![(1, 2), (1, 3), (2, 4), (2, 5), (3, 6), (3, 7), (5, 8)];
Graph::new(
nodes.into_iter().map(|v| v.into()).collect(),
edges.into_iter().map(|e| e.into()).collect(),
)
}
#[test]
fn breadth_first_search_graph1_when_node_not_found_returns_none() {
let graph = graph1();
let root = 1;
let target = 10;
assert_eq!(
breadth_first_search(&graph, root.into(), target.into()),
None
);
}
#[test]
fn breadth_first_search_graph1_when_target_8_should_evaluate_all_nodes_first() {
let graph = graph1();
let root = 1;
let target = 8;
let expected_path = vec![1, 2, 3, 4, 5, 6, 7, 8];
assert_eq!(
breadth_first_search(&graph, root.into(), target.into()),
Some(expected_path)
);
}
/* Example graph #2:
*
* (1) --- (2) (3) --- (4)
* / | / /
* / | / /
* / | / /
* (5) (6) --- (7) (8)
*/
fn graph2() -> Graph {
let nodes = vec![1, 2, 3, 4, 5, 6, 7, 8];
let undirected_edges = vec![
(1, 2),
(2, 1),
(2, 5),
(5, 2),
(2, 6),
(6, 2),
(3, 4),
(4, 3),
(3, 6),
(6, 3),
(4, 7),
(7, 4),
(6, 7),
(7, 6),
];
Graph::new(
nodes.into_iter().map(|v| v.into()).collect(),
undirected_edges.into_iter().map(|e| e.into()).collect(),
)
}
#[test]
fn breadth_first_search_graph2_when_no_path_to_node_returns_none() {
let graph = graph2();
let root = 8;
let target = 4;
assert_eq!(
breadth_first_search(&graph, root.into(), target.into()),
None
);
}
#[test]
fn breadth_first_search_graph2_should_find_path_from_4_to_1() {
let graph = graph2();
let root = 4;
let target = 1;
let expected_path = vec![4, 3, 7, 6, 2, 1];
assert_eq!(
breadth_first_search(&graph, root.into(), target.into()),
Some(expected_path)
);
}
}
| rust | {
"argument_definitions": [
{
"definitions": [
"pub struct Graph {\n #[allow(dead_code)]\n nodes: Vec<Node>,\n edges: Vec<Edge>,\n}"
],
"name": "graph",
"type": "&Graph"
},
{
"definitions": [
"pub struct Graph {\n #[allow(dead_code)]\n nodes: Vec<Node>,\n edges: Vec<Edge>,\n}"
],
"name": "root",
"type": "Node"
},
{
"definitions": [
"pub struct Graph {\n #[allow(dead_code)]\n nodes: Vec<Node>,\n edges: Vec<Edge>,\n}"
],
"name": "target",
"type": "Node"
}
],
"end_line": 45,
"name": "breadth_first_search",
"signature": "pub fn breadth_first_search(graph: &Graph, root: Node, target: Node) -> Option<Vec<u32>>",
"start_line": 20
} | {
"class_name": "",
"class_signature": ""
} |
depth_first_search | Rust-master/src/graph/depth_first_search.rs | pub fn depth_first_search(graph: &Graph, root: Vertex, objective: Vertex) -> Option<Vec<u32>> {
let mut visited: HashSet<Vertex> = HashSet::new();
let mut history: Vec<u32> = Vec::new();
let mut queue = VecDeque::new();
queue.push_back(root);
// While there is an element in the queue
// get the first element of the vertex queue
while let Some(current_vertex) = queue.pop_front() {
// Added current vertex in the history of visiteds vertex
history.push(current_vertex.value());
// Verify if this vertex is the objective
if current_vertex == objective {
// Return the Optional with the history of visiteds vertex
return Some(history);
}
// For each over the neighbors of current vertex
for neighbor in current_vertex.neighbors(graph).into_iter().rev() {
// Insert in the HashSet of visiteds if this value not exist yet
if visited.insert(neighbor) {
// Add the neighbor on front of queue
queue.push_front(neighbor);
}
}
}
// If all vertex is visited and the objective is not found
// return a Optional with None value
None
} | use std::collections::HashSet;
use std::collections::VecDeque;
// Perform a Depth First Search Algorithm to find a element in a graph
//
// Return a Optional with a vector with history of vertex visiteds
// or a None if the element not exists on the graph
pub fn depth_first_search(graph: &Graph, root: Vertex, objective: Vertex) -> Option<Vec<u32>> {
let mut visited: HashSet<Vertex> = HashSet::new();
let mut history: Vec<u32> = Vec::new();
let mut queue = VecDeque::new();
queue.push_back(root);
// While there is an element in the queue
// get the first element of the vertex queue
while let Some(current_vertex) = queue.pop_front() {
// Added current vertex in the history of visiteds vertex
history.push(current_vertex.value());
// Verify if this vertex is the objective
if current_vertex == objective {
// Return the Optional with the history of visiteds vertex
return Some(history);
}
// For each over the neighbors of current vertex
for neighbor in current_vertex.neighbors(graph).into_iter().rev() {
// Insert in the HashSet of visiteds if this value not exist yet
if visited.insert(neighbor) {
// Add the neighbor on front of queue
queue.push_front(neighbor);
}
}
}
// If all vertex is visited and the objective is not found
// return a Optional with None value
None
}
// Data Structures
#[derive(Copy, Clone, PartialEq, Eq, Hash)]
pub struct Vertex(u32);
#[derive(Copy, Clone, PartialEq, Eq, Hash)]
pub struct Edge(u32, u32);
#[derive(Clone)]
pub struct Graph {
#[allow(dead_code)]
vertices: Vec<Vertex>,
edges: Vec<Edge>,
}
impl Graph {
pub fn new(vertices: Vec<Vertex>, edges: Vec<Edge>) -> Self {
Graph { vertices, edges }
}
}
impl From<u32> for Vertex {
fn from(item: u32) -> Self {
Vertex(item)
}
}
impl Vertex {
pub fn value(&self) -> u32 {
self.0
}
pub fn neighbors(&self, graph: &Graph) -> VecDeque<Vertex> {
graph
.edges
.iter()
.filter(|e| e.0 == self.0)
.map(|e| e.1.into())
.collect()
}
}
impl From<(u32, u32)> for Edge {
fn from(item: (u32, u32)) -> Self {
Edge(item.0, item.1)
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn find_1_fail() {
let vertices = vec![1, 2, 3, 4, 5, 6, 7];
let edges = vec![(1, 2), (1, 3), (2, 4), (2, 5), (3, 6), (3, 7)];
let root = 1;
let objective = 99;
let graph = Graph::new(
vertices.into_iter().map(|v| v.into()).collect(),
edges.into_iter().map(|e| e.into()).collect(),
);
assert_eq!(
depth_first_search(&graph, root.into(), objective.into()),
None
);
}
#[test]
fn find_1_sucess() {
let vertices = vec![1, 2, 3, 4, 5, 6, 7];
let edges = vec![(1, 2), (1, 3), (2, 4), (2, 5), (3, 6), (3, 7)];
let root = 1;
let objective = 7;
let correct_path = vec![1, 2, 4, 5, 3, 6, 7];
let graph = Graph::new(
vertices.into_iter().map(|v| v.into()).collect(),
edges.into_iter().map(|e| e.into()).collect(),
);
assert_eq!(
depth_first_search(&graph, root.into(), objective.into()),
Some(correct_path)
);
}
#[test]
fn find_2_sucess() {
let vertices = vec![0, 1, 2, 3, 4, 5, 6, 7];
let edges = vec![
(0, 1),
(1, 3),
(3, 2),
(2, 1),
(3, 4),
(4, 5),
(5, 7),
(7, 6),
(6, 4),
];
let root = 0;
let objective = 6;
let correct_path = vec![0, 1, 3, 2, 4, 5, 7, 6];
let graph = Graph::new(
vertices.into_iter().map(|v| v.into()).collect(),
edges.into_iter().map(|e| e.into()).collect(),
);
assert_eq!(
depth_first_search(&graph, root.into(), objective.into()),
Some(correct_path)
);
}
#[test]
fn find_3_sucess() {
let vertices = vec![0, 1, 2, 3, 4, 5, 6, 7];
let edges = vec![
(0, 1),
(1, 3),
(3, 2),
(2, 1),
(3, 4),
(4, 5),
(5, 7),
(7, 6),
(6, 4),
];
let root = 0;
let objective = 4;
let correct_path = vec![0, 1, 3, 2, 4];
let graph = Graph::new(
vertices.into_iter().map(|v| v.into()).collect(),
edges.into_iter().map(|e| e.into()).collect(),
);
assert_eq!(
depth_first_search(&graph, root.into(), objective.into()),
Some(correct_path)
);
}
}
| rust | {
"argument_definitions": [
{
"definitions": [
"pub struct Graph {\n #[allow(dead_code)]\n vertices: Vec<Vertex>,\n edges: Vec<Edge>,\n}"
],
"name": "graph",
"type": "&Graph"
},
{
"definitions": [
"pub struct Graph {\n #[allow(dead_code)]\n vertices: Vec<Vertex>,\n edges: Vec<Edge>,\n}"
],
"name": "root",
"type": "Vertex"
},
{
"definitions": [
"pub struct Graph {\n #[allow(dead_code)]\n vertices: Vec<Vertex>,\n edges: Vec<Edge>,\n}"
],
"name": "objective",
"type": "Vertex"
}
],
"end_line": 39,
"name": "depth_first_search",
"signature": "pub fn depth_first_search(graph: &Graph, root: Vertex, objective: Vertex) -> Option<Vec<u32>>",
"start_line": 8
} | {
"class_name": "",
"class_signature": ""
} |
manacher | Rust-master/src/string/manacher.rs | pub fn manacher(s: String) -> String {
let l = s.len();
if l <= 1 {
return s;
}
// MEMO: We need to detect odd palindrome as well,
// therefore, inserting dummy string so that
// we can find a pair with dummy center character.
let mut chars: Vec<char> = Vec::with_capacity(s.len() * 2 + 1);
for c in s.chars() {
chars.push('#');
chars.push(c);
}
chars.push('#');
// List: storing the length of palindrome at each index of string
let mut length_of_palindrome = vec![1usize; chars.len()];
// Integer: Current checking palindrome's center index
let mut current_center: usize = 0;
// Integer: Right edge index existing the radius away from current center
let mut right_from_current_center: usize = 0;
for i in 0..chars.len() {
// 1: Check if we are looking at right side of palindrome.
if right_from_current_center > i && i > current_center {
// 1-1: If so copy from the left side of palindrome.
// If the value + index exceeds the right edge index, we should cut and check palindrome later #3.
length_of_palindrome[i] = std::cmp::min(
right_from_current_center - i,
length_of_palindrome[2 * current_center - i],
);
// 1-2: Move the checking palindrome to new index if it exceeds the right edge.
if length_of_palindrome[i] + i >= right_from_current_center {
current_center = i;
right_from_current_center = length_of_palindrome[i] + i;
// 1-3: If radius exceeds the end of list, it means checking is over.
// You will never get the larger value because the string will get only shorter.
if right_from_current_center >= chars.len() - 1 {
break;
}
} else {
// 1-4: If the checking index doesn't exceeds the right edge,
// it means the length is just as same as the left side.
// You don't need to check anymore.
continue;
}
}
// Integer: Current radius from checking index
// If it's copied from left side and more than 1,
// it means it's ensured so you don't need to check inside radius.
let mut radius: usize = (length_of_palindrome[i] - 1) / 2;
radius += 1;
// 2: Checking palindrome.
// Need to care about overflow usize.
while i >= radius && i + radius <= chars.len() - 1 && chars[i - radius] == chars[i + radius]
{
length_of_palindrome[i] += 2;
radius += 1;
}
}
// 3: Find the maximum length and generate answer.
let center_of_max = length_of_palindrome
.iter()
.enumerate()
.max_by_key(|(_, &value)| value)
.map(|(idx, _)| idx)
.unwrap();
let radius_of_max = (length_of_palindrome[center_of_max] - 1) / 2;
let answer = &chars[(center_of_max - radius_of_max)..=(center_of_max + radius_of_max)]
.iter()
.collect::<String>();
answer.replace('#', "")
} | pub fn manacher(s: String) -> String {
let l = s.len();
if l <= 1 {
return s;
}
// MEMO: We need to detect odd palindrome as well,
// therefore, inserting dummy string so that
// we can find a pair with dummy center character.
let mut chars: Vec<char> = Vec::with_capacity(s.len() * 2 + 1);
for c in s.chars() {
chars.push('#');
chars.push(c);
}
chars.push('#');
// List: storing the length of palindrome at each index of string
let mut length_of_palindrome = vec![1usize; chars.len()];
// Integer: Current checking palindrome's center index
let mut current_center: usize = 0;
// Integer: Right edge index existing the radius away from current center
let mut right_from_current_center: usize = 0;
for i in 0..chars.len() {
// 1: Check if we are looking at right side of palindrome.
if right_from_current_center > i && i > current_center {
// 1-1: If so copy from the left side of palindrome.
// If the value + index exceeds the right edge index, we should cut and check palindrome later #3.
length_of_palindrome[i] = std::cmp::min(
right_from_current_center - i,
length_of_palindrome[2 * current_center - i],
);
// 1-2: Move the checking palindrome to new index if it exceeds the right edge.
if length_of_palindrome[i] + i >= right_from_current_center {
current_center = i;
right_from_current_center = length_of_palindrome[i] + i;
// 1-3: If radius exceeds the end of list, it means checking is over.
// You will never get the larger value because the string will get only shorter.
if right_from_current_center >= chars.len() - 1 {
break;
}
} else {
// 1-4: If the checking index doesn't exceeds the right edge,
// it means the length is just as same as the left side.
// You don't need to check anymore.
continue;
}
}
// Integer: Current radius from checking index
// If it's copied from left side and more than 1,
// it means it's ensured so you don't need to check inside radius.
let mut radius: usize = (length_of_palindrome[i] - 1) / 2;
radius += 1;
// 2: Checking palindrome.
// Need to care about overflow usize.
while i >= radius && i + radius <= chars.len() - 1 && chars[i - radius] == chars[i + radius]
{
length_of_palindrome[i] += 2;
radius += 1;
}
}
// 3: Find the maximum length and generate answer.
let center_of_max = length_of_palindrome
.iter()
.enumerate()
.max_by_key(|(_, &value)| value)
.map(|(idx, _)| idx)
.unwrap();
let radius_of_max = (length_of_palindrome[center_of_max] - 1) / 2;
let answer = &chars[(center_of_max - radius_of_max)..=(center_of_max + radius_of_max)]
.iter()
.collect::<String>();
answer.replace('#', "")
}
#[cfg(test)]
mod tests {
use super::manacher;
#[test]
fn get_longest_palindrome_by_manacher() {
assert_eq!(manacher("babad".to_string()), "aba".to_string());
assert_eq!(manacher("cbbd".to_string()), "bb".to_string());
assert_eq!(manacher("a".to_string()), "a".to_string());
let ac_ans = manacher("ac".to_string());
assert!(ac_ans == *"a" || ac_ans == *"c");
}
}
| rust | {
"argument_definitions": [
{
"definitions": [
"pub struct String {\n vec: Vec<u8>,\n}"
],
"name": "s",
"type": "String"
}
],
"end_line": 76,
"name": "manacher",
"signature": "pub fn manacher(s: String) -> String",
"start_line": 1
} | {
"class_name": "",
"class_signature": ""
} |
kth_smallest_heap | Rust-master/src/searching/kth_smallest_heap.rs | pub fn kth_smallest_heap(input: &[T], k: usize) -> Option<T> {
if input.len() < k {
return None;
}
// heap will maintain the kth smallest elements
// seen so far, when new elements, E_new arrives,
// it is compared with the largest element of the
// current Heap E_large, which is the current kth
// smallest elements.
// if E_new > E_large, then E_new cannot be the kth
// smallest because there are already k elements smaller
// than it
// otherwise, E_large cannot be the kth smallest, and should
// be removed from the heap and E_new should be added
let mut heap = Heap::new_max();
// first k elements goes to the heap as the baseline
for &val in input.iter().take(k) {
heap.add(val);
}
for &val in input.iter().skip(k) {
// compare new value to the current kth smallest value
let cur_big = heap.pop().unwrap(); // heap.pop() can't be None
match val.cmp(&cur_big) {
Ordering::Greater => {
heap.add(cur_big);
}
_ => {
heap.add(val);
}
}
}
heap.pop()
} | use crate::data_structures::Heap;
use std::cmp::{Ord, Ordering};
/// Returns k-th smallest element of an array.
/// Time complexity is stably O(nlog(k)) in all cases
/// Extra space is required to maintain the heap, and it doesn't
/// mutate the input list.
///
/// It is preferrable to the partition-based algorithm in cases when
/// we want to maintain the kth smallest element dynamically against
/// a stream of elements. In that case, once the heap is built, further
/// operation's complexity is O(log(k)).
pub fn kth_smallest_heap<T>(input: &[T], k: usize) -> Option<T>
where
T: Ord + Copy,
{
if input.len() < k {
return None;
}
// heap will maintain the kth smallest elements
// seen so far, when new elements, E_new arrives,
// it is compared with the largest element of the
// current Heap E_large, which is the current kth
// smallest elements.
// if E_new > E_large, then E_new cannot be the kth
// smallest because there are already k elements smaller
// than it
// otherwise, E_large cannot be the kth smallest, and should
// be removed from the heap and E_new should be added
let mut heap = Heap::new_max();
// first k elements goes to the heap as the baseline
for &val in input.iter().take(k) {
heap.add(val);
}
for &val in input.iter().skip(k) {
// compare new value to the current kth smallest value
let cur_big = heap.pop().unwrap(); // heap.pop() can't be None
match val.cmp(&cur_big) {
Ordering::Greater => {
heap.add(cur_big);
}
_ => {
heap.add(val);
}
}
}
heap.pop()
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn empty() {
let zero: [u8; 0] = [];
let first = kth_smallest_heap(&zero, 1);
assert_eq!(None, first);
}
#[test]
fn one_element() {
let one = [1];
let first = kth_smallest_heap(&one, 1);
assert_eq!(1, first.unwrap());
}
#[test]
fn many_elements() {
// 0 1 3 4 5 7 8 9 9 10 12 13 16 17
let many = [9, 17, 3, 16, 13, 10, 1, 5, 7, 12, 4, 8, 9, 0];
let first = kth_smallest_heap(&many, 1);
let third = kth_smallest_heap(&many, 3);
let sixth = kth_smallest_heap(&many, 6);
let fourteenth = kth_smallest_heap(&many, 14);
assert_eq!(0, first.unwrap());
assert_eq!(3, third.unwrap());
assert_eq!(7, sixth.unwrap());
assert_eq!(17, fourteenth.unwrap());
}
}
| rust | {
"argument_definitions": [],
"end_line": 52,
"name": "kth_smallest_heap",
"signature": "pub fn kth_smallest_heap(input: &[T], k: usize) -> Option<T>",
"start_line": 13
} | {
"class_name": "",
"class_signature": ""
} |
compute_totient | Rust-master/src/number_theory/compute_totient.rs | pub fn compute_totient(n: i32) -> vec::Vec<i32> {
let mut phi: Vec<i32> = Vec::new();
// initialize phi[i] = i
for i in 0..=n {
phi.push(i);
}
// Compute other Phi values
for p in 2..=n {
// If phi[p] is not computed already,
// then number p is prime
if phi[(p) as usize] == p {
// Phi of a prime number p is
// always equal to p-1.
phi[(p) as usize] = p - 1;
// Update phi values of all
// multiples of p
for i in ((2 * p)..=n).step_by(p as usize) {
phi[(i) as usize] = (phi[i as usize] / p) * (p - 1);
}
}
}
phi[1..].to_vec()
} | // Totient function for
// all numbers smaller than
// or equal to n.
// Computes and prints
// totient of all numbers
// smaller than or equal to n
use std::vec;
pub fn compute_totient(n: i32) -> vec::Vec<i32> {
let mut phi: Vec<i32> = Vec::new();
// initialize phi[i] = i
for i in 0..=n {
phi.push(i);
}
// Compute other Phi values
for p in 2..=n {
// If phi[p] is not computed already,
// then number p is prime
if phi[(p) as usize] == p {
// Phi of a prime number p is
// always equal to p-1.
phi[(p) as usize] = p - 1;
// Update phi values of all
// multiples of p
for i in ((2 * p)..=n).step_by(p as usize) {
phi[(i) as usize] = (phi[i as usize] / p) * (p - 1);
}
}
}
phi[1..].to_vec()
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_1() {
assert_eq!(
compute_totient(12),
vec![1, 1, 2, 2, 4, 2, 6, 4, 6, 4, 10, 4]
);
}
#[test]
fn test_2() {
assert_eq!(compute_totient(7), vec![1, 1, 2, 2, 4, 2, 6]);
}
#[test]
fn test_3() {
assert_eq!(compute_totient(4), vec![1, 1, 2, 2]);
}
}
| rust | {
"argument_definitions": [],
"end_line": 37,
"name": "compute_totient",
"signature": "pub fn compute_totient(n: i32) -> vec::Vec<i32>",
"start_line": 11
} | {
"class_name": "",
"class_signature": ""
} |
fast_factorial | Rust-master/src/big_integer/fast_factorial.rs | pub fn fast_factorial(n: usize) -> BigUint {
if n < 2 {
return BigUint::one();
}
// get list of primes that will be factors of n!
let primes = sieve_of_eratosthenes(n);
// Map the primes with their index
let p_indices = primes
.into_iter()
.map(|p| (p, index(p, n)))
.collect::<BTreeMap<_, _>>();
let max_bits = p_indices[&2].next_power_of_two().ilog2() + 1;
// Create a Vec of 1's
let mut a = vec![BigUint::one(); max_bits as usize];
// For every prime p, multiply a[i] by p if the ith bit of p's index is 1
for (p, i) in p_indices {
let mut bit = 1usize;
while bit.ilog2() < max_bits {
if (bit & i) > 0 {
a[bit.ilog2() as usize] *= p;
}
bit <<= 1;
}
}
a.into_iter()
.enumerate()
.map(|(i, a_i)| a_i.pow(2u32.pow(i as u32))) // raise every a[i] to the 2^ith power
.product() // we get our answer by multiplying the result
} | // Algorithm created by Peter Borwein in 1985
// https://doi.org/10.1016/0196-6774(85)90006-9
use crate::math::sieve_of_eratosthenes;
use num_bigint::BigUint;
use num_traits::One;
use std::collections::BTreeMap;
/// Calculate the sum of n / p^i with integer division for all values of i
fn index(p: usize, n: usize) -> usize {
let mut index = 0;
let mut i = 1;
let mut quot = n / p;
while quot > 0 {
index += quot;
i += 1;
quot = n / p.pow(i);
}
index
}
/// Calculate the factorial with time complexity O(log(log(n)) * M(n * log(n))) where M(n) is the time complexity of multiplying two n-digit numbers together.
pub fn fast_factorial(n: usize) -> BigUint {
if n < 2 {
return BigUint::one();
}
// get list of primes that will be factors of n!
let primes = sieve_of_eratosthenes(n);
// Map the primes with their index
let p_indices = primes
.into_iter()
.map(|p| (p, index(p, n)))
.collect::<BTreeMap<_, _>>();
let max_bits = p_indices[&2].next_power_of_two().ilog2() + 1;
// Create a Vec of 1's
let mut a = vec![BigUint::one(); max_bits as usize];
// For every prime p, multiply a[i] by p if the ith bit of p's index is 1
for (p, i) in p_indices {
let mut bit = 1usize;
while bit.ilog2() < max_bits {
if (bit & i) > 0 {
a[bit.ilog2() as usize] *= p;
}
bit <<= 1;
}
}
a.into_iter()
.enumerate()
.map(|(i, a_i)| a_i.pow(2u32.pow(i as u32))) // raise every a[i] to the 2^ith power
.product() // we get our answer by multiplying the result
}
#[cfg(test)]
mod tests {
use super::*;
use crate::math::factorial::factorial_bigmath;
#[test]
fn fact() {
assert_eq!(fast_factorial(0), BigUint::one());
assert_eq!(fast_factorial(1), BigUint::one());
assert_eq!(fast_factorial(2), factorial_bigmath(2));
assert_eq!(fast_factorial(3), factorial_bigmath(3));
assert_eq!(fast_factorial(6), factorial_bigmath(6));
assert_eq!(fast_factorial(7), factorial_bigmath(7));
assert_eq!(fast_factorial(10), factorial_bigmath(10));
assert_eq!(fast_factorial(11), factorial_bigmath(11));
assert_eq!(fast_factorial(18), factorial_bigmath(18));
assert_eq!(fast_factorial(19), factorial_bigmath(19));
assert_eq!(fast_factorial(30), factorial_bigmath(30));
assert_eq!(fast_factorial(34), factorial_bigmath(34));
assert_eq!(fast_factorial(35), factorial_bigmath(35));
assert_eq!(fast_factorial(52), factorial_bigmath(52));
assert_eq!(fast_factorial(100), factorial_bigmath(100));
assert_eq!(fast_factorial(1000), factorial_bigmath(1000));
assert_eq!(fast_factorial(5000), factorial_bigmath(5000));
}
}
| rust | {
"argument_definitions": [],
"end_line": 60,
"name": "fast_factorial",
"signature": "pub fn fast_factorial(n: usize) -> BigUint",
"start_line": 25
} | {
"class_name": "",
"class_signature": ""
} |
transposition | Rust-master/src/ciphers/transposition.rs | pub fn transposition(decrypt_mode: bool, msg: &str, key: &str) -> String {
let key_uppercase = key.to_uppercase();
let mut cipher_msg: String = msg.to_string();
let keys: Vec<&str> = if decrypt_mode {
key_uppercase.split_whitespace().rev().collect()
} else {
key_uppercase.split_whitespace().collect()
};
for cipher_key in keys.iter() {
let mut key_order: Vec<usize> = Vec::new();
// Removes any non-alphabet characters from 'msg'
cipher_msg = cipher_msg
.to_uppercase()
.chars()
.filter(|&c| c.is_ascii_alphabetic())
.collect();
// Determines the sequence of the columns, as dictated by the
// alphabetical order of the keyword's letters
let mut key_ascii: Vec<(usize, u8)> =
cipher_key.bytes().enumerate().collect::<Vec<(usize, u8)>>();
key_ascii.sort_by_key(|&(_, key)| key);
for (counter, (_, key)) in key_ascii.iter_mut().enumerate() {
*key = counter as u8;
}
key_ascii.sort_by_key(|&(index, _)| index);
key_ascii
.into_iter()
.for_each(|(_, key)| key_order.push(key.into()));
// Determines whether to encrypt or decrypt the message,
// and returns the result
cipher_msg = if decrypt_mode {
decrypt(cipher_msg, key_order)
} else {
encrypt(cipher_msg, key_order)
};
}
cipher_msg
} | //! Transposition Cipher
//!
//! The Transposition Cipher is a method of encryption by which a message is shifted
//! according to a regular system, so that the ciphertext is a rearrangement of the
//! original message. The most commonly referred to Transposition Cipher is the
//! COLUMNAR TRANSPOSITION cipher, which is demonstrated below.
use std::ops::RangeInclusive;
/// Encrypts or decrypts a message, using multiple keys. The
/// encryption is based on the columnar transposition method.
pub fn transposition(decrypt_mode: bool, msg: &str, key: &str) -> String {
let key_uppercase = key.to_uppercase();
let mut cipher_msg: String = msg.to_string();
let keys: Vec<&str> = if decrypt_mode {
key_uppercase.split_whitespace().rev().collect()
} else {
key_uppercase.split_whitespace().collect()
};
for cipher_key in keys.iter() {
let mut key_order: Vec<usize> = Vec::new();
// Removes any non-alphabet characters from 'msg'
cipher_msg = cipher_msg
.to_uppercase()
.chars()
.filter(|&c| c.is_ascii_alphabetic())
.collect();
// Determines the sequence of the columns, as dictated by the
// alphabetical order of the keyword's letters
let mut key_ascii: Vec<(usize, u8)> =
cipher_key.bytes().enumerate().collect::<Vec<(usize, u8)>>();
key_ascii.sort_by_key(|&(_, key)| key);
for (counter, (_, key)) in key_ascii.iter_mut().enumerate() {
*key = counter as u8;
}
key_ascii.sort_by_key(|&(index, _)| index);
key_ascii
.into_iter()
.for_each(|(_, key)| key_order.push(key.into()));
// Determines whether to encrypt or decrypt the message,
// and returns the result
cipher_msg = if decrypt_mode {
decrypt(cipher_msg, key_order)
} else {
encrypt(cipher_msg, key_order)
};
}
cipher_msg
}
/// Performs the columnar transposition encryption
fn encrypt(mut msg: String, key_order: Vec<usize>) -> String {
let mut encrypted_msg: String = String::from("");
let mut encrypted_vec: Vec<String> = Vec::new();
let msg_len = msg.len();
let key_len: usize = key_order.len();
let mut msg_index: usize = msg_len;
let mut key_index: usize = key_len;
// Loop each column, pushing it to a Vec<T>
while !msg.is_empty() {
let mut chars: String = String::from("");
let mut index: usize = 0;
key_index -= 1;
// Loop every nth character, determined by key length, to create a column
while index < msg_index {
let ch = msg.remove(index);
chars.push(ch);
index += key_index;
msg_index -= 1;
}
encrypted_vec.push(chars);
}
// Concatenate the columns into a string, determined by the
// alphabetical order of the keyword's characters
let mut indexed_vec: Vec<(usize, &String)> = Vec::new();
let mut indexed_msg: String = String::from("");
for (counter, key_index) in key_order.into_iter().enumerate() {
indexed_vec.push((key_index, &encrypted_vec[counter]));
}
indexed_vec.sort();
for (_, column) in indexed_vec {
indexed_msg.push_str(column);
}
// Split the message by a space every nth character, determined by
// 'message length divided by keyword length' to the next highest integer.
let msg_div: usize = (msg_len as f32 / key_len as f32).ceil() as usize;
let mut counter: usize = 0;
indexed_msg.chars().for_each(|c| {
encrypted_msg.push(c);
counter += 1;
if counter == msg_div {
encrypted_msg.push(' ');
counter = 0;
}
});
encrypted_msg.trim_end().to_string()
}
/// Performs the columnar transposition decryption
fn decrypt(mut msg: String, key_order: Vec<usize>) -> String {
let mut decrypted_msg: String = String::from("");
let mut decrypted_vec: Vec<String> = Vec::new();
let mut indexed_vec: Vec<(usize, String)> = Vec::new();
let msg_len = msg.len();
let key_len: usize = key_order.len();
// Split the message into columns, determined by 'message length divided by keyword length'.
// Some columns are larger by '+1', where the prior calculation leaves a remainder.
let split_size: usize = (msg_len as f64 / key_len as f64) as usize;
let msg_mod: usize = msg_len % key_len;
let mut counter: usize = msg_mod;
let mut key_split: Vec<usize> = key_order.clone();
let (split_large, split_small) = key_split.split_at_mut(msg_mod);
split_large.sort_unstable();
split_small.sort_unstable();
split_large.iter_mut().rev().for_each(|key_index| {
counter -= 1;
let range: RangeInclusive<usize> =
((*key_index * split_size) + counter)..=(((*key_index + 1) * split_size) + counter);
let slice: String = msg[range.clone()].to_string();
indexed_vec.push((*key_index, slice));
msg.replace_range(range, "");
});
for key_index in split_small.iter_mut() {
let (slice, rest_of_msg) = msg.split_at(split_size);
indexed_vec.push((*key_index, (slice.to_string())));
msg = rest_of_msg.to_string();
}
indexed_vec.sort();
for key in key_order {
if let Some((_, column)) = indexed_vec.iter().find(|(key_index, _)| key_index == &key) {
decrypted_vec.push(column.to_string());
}
}
// Concatenate the columns into a string, determined by the
// alphabetical order of the keyword's characters
for _ in 0..split_size {
decrypted_vec.iter_mut().for_each(|column| {
decrypted_msg.push(column.remove(0));
})
}
if !decrypted_vec.is_empty() {
decrypted_vec.into_iter().for_each(|chars| {
decrypted_msg.push_str(&chars);
})
}
decrypted_msg
}
#[cfg(test)]
mod tests {
    use super::*;

    // Single-keyword encryption: expected ciphertexts are uppercase with
    // non-alphabetic input characters removed, grouped by column.
    #[test]
    fn encryption() {
        assert_eq!(
            transposition(
                false,
                "The quick brown fox jumps over the lazy dog",
                "Archive",
            ),
            "TKOOL ERJEZ CFSEG QOURY UWMTD HBXVA INPHO"
        );
        // Punctuation and the trailing space are stripped before encryption.
        assert_eq!(
            transposition(
                false,
                "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ.,/;'[]{}:|_+=-`~() ",
                "Tenacious"
            ),
            "DMVENW ENWFOX BKTCLU FOXGPY CLUDMV GPYHQZ IRAJSA JSBKTH QZIR"
        );
        // Classic columnar-transposition example with the "ZEBRAS" keyword.
        assert_eq!(
            transposition(false, "WE ARE DISCOVERED. FLEE AT ONCE.", "ZEBRAS"),
            "EVLNA CDTES EAROF ODEEC WIREE"
        );
    }

    // Decryption inverts each ciphertext above back to the cleaned
    // (uppercase, stripped) plaintext.
    #[test]
    fn decryption() {
        assert_eq!(
            transposition(true, "TKOOL ERJEZ CFSEG QOURY UWMTD HBXVA INPHO", "Archive"),
            "THEQUICKBROWNFOXJUMPSOVERTHELAZYDOG"
        );
        assert_eq!(
            transposition(
                true,
                "DMVENW ENWFOX BKTCLU FOXGPY CLUDMV GPYHQZ IRAJSA JSBKTH QZIR",
                "Tenacious"
            ),
            "ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ"
        );
        assert_eq!(
            transposition(true, "EVLNA CDTES EAROF ODEEC WIREE", "ZEBRAS"),
            "WEAREDISCOVEREDFLEEATONCE"
        );
    }

    // A space-separated multi-word key applies one transposition per word.
    #[test]
    fn double_encryption() {
        assert_eq!(
            transposition(
                false,
                "The quick brown fox jumps over the lazy dog",
                "Archive Snow"
            ),
            "KEZEUWHAH ORCGRMBIO TLESOUDVP OJFQYTXN"
        );
        assert_eq!(
            transposition(
                false,
                "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ.,/;'[]{}:|_+=-`~() ",
                "Tenacious Drink"
            ),
            "DWOCXLGZSKI VNBUPDYRJHN FTOCVQJBZEW KFYMHASQMEX LGUPIATR"
        );
        assert_eq!(
            transposition(false, "WE ARE DISCOVERED. FLEE AT ONCE.", "ZEBRAS STRIPE"),
            "CAEEN SOIAE DRLEF WEDRE EVTOC"
        );
    }

    // Multi-word decryption must undo the rounds of double_encryption.
    #[test]
    fn double_decryption() {
        assert_eq!(
            transposition(
                true,
                "KEZEUWHAH ORCGRMBIO TLESOUDVP OJFQYTXN",
                "Archive Snow"
            ),
            "THEQUICKBROWNFOXJUMPSOVERTHELAZYDOG"
        );
        assert_eq!(
            transposition(
                true,
                "DWOCXLGZSKI VNBUPDYRJHN FTOCVQJBZEW KFYMHASQMEX LGUPIATR",
                "Tenacious Drink",
            ),
            "ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ"
        );
        assert_eq!(
            transposition(true, "CAEEN SOIAE DRLEF WEDRE EVTOC", "ZEBRAS STRIPE"),
            "WEAREDISCOVEREDFLEEATONCE"
        );
    }
}
| rust | {
"argument_definitions": [],
"end_line": 59,
"name": "transposition",
"signature": "pub fn transposition(decrypt_mode: bool, msg: &str, key: &str) -> String",
"start_line": 12
} | {
"class_name": "",
"class_signature": ""
} |
blake2b | Rust-master/src/ciphers/blake2b.rs | pub fn blake2b(m: &[u8], k: &[u8], nn: u8) -> Vec<u8> {
let kk = min(k.len(), KK_MAX);
let nn = min(nn, NN_MAX);
// Prevent user from giving a key that is too long
let k = &k[..kk];
let dd = max(ceil(kk, BB) + ceil(m.len(), BB), 1);
let mut blocks: Vec<Block> = vec![blank_block(); dd];
// Copy key into blocks
for (w, c) in blocks[0].iter_mut().zip(k.chunks(U64BYTES)) {
*w = bytes_to_word(c);
}
let first_index = (kk > 0) as usize;
// Copy bytes from message into blocks
for (i, c) in m.chunks(U64BYTES).enumerate() {
let block_index = first_index + (i / (BB / U64BYTES));
let word_in_block = i % (BB / U64BYTES);
blocks[block_index][word_in_block] = bytes_to_word(c);
}
blake2(blocks, m.len() as u128, kk as u64, nn as Word)
} | // For specification go to https://www.rfc-editor.org/rfc/rfc7693
use std::cmp::{max, min};
use std::convert::{TryFrom, TryInto};
type Word = u64;
const BB: usize = 128;
const U64BYTES: usize = (u64::BITS as usize) / 8;
type Block = [Word; BB / U64BYTES];
const KK_MAX: usize = 64;
const NN_MAX: u8 = 64;
// Array of round constants used in mixing function G
const RC: [u32; 4] = [32, 24, 16, 63];
// IV[i] = floor(2**64 * frac(sqrt(prime(i+1)))) where prime(i) is the ith prime number
const IV: [Word; 8] = [
0x6A09E667F3BCC908,
0xBB67AE8584CAA73B,
0x3C6EF372FE94F82B,
0xA54FF53A5F1D36F1,
0x510E527FADE682D1,
0x9B05688C2B3E6C1F,
0x1F83D9ABFB41BD6B,
0x5BE0CD19137E2179,
];
const SIGMA: [[usize; 16]; 10] = [
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15],
[14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3],
[11, 8, 12, 0, 5, 2, 15, 13, 10, 14, 3, 6, 7, 1, 9, 4],
[7, 9, 3, 1, 13, 12, 11, 14, 2, 6, 5, 10, 4, 0, 15, 8],
[9, 0, 5, 7, 2, 4, 10, 15, 14, 1, 11, 12, 6, 8, 3, 13],
[2, 12, 6, 10, 0, 11, 8, 3, 4, 13, 7, 5, 15, 14, 1, 9],
[12, 5, 1, 15, 14, 13, 4, 10, 0, 7, 6, 3, 9, 2, 8, 11],
[13, 11, 7, 14, 12, 1, 3, 9, 5, 0, 15, 4, 8, 6, 2, 10],
[6, 15, 14, 9, 11, 3, 0, 8, 12, 2, 13, 7, 1, 4, 10, 5],
[10, 2, 8, 4, 7, 6, 1, 5, 15, 11, 9, 14, 3, 12, 13, 0],
];
/// A message block with every word set to zero.
#[inline]
const fn blank_block() -> Block {
    [0; BB / U64BYTES]
}
/// In-place addition modulo 2^64, as required by the BLAKE2 round function.
/// (`wrapping_add` is exactly `overflowing_add().0`.)
#[inline]
fn add(a: &mut Word, b: Word) {
    *a = a.wrapping_add(b);
}
/// Ceiling of `dividend / divisor` in pure integer arithmetic.
/// Panics on a zero divisor, like plain integer division.
#[inline]
const fn ceil(dividend: usize, divisor: usize) -> usize {
    let quotient = dividend / divisor;
    if dividend % divisor == 0 {
        quotient
    } else {
        quotient + 1
    }
}
/// BLAKE2 mixing function G (RFC 7693, section 3.1).
///
/// Mixes the two message words `x` and `y` into the four work-vector words
/// at indices `a`, `b`, `c`, `d`, using the rotation constants in `RC`.
fn g(v: &mut [Word; 16], a: usize, b: usize, c: usize, d: usize, x: Word, y: Word) {
    // Two half-rounds: the first folds in `x` with rotations RC[0..2],
    // the second folds in `y` with rotations RC[2..4].
    for (m, r) in [x, y].into_iter().zip(RC.chunks(2)) {
        // Copy out v[b] first so `add` can borrow v[a] mutably.
        let v_b = v[b];
        add(&mut v[a], v_b);
        add(&mut v[a], m);
        v[d] = (v[d] ^ v[a]).rotate_right(r[0]);
        // Same borrow workaround for v[d] / v[c].
        let v_d = v[d];
        add(&mut v[c], v_d);
        v[b] = (v[b] ^ v[c]).rotate_right(r[1]);
    }
}
/// BLAKE2b compression function F (RFC 7693, section 3.2).
///
/// Mixes one message block `m` into the chained state `h`.
/// `t` is the 128-bit offset counter (bytes processed so far, including the
/// current block) and `flag` marks the final block.
fn f(h: &mut [Word; 8], m: Block, t: u128, flag: bool) {
    // Local work vector: low half from the state, high half from the IV.
    let mut v: [Word; 16] = [0; 16];
    for (i, (h_i, iv_i)) in h.iter().zip(IV.iter()).enumerate() {
        v[i] = *h_i;
        v[i + 8] = *iv_i;
    }
    // Fold the offset counter into the work vector, per RFC 7693:
    //   v[12] ^= t mod 2^64   (low word)
    //   v[13] ^= t >> 64      (high word)
    // The previous code computed `t % u64::MAX`, i.e. t mod (2^64 - 1),
    // which deviates from the spec once t >= 2^64 - 1 bytes have been
    // processed; the `as u64` cast is the correct mod-2^64 truncation.
    v[12] ^= t as u64;
    v[13] ^= (t >> 64) as u64;
    // Final-block flag: invert v[14] (equivalent to XOR with 2^64 - 1).
    if flag {
        v[14] = !v[14];
    }
    // Twelve rounds; the message-word schedule SIGMA repeats after ten.
    for i in 0..12 {
        let s = SIGMA[i % 10];
        let mut s_index = 0;
        // Column step: mix the four columns of the 4x4 word matrix.
        for j in 0..4 {
            g(
                &mut v,
                j,
                j + 4,
                j + 8,
                j + 12,
                m[s[s_index]],
                m[s[s_index + 1]],
            );
            s_index += 2;
        }
        // Maps a (column, row) pair to an index in the flattened 4x4 matrix.
        let i1d = |col, row| {
            let col = col % 4;
            let row = row % 4;
            (row * 4) + col
        };
        // Diagonal step: mix the four diagonals starting at (0, j).
        for j in 0..4 {
            // Produces indices for diagonals of a 4x4 matrix starting at 0,j
            let idx: Vec<usize> = (0..4).map(|n| i1d(j + n, n) as usize).collect();
            g(
                &mut v,
                idx[0],
                idx[1],
                idx[2],
                idx[3],
                m[s[s_index]],
                m[s[s_index + 1]],
            );
            s_index += 2;
        }
    }
    // Feed the work vector back into the chained state.
    for (i, n) in h.iter_mut().enumerate() {
        *n ^= v[i] ^ v[i + 8];
    }
}
/// BLAKE2b core: consumes the padded blocks `d` and produces the digest.
///
/// * `ll` - total message length in bytes (excluding key padding)
/// * `kk` - key length in bytes (0 selects unkeyed hashing)
/// * `nn` - requested digest length in bytes
fn blake2(d: Vec<Block>, ll: u128, kk: Word, nn: Word) -> Vec<u8> {
    // State starts as the IV ...
    let mut h: [Word; 8] = IV
        .iter()
        .take(8)
        .copied()
        .collect::<Vec<Word>>()
        .try_into()
        .unwrap();
    // ... with the parameter block (fanout 1, depth 1, key length, digest
    // length) folded into h[0].
    h[0] ^= 0x01010000u64 ^ (kk << 8) ^ nn;
    // Compress every block but the last; the counter passed to `f` is the
    // number of bytes consumed so far (whole BB-byte blocks).
    if d.len() > 1 {
        for (i, w) in d.iter().enumerate().take(d.len() - 1) {
            f(&mut h, *w, (i as u128 + 1) * BB as u128, false);
        }
    }
    // The final counter also covers the key block (if any); the last block
    // is compressed with the finalization flag set.
    let ll = if kk > 0 { ll + BB as u128 } else { ll };
    f(&mut h, d[d.len() - 1], ll, true);
    // Serialize the state little-endian and truncate to nn bytes.
    h.iter()
        .flat_map(|n| n.to_le_bytes())
        .take(nn as usize)
        .collect()
}
// Take arbitrarily long slice of u8's and turn up to 8 bytes into u64
fn bytes_to_word(bytes: &[u8]) -> Word {
    // Zero-pad short input (or ignore anything past 8 bytes) via a fixed
    // buffer, then decode little-endian. This single path covers both the
    // exact-8-byte and the partial-chunk cases identically.
    let mut buf = [0u8; 8];
    for (dst, src) in buf.iter_mut().zip(bytes) {
        *dst = *src;
    }
    Word::from_le_bytes(buf)
}
/// Computes the BLAKE2b hash of message `m`, keyed with `k`, producing an
/// `nn`-byte digest (RFC 7693).
///
/// The key is truncated to 64 bytes and `nn` is clamped to 64, the maxima
/// the algorithm allows. An empty key selects the unkeyed variant.
pub fn blake2b(m: &[u8], k: &[u8], nn: u8) -> Vec<u8> {
    let kk = min(k.len(), KK_MAX);
    let nn = min(nn, NN_MAX);
    // Prevent user from giving a key that is too long
    let k = &k[..kk];
    // Block count: one 128-byte block for the key (when present) plus the
    // message blocks, and always at least one block overall.
    let dd = max(ceil(kk, BB) + ceil(m.len(), BB), 1);
    let mut blocks: Vec<Block> = vec![blank_block(); dd];
    // Copy key into blocks
    // (the key occupies the whole first block, zero-padded).
    for (w, c) in blocks[0].iter_mut().zip(k.chunks(U64BYTES)) {
        *w = bytes_to_word(c);
    }
    // Message words start in block 1 when a key block is present.
    let first_index = (kk > 0) as usize;
    // Copy bytes from message into blocks
    for (i, c) in m.chunks(U64BYTES).enumerate() {
        let block_index = first_index + (i / (BB / U64BYTES));
        let word_in_block = i % (BB / U64BYTES);
        blocks[block_index][word_in_block] = bytes_to_word(c);
    }
    blake2(blocks, m.len() as u128, kk as u64, nn as Word)
}
#[cfg(test)]
mod test {
    use super::*;

    // Known-answer test generator: hashes `$message` with key `$key` into an
    // `$nn`-byte digest and compares it against `$expected`.
    macro_rules! digest_test {
        ($fname:ident, $message:expr, $key:expr, $nn:literal, $expected:expr) => {
            #[test]
            fn $fname() {
                let digest = blake2b($message, $key, $nn);
                let expected = Vec::from($expected);
                assert_eq!(digest, expected);
            }
        };
    }

    // Unkeyed 64-byte digest of "abc" (0x61 0x62 0x63) — the RFC example.
    digest_test!(
        blake2b_from_rfc,
        &[0x61, 0x62, 0x63],
        &[0; 0],
        64,
        [
            0xBA, 0x80, 0xA5, 0x3F, 0x98, 0x1C, 0x4D, 0x0D, 0x6A, 0x27, 0x97, 0xB6, 0x9F, 0x12,
            0xF6, 0xE9, 0x4C, 0x21, 0x2F, 0x14, 0x68, 0x5A, 0xC4, 0xB7, 0x4B, 0x12, 0xBB, 0x6F,
            0xDB, 0xFF, 0xA2, 0xD1, 0x7D, 0x87, 0xC5, 0x39, 0x2A, 0xAB, 0x79, 0x2D, 0xC2, 0x52,
            0xD5, 0xDE, 0x45, 0x33, 0xCC, 0x95, 0x18, 0xD3, 0x8A, 0xA8, 0xDB, 0xF1, 0x92, 0x5A,
            0xB9, 0x23, 0x86, 0xED, 0xD4, 0x00, 0x99, 0x23
        ]
    );

    // Unkeyed digest of the empty message.
    digest_test!(
        blake2b_empty,
        &[0; 0],
        &[0; 0],
        64,
        [
            0x78, 0x6a, 0x02, 0xf7, 0x42, 0x01, 0x59, 0x03, 0xc6, 0xc6, 0xfd, 0x85, 0x25, 0x52,
            0xd2, 0x72, 0x91, 0x2f, 0x47, 0x40, 0xe1, 0x58, 0x47, 0x61, 0x8a, 0x86, 0xe2, 0x17,
            0xf7, 0x1f, 0x54, 0x19, 0xd2, 0x5e, 0x10, 0x31, 0xaf, 0xee, 0x58, 0x53, 0x13, 0x89,
            0x64, 0x44, 0x93, 0x4e, 0xb0, 0x4b, 0x90, 0x3a, 0x68, 0x5b, 0x14, 0x48, 0xb7, 0x55,
            0xd5, 0x6f, 0x70, 0x1a, 0xfe, 0x9b, 0xe2, 0xce
        ]
    );

    // 64-byte key (bytes 0x00..=0x3f), empty message.
    digest_test!(
        blake2b_empty_with_key,
        &[0; 0],
        &[
            0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d,
            0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b,
            0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29,
            0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
            0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f
        ],
        64,
        [
            0x10, 0xeb, 0xb6, 0x77, 0x00, 0xb1, 0x86, 0x8e, 0xfb, 0x44, 0x17, 0x98, 0x7a, 0xcf,
            0x46, 0x90, 0xae, 0x9d, 0x97, 0x2f, 0xb7, 0xa5, 0x90, 0xc2, 0xf0, 0x28, 0x71, 0x79,
            0x9a, 0xaa, 0x47, 0x86, 0xb5, 0xe9, 0x96, 0xe8, 0xf0, 0xf4, 0xeb, 0x98, 0x1f, 0xc2,
            0x14, 0xb0, 0x05, 0xf4, 0x2d, 0x2f, 0xf4, 0x23, 0x34, 0x99, 0x39, 0x16, 0x53, 0xdf,
            0x7a, 0xef, 0xcb, 0xc1, 0x3f, 0xc5, 0x15, 0x68
        ]
    );

    // Same 64-byte key, single zero byte as message.
    digest_test!(
        blake2b_key_shortin,
        &[0],
        &[
            0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d,
            0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b,
            0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29,
            0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
            0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f
        ],
        64,
        [
            0x96, 0x1f, 0x6d, 0xd1, 0xe4, 0xdd, 0x30, 0xf6, 0x39, 0x01, 0x69, 0x0c, 0x51, 0x2e,
            0x78, 0xe4, 0xb4, 0x5e, 0x47, 0x42, 0xed, 0x19, 0x7c, 0x3c, 0x5e, 0x45, 0xc5, 0x49,
            0xfd, 0x25, 0xf2, 0xe4, 0x18, 0x7b, 0x0b, 0xc9, 0xfe, 0x30, 0x49, 0x2b, 0x16, 0xb0,
            0xd0, 0xbc, 0x4e, 0xf9, 0xb0, 0xf3, 0x4c, 0x70, 0x03, 0xfa, 0xc0, 0x9a, 0x5e, 0xf1,
            0x53, 0x2e, 0x69, 0x43, 0x02, 0x34, 0xce, 0xbd
        ]
    );

    // 64-byte key and 64-byte message (both 0x00..=0x3f).
    digest_test!(
        blake2b_keyed_filled,
        &[
            0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d,
            0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b,
            0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29,
            0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
            0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f
        ],
        &[
            0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d,
            0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b,
            0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29,
            0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
            0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f
        ],
        64,
        [
            0x65, 0x67, 0x6d, 0x80, 0x06, 0x17, 0x97, 0x2f, 0xbd, 0x87, 0xe4, 0xb9, 0x51, 0x4e,
            0x1c, 0x67, 0x40, 0x2b, 0x7a, 0x33, 0x10, 0x96, 0xd3, 0xbf, 0xac, 0x22, 0xf1, 0xab,
            0xb9, 0x53, 0x74, 0xab, 0xc9, 0x42, 0xf1, 0x6e, 0x9a, 0xb0, 0xea, 0xd3, 0x3b, 0x87,
            0xc9, 0x19, 0x68, 0xa6, 0xe5, 0x09, 0xe1, 0x19, 0xff, 0x07, 0x78, 0x7b, 0x3e, 0xf4,
            0x83, 0xe1, 0xdc, 0xdc, 0xcf, 0x6e, 0x30, 0x22
        ]
    );
}
| rust | {
"argument_definitions": [],
"end_line": 206,
"name": "blake2b",
"signature": "pub fn blake2b(m: &[u8], k: &[u8], nn: u8) -> Vec<u8>",
"start_line": 179
} | {
"class_name": "",
"class_signature": ""
} |
decimal_to_hexadecimal | Rust-master/src/conversions/decimal_to_hexadecimal.rs | pub fn decimal_to_hexadecimal(base_num: u64) -> String {
let mut num = base_num;
let mut hexadecimal_num = String::new();
loop {
let remainder = num % 16;
let hex_char = if remainder < 10 {
(remainder as u8 + b'0') as char
} else {
(remainder as u8 - 10 + b'A') as char
};
hexadecimal_num.insert(0, hex_char);
num /= 16;
if num == 0 {
break;
}
}
hexadecimal_num
} | pub fn decimal_to_hexadecimal(base_num: u64) -> String {
    // The standard formatter already implements the divide-by-16 digit loop:
    // `{:X}` renders an unsigned integer as uppercase hexadecimal, printing
    // "0" for zero and no leading zeros otherwise — identical output to the
    // manual construction it replaces.
    format!("{:X}", base_num)
}
#[cfg(test)]
mod tests {
    use super::*;

    // Zero must still produce one digit, not an empty string.
    #[test]
    fn test_zero() {
        assert_eq!(decimal_to_hexadecimal(0), "0");
    }

    // Values below ten map to the digits '0'..='9'.
    #[test]
    fn test_single_digit_decimal() {
        assert_eq!(decimal_to_hexadecimal(9), "9");
    }

    // Values ten and above map to uppercase 'A'..='F'.
    #[test]
    fn test_single_digit_hexadecimal() {
        assert_eq!(decimal_to_hexadecimal(12), "C");
    }

    #[test]
    fn test_multiple_digit_hexadecimal() {
        assert_eq!(decimal_to_hexadecimal(255), "FF");
    }

    // Largest u64: all sixteen hex digits set.
    #[test]
    fn test_big() {
        assert_eq!(decimal_to_hexadecimal(u64::MAX), "FFFFFFFFFFFFFFFF");
    }

    #[test]
    fn test_random() {
        assert_eq!(decimal_to_hexadecimal(123456), "1E240");
    }
}
| rust | {
"argument_definitions": [],
"end_line": 21,
"name": "decimal_to_hexadecimal",
"signature": "pub fn decimal_to_hexadecimal(base_num: u64) -> String",
"start_line": 1
} | {
"class_name": "",
"class_signature": ""
} |
area_under_curve | Rust-master/src/math/area_under_curve.rs | pub fn area_under_curve(start: f64, end: f64, func: fn(f64) -> f64, step_count: usize) -> f64 {
assert!(step_count > 0);
let (start, end) = if start > end {
(end, start)
} else {
(start, end)
}; //swap if bounds reversed
let step_length: f64 = (end - start) / step_count as f64;
let mut area = 0f64;
let mut fx1 = func(start);
let mut fx2: f64;
for eval_point in (1..=step_count).map(|x| (x as f64 * step_length) + start) {
fx2 = func(eval_point);
area += (fx2 + fx1).abs() * step_length * 0.5;
fx1 = fx2;
}
area
} | pub fn area_under_curve(start: f64, end: f64, func: fn(f64) -> f64, step_count: usize) -> f64 {
    // At least one trapezoid is required.
    assert!(step_count > 0);

    // Normalise the interval so integration always runs left to right.
    let (start, end) = if start > end {
        (end, start)
    } else {
        (start, end)
    };

    let step_length: f64 = (end - start) / step_count as f64;

    // Trapezoidal rule: accumulate |f(x_i) + f(x_{i+1})| * dx / 2 per step,
    // reusing the previous sample so `func` is evaluated once per point.
    let mut area = 0f64;
    let mut prev = func(start);
    for step in 1..=step_count {
        let next = func((step as f64 * step_length) + start);
        area += (next + prev).abs() * step_length * 0.5;
        prev = next;
    }
    area
}
#[cfg(test)]
mod test {
    use super::*;

    // Integral of x over [1, 2] is 1.5; the trapezoidal sum carries a tiny
    // floating-point rounding residue.
    #[test]
    fn test_linear_func() {
        assert_eq!(area_under_curve(1f64, 2f64, |x| x, 10), 1.5000000000000002);
    }

    // Integral of x^2 over [1, 2] is 7/3; 1000 steps approximate it closely.
    #[test]
    fn test_quadratic_func() {
        assert_eq!(
            area_under_curve(1f64, 2f64, |x| x * x, 1000),
            2.333333500000005
        );
    }

    // A zero-length interval yields zero area.
    #[test]
    fn test_zero_length() {
        assert_eq!(area_under_curve(0f64, 0f64, |x| x * x, 1000), 0.0);
    }

    // Reversed bounds are swapped internally, so the result is identical.
    #[test]
    fn test_reverse() {
        assert_eq!(
            area_under_curve(1f64, 2f64, |x| x, 10),
            area_under_curve(2f64, 1f64, |x| x, 10)
        );
    }
}
| rust | {
"argument_definitions": [
{
"definitions": [
"pub fn area_under_curve(start: f64, end: f64, func: fn(f64) -> f64, step_count: usize) -> f64 {\n assert!(step_count > 0);\n\n let (start, end) = if start > end {\n (end, start)\n } else {\n (start, end)\n }; //swap if bounds reversed\n\n let step_length: f64 = (end - start) / step_count as f64;\n let mut area = 0f64;\n let mut fx1 = func(start);\n let mut fx2: f64;\n\n for eval_point in (1..=step_count).map(|x| (x as f64 * step_length) + start) {\n fx2 = func(eval_point);\n area += (fx2 + fx1).abs() * step_length * 0.5;\n fx1 = fx2;\n }\n\n area\n}"
],
"name": "func",
"type": "fn(f64"
}
],
"end_line": 22,
"name": "area_under_curve",
"signature": "pub fn area_under_curve(start: f64, end: f64, func: fn(f64) -> f64, step_count: usize) -> f64",
"start_line": 1
} | {
"class_name": "",
"class_signature": ""
} |
miller_rabin | Rust-master/src/math/miller_rabin.rs | pub fn miller_rabin(number: u64, bases: &[u64]) -> u64 {
// returns zero on a probable prime, and a witness if number is not prime
// A base set for deterministic performance on 64 bit numbers is:
// [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37]
// another one for 32 bits:
// [2, 3, 5, 7], with smallest number to fail 3'215'031'751 = 151 * 751 * 28351
// note that all bases should be prime
if number <= 4 {
match number {
0 => {
panic!("0 is invalid input for Miller-Rabin. 0 is not prime by definition, but has no witness");
}
2 | 3 => return 0,
_ => return number,
}
}
if bases.contains(&number) {
return 0;
}
let two_power: u64 = (number - 1).trailing_zeros() as u64;
let odd_power = (number - 1) >> two_power;
for base in bases {
if !check_prime_base(number, *base, two_power, odd_power) {
return *base;
}
}
0
} | use num_bigint::BigUint;
use num_traits::{One, ToPrimitive, Zero};
use std::cmp::Ordering;
/// Computes `base^power mod modulo` by square-and-multiply, widening to
/// `u128` internally so intermediate products cannot overflow.
/// Panics when `modulo` is 0 (division by zero).
fn modulo_power(mut base: u64, mut power: u64, modulo: u64) -> u64 {
    base %= modulo;
    if base == 0 {
        // The base is divisible by the modulus, so every power reduces to 0.
        return 0;
    }
    let m = modulo as u128;
    let mut result: u128 = 1;
    let mut square: u128 = base as u128;
    // Walk the exponent bit by bit, least significant first.
    while power > 0 {
        if power & 1 == 1 {
            result = (result * square) % m;
        }
        square = (square * square) % m;
        power >>= 1;
    }
    result as u64
}
/// Strong-probable-prime check for a single base.
///
/// `number - 1` must equal `odd_power * 2^two_power` with `odd_power` odd.
/// Returns `false` exactly when `base` is a Miller-Rabin witness that
/// `number` is composite.
fn check_prime_base(number: u64, base: u64, two_power: u64, odd_power: u64) -> bool {
    let n = number as u128;
    let mut x = modulo_power(base, odd_power, number) as u128;
    if x == 1 || x == n - 1 {
        return true;
    }
    // Square up to two_power - 1 more times; the number passes for this base
    // iff some square hits n - 1 (short-circuits like the original loop).
    (1..two_power).any(|_| {
        x = (x * x) % n;
        x == n - 1
    })
}
/// Miller-Rabin primality test.
///
/// Returns 0 when `number` is (probably) prime with respect to `bases`,
/// otherwise the first base that witnesses compositeness.
///
/// With [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37] the test is
/// deterministic for all 64-bit inputs; [2, 3, 5, 7] suffices for 32-bit
/// values (smallest failure 3'215'031'751 = 151 * 751 * 28351). All bases
/// should be prime.
///
/// # Panics
///
/// Panics when `number` is 0, which is not prime yet has no witness.
pub fn miller_rabin(number: u64, bases: &[u64]) -> u64 {
    match number {
        0 => {
            panic!("0 is invalid input for Miller-Rabin. 0 is not prime by definition, but has no witness")
        }
        2 | 3 => return 0,
        // 1 and 4 are non-prime and act as their own witnesses.
        n if n <= 4 => return n,
        _ => {}
    }
    // A base equal to the number would reduce to zero; treat it as prime.
    if bases.contains(&number) {
        return 0;
    }
    // Decompose number - 1 as odd_power * 2^two_power.
    let two_power: u64 = (number - 1).trailing_zeros() as u64;
    let odd_power = (number - 1) >> two_power;
    // First base that disproves primality, or 0 when none does.
    bases
        .iter()
        .copied()
        .find(|&base| !check_prime_base(number, base, two_power, odd_power))
        .unwrap_or(0)
}
/// Arbitrary-precision Miller-Rabin over `BigUint`.
///
/// Returns 0 when `number_ref` is a probable prime with respect to `bases`,
/// otherwise the first base that witnesses compositeness.
///
/// # Panics
///
/// Panics when the input is 0 (not prime, but has no witness).
pub fn big_miller_rabin(number_ref: &BigUint, bases: &[u64]) -> u64 {
    let number = number_ref.clone();
    // Resolve 0..=4 directly, mirroring the u64 implementation.
    if BigUint::from(5u32).cmp(&number) == Ordering::Greater {
        if number.eq(&BigUint::zero()) {
            panic!("0 is invalid input for Miller-Rabin. 0 is not prime by definition, but has no witness");
        } else if number.eq(&BigUint::from(2u32)) || number.eq(&BigUint::from(3u32)) {
            return 0;
        } else {
            // 1 and 4 are their own witnesses; to_u64 cannot fail below 5.
            return number.to_u64().unwrap();
        }
    }
    // A base equal to the number itself counts as prime.
    if let Some(num) = number.to_u64() {
        if bases.contains(&num) {
            return 0;
        }
    }
    // Decompose number - 1 as odd_power * 2^two_power.
    let num_minus_one = &number - BigUint::one();
    let two_power: u64 = num_minus_one.trailing_zeros().unwrap();
    let odd_power: BigUint = &num_minus_one >> two_power;
    for base in bases {
        // x = base^odd_power (mod number)
        let mut x = BigUint::from(*base).modpow(&odd_power, &number);
        if x.eq(&BigUint::one()) || x.eq(&num_minus_one) {
            continue;
        }
        // Square repeatedly; reaching number - 1 clears this base.
        let mut not_a_witness = false;
        for _ in 1..two_power {
            x = (&x * &x) % &number;
            if x.eq(&num_minus_one) {
                not_a_witness = true;
                break;
            }
        }
        if not_a_witness {
            continue;
        }
        // `base` proves the number composite.
        return *base;
    }
    0
}
#[cfg(test)]
mod tests {
    use super::*;

    // Twelve prime bases: deterministic for every 64-bit input.
    static DEFAULT_BASES: [u64; 12] = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37];

    #[test]
    fn basic() {
        // these bases make miller rabin deterministic for any number < 2 ^ 64
        // can use smaller number of bases for deterministic performance for numbers < 2 ^ 32
        assert_eq!(miller_rabin(3, &DEFAULT_BASES), 0);
        assert_eq!(miller_rabin(7, &DEFAULT_BASES), 0);
        assert_eq!(miller_rabin(11, &DEFAULT_BASES), 0);
        assert_eq!(miller_rabin(2003, &DEFAULT_BASES), 0);
        assert_ne!(miller_rabin(1, &DEFAULT_BASES), 0);
        assert_ne!(miller_rabin(4, &DEFAULT_BASES), 0);
        assert_ne!(miller_rabin(6, &DEFAULT_BASES), 0);
        assert_ne!(miller_rabin(21, &DEFAULT_BASES), 0);
        assert_ne!(miller_rabin(2004, &DEFAULT_BASES), 0);

        // bigger test cases.
        // primes are generated using openssl
        // non primes are randomly picked and checked using openssl

        // primes:
        assert_eq!(miller_rabin(3629611793, &DEFAULT_BASES), 0);
        assert_eq!(miller_rabin(871594686869, &DEFAULT_BASES), 0);
        assert_eq!(miller_rabin(968236663804121, &DEFAULT_BASES), 0);
        assert_eq!(miller_rabin(6920153791723773023, &DEFAULT_BASES), 0);

        // random non primes:
        assert_ne!(miller_rabin(4546167556336341257, &DEFAULT_BASES), 0);
        assert_ne!(miller_rabin(4363186415423517377, &DEFAULT_BASES), 0);
        assert_ne!(miller_rabin(815479701131020226, &DEFAULT_BASES), 0);
        // these two are made of two 31 bit prime factors:
        // 1950202127 * 2058609037 = 4014703722618821699
        assert_ne!(miller_rabin(4014703722618821699, &DEFAULT_BASES), 0);
        // 1679076769 * 2076341633 = 3486337000477823777
        assert_ne!(miller_rabin(3486337000477823777, &DEFAULT_BASES), 0);
    }

    // The BigUint implementation must agree with the u64 one on the same
    // inputs.
    #[test]
    fn big_basic() {
        assert_eq!(big_miller_rabin(&BigUint::from(3u32), &DEFAULT_BASES), 0);
        assert_eq!(big_miller_rabin(&BigUint::from(7u32), &DEFAULT_BASES), 0);
        assert_eq!(big_miller_rabin(&BigUint::from(11u32), &DEFAULT_BASES), 0);
        assert_eq!(big_miller_rabin(&BigUint::from(2003u32), &DEFAULT_BASES), 0);
        assert_ne!(big_miller_rabin(&BigUint::from(1u32), &DEFAULT_BASES), 0);
        assert_ne!(big_miller_rabin(&BigUint::from(4u32), &DEFAULT_BASES), 0);
        assert_ne!(big_miller_rabin(&BigUint::from(6u32), &DEFAULT_BASES), 0);
        assert_ne!(big_miller_rabin(&BigUint::from(21u32), &DEFAULT_BASES), 0);
        assert_ne!(big_miller_rabin(&BigUint::from(2004u32), &DEFAULT_BASES), 0);
        assert_eq!(
            big_miller_rabin(&BigUint::from(3629611793u64), &DEFAULT_BASES),
            0
        );
        assert_eq!(
            big_miller_rabin(&BigUint::from(871594686869u64), &DEFAULT_BASES),
            0
        );
        assert_eq!(
            big_miller_rabin(&BigUint::from(968236663804121u64), &DEFAULT_BASES),
            0
        );
        assert_eq!(
            big_miller_rabin(&BigUint::from(6920153791723773023u64), &DEFAULT_BASES),
            0
        );
        assert_ne!(
            big_miller_rabin(&BigUint::from(4546167556336341257u64), &DEFAULT_BASES),
            0
        );
        assert_ne!(
            big_miller_rabin(&BigUint::from(4363186415423517377u64), &DEFAULT_BASES),
            0
        );
        assert_ne!(
            big_miller_rabin(&BigUint::from(815479701131020226u64), &DEFAULT_BASES),
            0
        );
        assert_ne!(
            big_miller_rabin(&BigUint::from(4014703722618821699u64), &DEFAULT_BASES),
            0
        );
        assert_ne!(
            big_miller_rabin(&BigUint::from(3486337000477823777u64), &DEFAULT_BASES),
            0
        );
    }

    // Inputs far beyond 64 bits (base-10 and base-36 literals); slow, so the
    // test is #[ignore]d by default.
    #[test]
    #[ignore]
    fn big_primes() {
        let p1 =
            BigUint::parse_bytes(b"4764862697132131451620315518348229845593592794669", 10).unwrap();
        assert_eq!(big_miller_rabin(&p1, &DEFAULT_BASES), 0);
        let p2 = BigUint::parse_bytes(
            b"12550757946601963214089118080443488976766669415957018428703",
            10,
        )
        .unwrap();
        assert_eq!(big_miller_rabin(&p2, &DEFAULT_BASES), 0);
        // An RSA-worthy prime
        let p3 = BigUint::parse_bytes(b"157d6l5zkv45ve4azfw7nyyjt6rzir2gcjoytjev5iacnkaii8hlkyk3op7bx9qfqiie23vj9iw4qbp7zupydfq9ut6mq6m36etya6cshtqi1yi9q5xyiws92el79dqt8qk7l2pqmxaa0sxhmd2vpaibo9dkfd029j1rvkwlw4724ctgaqs5jzy0bqi5pqdjc2xerhn", 36).unwrap();
        assert_eq!(big_miller_rabin(&p3, &DEFAULT_BASES), 0);
        let n1 = BigUint::parse_bytes(b"coy6tkiaqswmce1r03ycdif3t796wzjwneewbe3cmncaplm85jxzcpdmvy0moic3lql70a81t5qdn2apac0dndhohewkspuk1wyndxsgxs3ux4a7730unru7dfmygh", 36).unwrap();
        assert_ne!(big_miller_rabin(&n1, &DEFAULT_BASES), 0);
        // RSA-2048
        let n2 = BigUint::parse_bytes(b"4l91lq4a2sgekpv8ukx1gxsk7mfeks46haggorlkazm0oufxwijid6q6v44u5me3kz3ne6yczp4fcvo62oej72oe7pjjtyxgid5b8xdz1e8daafspbzcy1hd8i4urjh9hm0tyylsgqsss3jn372d6fmykpw4bb9cr1ngxnncsbod3kg49o7owzqnsci5pwqt8bch0t60gq0st2gyx7ii3mzhb1pp1yvjyor35hwvok1sxj3ih46rpd27li8y5yli3mgdttcn65k3szfa6rbcnbgkojqjjq72gar6raslnh6sjd2fy7yj3bwo43obvbg3ws8y28kpol3okb5b3fld03sq1kgrj2fugiaxgplva6x5ssilqq4g0b21xy2kiou3sqsgonmqx55v", 36).unwrap();
        assert_ne!(big_miller_rabin(&n2, &DEFAULT_BASES), 0);
    }
}
| rust | {
"argument_definitions": [],
"end_line": 65,
"name": "miller_rabin",
"signature": "pub fn miller_rabin(number: u64, bases: &[u64]) -> u64",
"start_line": 38
} | {
"class_name": "",
"class_signature": ""
} |
bell_number | Rust-master/src/math/bell_numbers.rs | pub fn bell_number(n: u32) -> BigUint {
let needs_resize;
// Check if number is already in lookup table
{
let lookup_table = LOOKUP_TABLE_LOCK.read().unwrap();
if let Some(entry) = lookup_table.get(n as usize) {
return entry;
}
needs_resize = (n + 1) as usize > lookup_table.capacity();
}
// Resize table before recursion so that if more values need to be added during recursion the table isn't
// reallocated every single time
if needs_resize {
let mut lookup_table = LOOKUP_TABLE_LOCK.write().unwrap();
lookup_table.resize((n + 1) as usize);
}
let new_bell_number: BigUint = (0..n).map(|x| bell_number(x) * n_choose_r(n - 1, x)).sum();
// Add new number to lookup table
{
let mut lookup_table = LOOKUP_TABLE_LOCK.write().unwrap();
lookup_table.set(n as usize, new_bell_number.clone());
}
new_bell_number
} | use num_bigint::BigUint;
use num_traits::{One, Zero};
use std::sync::RwLock;
/// Returns the number of ways you can select r items given n options
fn n_choose_r(n: u32, r: u32) -> BigUint {
    // C(n, n) == C(n, 0) == 1.
    if r == n || r == 0 {
        return One::one();
    }
    // Choosing more items than there are options is impossible.
    if r > n {
        return Zero::zero();
    }
    // Any combination will only need to be computed once, thus giving no need to
    // memoize this function
    // Builds C(n, r) as prod_{x=0}^{r-1} (n - x) / (x + 1); every
    // intermediate product is itself a binomial coefficient, so each
    // division is exact.
    let product: BigUint = (0..r).fold(BigUint::one(), |acc, x| {
        (acc * BigUint::from(n - x)) / BigUint::from(x + 1)
    });
    product
}
/// A memoization table for storing previously computed Bell numbers.
struct MemTable {
    // buffer[n] caches the n-th Bell number; a stored zero means "empty",
    // which is unambiguous because no Bell number is 0.
    buffer: Vec<BigUint>,
}

impl MemTable {
    // `const` so the table can back a `static` (see LOOKUP_TABLE_LOCK).
    const fn new() -> Self {
        MemTable { buffer: Vec::new() }
    }

    /// Cached value for index `n`, if any. Indices 0 and 1 are hard-wired
    /// to 1 and never touch the buffer.
    fn get(&self, n: usize) -> Option<BigUint> {
        if n == 0 || n == 1 {
            Some(BigUint::one())
        } else if let Some(entry) = self.buffer.get(n) {
            if *entry == BigUint::zero() {
                None
            } else {
                Some(entry.clone())
            }
        } else {
            None
        }
    }

    /// Stores a computed value, growing the buffer when `n` is out of range.
    ///
    /// Growing here (instead of indexing unconditionally) guards against
    /// callers that skip `resize` because the buffer's *capacity* — but not
    /// its *length* — already covers `n`; the old unconditional index would
    /// panic in that case.
    fn set(&mut self, n: usize, b: BigUint) {
        if n >= self.buffer.len() {
            self.buffer.resize(n + 1, Zero::zero());
        }
        self.buffer[n] = b;
    }

    #[inline]
    fn capacity(&self) -> usize {
        self.buffer.capacity()
    }

    /// Grows the table (zero-filled) up to `new_size`; never shrinks.
    #[inline]
    fn resize(&mut self, new_size: usize) {
        if new_size > self.buffer.len() {
            self.buffer.resize(new_size, Zero::zero());
        }
    }
}

// Implemented with RwLock so it is accessible across threads
static LOOKUP_TABLE_LOCK: RwLock<MemTable> = RwLock::new(MemTable::new());
/// Computes the n-th Bell number (the number of partitions of a set of
/// size n) via B(n) = sum_{x=0}^{n-1} C(n-1, x) * B(x), memoizing results
/// in the shared LOOKUP_TABLE_LOCK across calls and threads.
pub fn bell_number(n: u32) -> BigUint {
    let needs_resize;
    // Check if number is already in lookup table
    {
        let lookup_table = LOOKUP_TABLE_LOCK.read().unwrap();
        if let Some(entry) = lookup_table.get(n as usize) {
            return entry;
        }
        // NOTE(review): this gates the resize on spare *capacity*, but
        // `MemTable::set` indexes by `n`, which requires the *length* to
        // cover it. A call pattern where capacity outgrows length can skip
        // the resize and panic in `set` — verify against `MemTable`.
        needs_resize = (n + 1) as usize > lookup_table.capacity();
    }
    // Resize table before recursion so that if more values need to be added during recursion the table isn't
    // reallocated every single time
    if needs_resize {
        let mut lookup_table = LOOKUP_TABLE_LOCK.write().unwrap();
        lookup_table.resize((n + 1) as usize);
    }
    // Recurrence over all smaller Bell numbers (each served from the cache
    // after its first computation).
    let new_bell_number: BigUint = (0..n).map(|x| bell_number(x) * n_choose_r(n - 1, x)).sum();
    // Add new number to lookup table
    {
        let mut lookup_table = LOOKUP_TABLE_LOCK.write().unwrap();
        lookup_table.set(n as usize, new_bell_number.clone());
    }
    new_bell_number
}
#[cfg(test)]
pub mod tests {
    use super::*;
    use std::str::FromStr;

    // C(n, 0) is 1 for every n.
    #[test]
    fn test_choose_zero() {
        for i in 1..100 {
            assert_eq!(n_choose_r(i, 0), One::one());
        }
    }

    // Binomial coefficients, including the symmetry C(n, r) == C(n, n - r).
    #[test]
    fn test_combination() {
        let five_choose_1 = BigUint::from(5u32);
        assert_eq!(n_choose_r(5, 1), five_choose_1);
        assert_eq!(n_choose_r(5, 4), five_choose_1);
        let ten_choose_3 = BigUint::from(120u32);
        assert_eq!(n_choose_r(10, 3), ten_choose_3);
        assert_eq!(n_choose_r(10, 7), ten_choose_3);
        let fourty_two_choose_thirty = BigUint::from_str("11058116888").unwrap();
        assert_eq!(n_choose_r(42, 30), fourty_two_choose_thirty);
        assert_eq!(n_choose_r(42, 12), fourty_two_choose_thirty);
    }

    // Known Bell numbers: B(1), B(3), B(8), B(6) (cached by the B(8) call),
    // and the 20-digit B(26).
    #[test]
    fn test_bell_numbers() {
        let bell_one = BigUint::from(1u32);
        assert_eq!(bell_number(1), bell_one);
        let bell_three = BigUint::from(5u32);
        assert_eq!(bell_number(3), bell_three);
        let bell_eight = BigUint::from(4140u32);
        assert_eq!(bell_number(8), bell_eight);
        let bell_six = BigUint::from(203u32);
        assert_eq!(bell_number(6), bell_six);
        let bell_twenty_six = BigUint::from_str("49631246523618756274").unwrap();
        assert_eq!(bell_number(26), bell_twenty_six);
    }
}
| rust | {
"argument_definitions": [],
"end_line": 101,
"name": "bell_number",
"signature": "pub fn bell_number(n: u32) -> BigUint",
"start_line": 69
} | {
"class_name": "",
"class_signature": ""
} |
least_square_approx | Rust-master/src/math/least_square_approx.rs | pub fn least_square_approx(
points: &[(T, U)],
degree: i32,
) -> Option<Vec<f64>> {
use nalgebra::{DMatrix, DVector};
/* Used for rounding floating numbers */
fn round_to_decimals(value: f64, decimals: i32) -> f64 {
let multiplier = 10f64.powi(decimals);
(value * multiplier).round() / multiplier
}
/* Casting the data parsed to this function to f64 (as some points can have decimals) */
let vals: Vec<(f64, f64)> = points
.iter()
.map(|(x, y)| ((*x).into(), (*y).into()))
.collect();
/* Because of collect we need the Copy Trait for T and U */
/* Computes the sums in the system of equations */
let mut sums = Vec::<f64>::new();
for i in 1..=(2 * degree + 1) {
sums.push(vals.iter().map(|(x, _)| x.powi(i - 1)).sum());
}
/* Compute the free terms column vector */
let mut free_col = Vec::<f64>::new();
for i in 1..=(degree + 1) {
free_col.push(vals.iter().map(|(x, y)| y * (x.powi(i - 1))).sum());
}
let b = DVector::from_row_slice(&free_col);
/* Create and fill the system's matrix */
let size = (degree + 1) as usize;
let a = DMatrix::from_fn(size, size, |i, j| sums[degree as usize + i - j]);
/* Solve the system of equations: A * x = b */
match a.qr().solve(&b) {
Some(x) => {
let rez: Vec<f64> = x.iter().map(|x| round_to_decimals(*x, 5)).collect();
Some(rez)
}
None => None, //<-- The system cannot be solved (badly conditioned system's matrix)
}
} | /// Least Square Approximation <p>
/// Function that returns a polynomial which very closely passes through the given points (in 2D)
///
/// The result is made of coeficients, in descending order (from x^degree to free term)
///
/// Parameters:
///
/// points -> coordinates of given points
///
/// degree -> degree of the polynomial
///
pub fn least_square_approx<T: Into<f64> + Copy, U: Into<f64> + Copy>(
    points: &[(T, U)],
    degree: i32,
) -> Option<Vec<f64>> {
    use nalgebra::{DMatrix, DVector};

    /* Rounds a float to the given number of decimal places, to stabilise the
    solver's output for comparison. */
    fn round_to_decimals(value: f64, decimals: i32) -> f64 {
        let multiplier = 10f64.powi(decimals);
        (value * multiplier).round() / multiplier
    }

    /* Cast the incoming points to f64 (the generic bounds let callers mix any
    types convertible to f64; Copy is needed because collect takes values). */
    let vals: Vec<(f64, f64)> = points
        .iter()
        .map(|(x, y)| ((*x).into(), (*y).into()))
        .collect();

    /* Power sums for the normal equations: sums[k] = sum of x_i^k,
    for k in 0..=2*degree. */
    let mut sums = Vec::<f64>::new();
    for i in 1..=(2 * degree + 1) {
        sums.push(vals.iter().map(|(x, _)| x.powi(i - 1)).sum());
    }

    /* Free-terms column vector: entry k is the sum of y_i * x_i^k. */
    let mut free_col = Vec::<f64>::new();
    for i in 1..=(degree + 1) {
        free_col.push(vals.iter().map(|(x, y)| y * (x.powi(i - 1))).sum());
    }
    let b = DVector::from_row_slice(&free_col);

    /* Create and fill the system's matrix from the precomputed sums. */
    let size = (degree + 1) as usize;
    let a = DMatrix::from_fn(size, size, |i, j| sums[degree as usize + i - j]);

    /* Solve A * x = b. `solve` yields None for a badly conditioned (unsolvable)
    system; `Option::map` replaces the former match-to-Some/None boilerplate. */
    a.qr()
        .solve(&b)
        .map(|x| x.iter().map(|x| round_to_decimals(*x, 5)).collect())
}
#[cfg(test)]
mod tests {
    use super::*;

    // Degree-1 (linear) fit through ten points; the result is
    // [slope, intercept], rounded to 5 decimals.
    #[test]
    fn ten_points_1st_degree() {
        let points = vec![
            (5.3, 7.8),
            (4.9, 8.1),
            (6.1, 6.9),
            (4.7, 8.3),
            (6.5, 7.7),
            (5.6, 7.0),
            (5.8, 8.2),
            (4.5, 8.0),
            (6.3, 7.2),
            (5.1, 8.4),
        ];
        assert_eq!(
            least_square_approx(&points, 1),
            Some(vec![-0.49069, 10.44898])
        );
    }

    // Degree-5 (quintic) fit through eight points; coefficients are in
    // descending order of the power of x.
    #[test]
    fn eight_points_5th_degree() {
        let points = vec![
            (4f64, 8f64),
            (8f64, 2f64),
            (1f64, 7f64),
            (10f64, 3f64),
            (11.0, 0.0),
            (7.0, 3.0),
            (10.0, 1.0),
            (13.0, 13.0),
        ];
        assert_eq!(
            least_square_approx(&points, 5),
            Some(vec![
                0.00603, -0.21304, 2.79929, -16.53468, 40.29473, -19.35771
            ])
        );
    }

    // Points lying exactly on y = x^2 + 3 recover the parabola precisely.
    #[test]
    fn four_points_2nd_degree() {
        let points = vec![
            (2.312, 8.345344),
            (-2.312, 8.345344),
            (-0.7051, 3.49716601),
            (0.7051, 3.49716601),
        ];
        assert_eq!(least_square_approx(&points, 2), Some(vec![1.0, 0.0, 3.0]));
    }
}
| rust | {
"argument_definitions": [],
"end_line": 56,
"name": "least_square_approx",
"signature": "pub fn least_square_approx(\n points: &[(T, U)],\n degree: i32,\n) -> Option<Vec<f64>>",
"start_line": 12
} | {
"class_name": "",
"class_signature": ""
} |
modular_exponential | Rust-master/src/math/modular_exponential.rs | pub fn modular_exponential(base: i64, mut power: i64, modulus: i64) -> i64 {
if modulus == 1 {
return 0; // Base case: any number modulo 1 is 0
}
// Adjust if the exponent is negative by finding the modular inverse
let mut base = if power < 0 {
mod_inverse(base, modulus)
} else {
base % modulus
};
let mut result = 1; // Initialize result
power = power.abs(); // Work with the absolute value of the exponent
// Perform the exponentiation
while power > 0 {
if power & 1 == 1 {
result = (result * base) % modulus;
}
power >>= 1; // Divide the power by 2
base = (base * base) % modulus; // Square the base
}
result
} | /// Calculate the greatest common divisor (GCD) of two numbers and the
/// coefficients of Bézout's identity using the Extended Euclidean Algorithm.
///
/// # Arguments
///
/// * `a` - One of the numbers to find the GCD of
/// * `m` - The other number to find the GCD of
///
/// # Returns
///
/// A tuple (gcd, x1, x2) such that:
/// gcd - the greatest common divisor of a and m.
/// x1, x2 - the coefficients such that `a * x1 + m * x2` is equivalent to `gcd` modulo `m`.
pub fn gcd_extended(a: i64, m: i64) -> (i64, i64, i64) {
if a == 0 {
(m, 0, 1)
} else {
let (gcd, x1, x2) = gcd_extended(m % a, a);
let x = x2 - (m / a) * x1;
(gcd, x, x1)
}
}
/// Find the modular multiplicative inverse of a number modulo `m`.
///
/// # Arguments
///
/// * `b` - The number to find the modular inverse of
/// * `m` - The modulus
///
/// # Returns
///
/// The modular inverse of `b` modulo `m`.
///
/// # Panics
///
/// Panics if the inverse does not exist (i.e., `b` and `m` are not coprime).
pub fn mod_inverse(b: i64, m: i64) -> i64 {
let (gcd, x, _) = gcd_extended(b, m);
if gcd != 1 {
panic!("Inverse does not exist");
} else {
// Ensure the modular inverse is positive
(x % m + m) % m
}
}
/// Perform modular exponentiation of a number raised to a power modulo `m`.
/// This function handles both positive and negative exponents.
///
/// # Arguments
///
/// * `base` - The base number to be raised to the `power`
/// * `power` - The exponent to raise the `base` to
/// * `modulus` - The modulus to perform the operation under
///
/// # Returns
///
/// The result of `base` raised to `power` modulo `modulus`.
pub fn modular_exponential(base: i64, mut power: i64, modulus: i64) -> i64 {
if modulus == 1 {
return 0; // Base case: any number modulo 1 is 0
}
// Adjust if the exponent is negative by finding the modular inverse
let mut base = if power < 0 {
mod_inverse(base, modulus)
} else {
base % modulus
};
let mut result = 1; // Initialize result
power = power.abs(); // Work with the absolute value of the exponent
// Perform the exponentiation
while power > 0 {
if power & 1 == 1 {
result = (result * base) % modulus;
}
power >>= 1; // Divide the power by 2
base = (base * base) % modulus; // Square the base
}
result
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_modular_exponential_positive() {
assert_eq!(modular_exponential(2, 3, 5), 3); // 2^3 % 5 = 8 % 5 = 3
assert_eq!(modular_exponential(7, 2, 13), 10); // 7^2 % 13 = 49 % 13 = 10
assert_eq!(modular_exponential(5, 5, 31), 25); // 5^5 % 31 = 3125 % 31 = 25
assert_eq!(modular_exponential(10, 8, 11), 1); // 10^8 % 11 = 100000000 % 11 = 1
assert_eq!(modular_exponential(123, 45, 67), 62); // 123^45 % 67
}
#[test]
fn test_modular_inverse() {
assert_eq!(mod_inverse(7, 13), 2); // Inverse of 7 mod 13 is 2
assert_eq!(mod_inverse(5, 31), 25); // Inverse of 5 mod 31 is 25
assert_eq!(mod_inverse(10, 11), 10); // Inverse of 10 mod 1 is 10
assert_eq!(mod_inverse(123, 67), 6); // Inverse of 123 mod 67 is 6
assert_eq!(mod_inverse(9, 17), 2); // Inverse of 9 mod 17 is 2
}
#[test]
fn test_modular_exponential_negative() {
assert_eq!(
modular_exponential(7, -2, 13),
mod_inverse(7, 13).pow(2) % 13
); // Inverse of 7 mod 13 is 2, 2^2 % 13 = 4 % 13 = 4
assert_eq!(
modular_exponential(5, -5, 31),
mod_inverse(5, 31).pow(5) % 31
); // Inverse of 5 mod 31 is 25, 25^5 % 31 = 25
assert_eq!(
modular_exponential(10, -8, 11),
mod_inverse(10, 11).pow(8) % 11
); // Inverse of 10 mod 11 is 10, 10^8 % 11 = 10
assert_eq!(
modular_exponential(123, -5, 67),
mod_inverse(123, 67).pow(5) % 67
); // Inverse of 123 mod 67 is calculated via the function
}
#[test]
fn test_modular_exponential_edge_cases() {
assert_eq!(modular_exponential(0, 0, 1), 0); // 0^0 % 1 should be 0 as the modulus is 1
assert_eq!(modular_exponential(0, 10, 1), 0); // 0^n % 1 should be 0 for any n
assert_eq!(modular_exponential(10, 0, 1), 0); // n^0 % 1 should be 0 for any n
assert_eq!(modular_exponential(1, 1, 1), 0); // 1^1 % 1 should be 0
assert_eq!(modular_exponential(-1, 2, 1), 0); // (-1)^2 % 1 should be 0
}
}
| rust | {
"argument_definitions": [],
"end_line": 84,
"name": "modular_exponential",
"signature": "pub fn modular_exponential(base: i64, mut power: i64, modulus: i64) -> i64",
"start_line": 60
} | {
"class_name": "",
"class_signature": ""
} |
prime_factors | Rust-master/src/math/prime_factors.rs | pub fn prime_factors(n: u64) -> Vec<u64> {
let mut i = 2;
let mut n = n;
let mut factors = Vec::new();
while i * i <= n {
if n % i != 0 {
if i != 2 {
i += 1;
}
i += 1;
} else {
n /= i;
factors.push(i);
}
}
if n > 1 {
factors.push(n);
}
factors
} | // Finds the prime factors of a number in increasing order, with repetition.
pub fn prime_factors(n: u64) -> Vec<u64> {
let mut i = 2;
let mut n = n;
let mut factors = Vec::new();
while i * i <= n {
if n % i != 0 {
if i != 2 {
i += 1;
}
i += 1;
} else {
n /= i;
factors.push(i);
}
}
if n > 1 {
factors.push(n);
}
factors
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn it_works() {
assert_eq!(prime_factors(0), vec![]);
assert_eq!(prime_factors(1), vec![]);
assert_eq!(prime_factors(11), vec![11]);
assert_eq!(prime_factors(25), vec![5, 5]);
assert_eq!(prime_factors(33), vec![3, 11]);
assert_eq!(prime_factors(2560), vec![2, 2, 2, 2, 2, 2, 2, 2, 2, 5]);
}
}
| rust | {
"argument_definitions": [],
"end_line": 22,
"name": "prime_factors",
"signature": "pub fn prime_factors(n: u64) -> Vec<u64>",
"start_line": 3
} | {
"class_name": "",
"class_signature": ""
} |
baby_step_giant_step | Rust-master/src/math/baby_step_giant_step.rs | pub fn baby_step_giant_step(a: usize, b: usize, n: usize) -> Option<usize> {
if greatest_common_divisor::greatest_common_divisor_stein(a as u64, n as u64) != 1 {
return None;
}
let mut h_map = HashMap::new();
let m = (n as f64).sqrt().ceil() as usize;
// baby step
let mut step = 1;
for i in 0..m {
h_map.insert((step * b) % n, i);
step = (step * a) % n;
}
// Now step = a^m (mod n), giant step
let giant_step = step;
for i in (m..=n).step_by(m) {
if let Some(v) = h_map.get(&step) {
return Some(i - v);
}
step = (step * giant_step) % n;
}
None
} | use crate::math::greatest_common_divisor;
/// Baby-step Giant-step algorithm
///
/// Solving discrete logarithm problem:
/// a^x = b (mod n) , with respect to gcd(a, n) == 1
/// with O(sqrt(n)) time complexity.
///
/// Wikipedia reference: https://en.wikipedia.org/wiki/Baby-step_giant-step
/// When a is the primitive root modulo n, the answer is unique.
/// Otherwise it will return the smallest positive solution
use std::collections::HashMap;
pub fn baby_step_giant_step(a: usize, b: usize, n: usize) -> Option<usize> {
if greatest_common_divisor::greatest_common_divisor_stein(a as u64, n as u64) != 1 {
return None;
}
let mut h_map = HashMap::new();
let m = (n as f64).sqrt().ceil() as usize;
// baby step
let mut step = 1;
for i in 0..m {
h_map.insert((step * b) % n, i);
step = (step * a) % n;
}
// Now step = a^m (mod n), giant step
let giant_step = step;
for i in (m..=n).step_by(m) {
if let Some(v) = h_map.get(&step) {
return Some(i - v);
}
step = (step * giant_step) % n;
}
None
}
#[cfg(test)]
mod tests {
use super::baby_step_giant_step;
#[test]
fn small_numbers() {
assert_eq!(baby_step_giant_step(5, 3, 11), Some(2));
assert_eq!(baby_step_giant_step(3, 83, 100), Some(9));
assert_eq!(baby_step_giant_step(9, 1, 61), Some(5));
assert_eq!(baby_step_giant_step(5, 1, 67), Some(22));
assert_eq!(baby_step_giant_step(7, 1, 45), Some(12));
}
#[test]
fn primitive_root_tests() {
assert_eq!(
baby_step_giant_step(3, 311401496, 998244353),
Some(178105253)
);
assert_eq!(
baby_step_giant_step(5, 324637211, 1000000007),
Some(976653449)
);
}
#[test]
fn random_numbers() {
assert_eq!(baby_step_giant_step(174857, 48604, 150991), Some(177));
assert_eq!(baby_step_giant_step(912103, 53821, 75401), Some(2644));
assert_eq!(baby_step_giant_step(448447, 365819, 671851), Some(23242));
assert_eq!(
baby_step_giant_step(220757103, 92430653, 434948279),
Some(862704)
);
assert_eq!(
baby_step_giant_step(176908456, 23538399, 142357679),
Some(14215560)
);
}
#[test]
fn no_solution() {
assert!(baby_step_giant_step(7, 6, 45).is_none());
assert!(baby_step_giant_step(23, 15, 85).is_none());
assert!(baby_step_giant_step(2, 1, 84).is_none());
}
}
| rust | {
"argument_definitions": [],
"end_line": 35,
"name": "baby_step_giant_step",
"signature": "pub fn baby_step_giant_step(a: usize, b: usize, n: usize) -> Option<usize>",
"start_line": 13
} | {
"class_name": "",
"class_signature": ""
} |
trial_division | Rust-master/src/math/trial_division.rs | pub fn trial_division(mut num: i128) -> Vec<i128> {
if num < 0 {
return trial_division(-num);
}
let mut result: Vec<i128> = vec![];
if num == 0 {
return result;
}
while num % 2 == 0 {
result.push(2);
num /= 2;
num = double_to_int(floor(num as f64, 0))
}
let mut f: i128 = 3;
while f.pow(2) <= num {
if num % f == 0 {
result.push(f);
num /= f;
num = double_to_int(floor(num as f64, 0))
} else {
f += 2
}
}
if num != 1 {
result.push(num)
}
result
} | fn floor(value: f64, scale: u8) -> f64 {
let multiplier = 10i64.pow(scale as u32) as f64;
(value * multiplier).floor()
}
fn double_to_int(amount: f64) -> i128 {
amount.round() as i128
}
pub fn trial_division(mut num: i128) -> Vec<i128> {
if num < 0 {
return trial_division(-num);
}
let mut result: Vec<i128> = vec![];
if num == 0 {
return result;
}
while num % 2 == 0 {
result.push(2);
num /= 2;
num = double_to_int(floor(num as f64, 0))
}
let mut f: i128 = 3;
while f.pow(2) <= num {
if num % f == 0 {
result.push(f);
num /= f;
num = double_to_int(floor(num as f64, 0))
} else {
f += 2
}
}
if num != 1 {
result.push(num)
}
result
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn basic() {
assert_eq!(trial_division(0), vec![]);
assert_eq!(trial_division(1), vec![]);
assert_eq!(trial_division(9), vec!(3, 3));
assert_eq!(trial_division(-9), vec!(3, 3));
assert_eq!(trial_division(10), vec!(2, 5));
assert_eq!(trial_division(11), vec!(11));
assert_eq!(trial_division(33), vec!(3, 11));
assert_eq!(trial_division(2003), vec!(2003));
assert_eq!(trial_division(100001), vec!(11, 9091));
}
}
| rust | {
"argument_definitions": [],
"end_line": 41,
"name": "trial_division",
"signature": "pub fn trial_division(mut num: i128) -> Vec<i128>",
"start_line": 10
} | {
"class_name": "",
"class_signature": ""
} |
zellers_congruence_algorithm | Rust-master/src/math/zellers_congruence_algorithm.rs | pub fn zellers_congruence_algorithm(date: i32, month: i32, year: i32, as_string: bool) -> String {
let q = date;
let (m, y) = if month < 3 {
(month + 12, year - 1)
} else {
(month, year)
};
let day: i32 =
(q + (26 * (m + 1) / 10) + (y % 100) + ((y % 100) / 4) + ((y / 100) / 4) + (5 * (y / 100)))
% 7;
if as_string {
number_to_day(day)
} else {
day.to_string()
}
/* Note that the day follows the following guidelines:
0 = Saturday
1 = Sunday
2 = Monday
3 = Tuesday
4 = Wednesday
5 = Thursday
6 = Friday
*/
} | // returns the day of the week from the Gregorian Date
pub fn zellers_congruence_algorithm(date: i32, month: i32, year: i32, as_string: bool) -> String {
let q = date;
let (m, y) = if month < 3 {
(month + 12, year - 1)
} else {
(month, year)
};
let day: i32 =
(q + (26 * (m + 1) / 10) + (y % 100) + ((y % 100) / 4) + ((y / 100) / 4) + (5 * (y / 100)))
% 7;
if as_string {
number_to_day(day)
} else {
day.to_string()
}
/* Note that the day follows the following guidelines:
0 = Saturday
1 = Sunday
2 = Monday
3 = Tuesday
4 = Wednesday
5 = Thursday
6 = Friday
*/
}
fn number_to_day(number: i32) -> String {
let days = [
"Saturday",
"Sunday",
"Monday",
"Tuesday",
"Wednesday",
"Thursday",
"Friday",
];
String::from(days[number as usize])
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn it_works() {
assert_eq!(zellers_congruence_algorithm(25, 1, 2013, false), "6");
assert_eq!(zellers_congruence_algorithm(25, 1, 2013, true), "Friday");
assert_eq!(zellers_congruence_algorithm(16, 4, 2022, false), "0");
assert_eq!(zellers_congruence_algorithm(16, 4, 2022, true), "Saturday");
assert_eq!(zellers_congruence_algorithm(14, 12, 1978, false), "5");
assert_eq!(zellers_congruence_algorithm(15, 6, 2021, false), "3");
}
}
| rust | {
"argument_definitions": [],
"end_line": 27,
"name": "zellers_congruence_algorithm",
"signature": "pub fn zellers_congruence_algorithm(date: i32, month: i32, year: i32, as_string: bool) -> String",
"start_line": 3
} | {
"class_name": "",
"class_signature": ""
} |
prime_numbers | Rust-master/src/math/prime_numbers.rs | pub fn prime_numbers(max: usize) -> Vec<usize> {
let mut result: Vec<usize> = Vec::new();
if max >= 2 {
result.push(2)
}
for i in (3..=max).step_by(2) {
let stop: usize = (i as f64).sqrt() as usize + 1;
let mut status = true;
for j in (3..stop).step_by(2) {
if i % j == 0 {
status = false;
break;
}
}
if status {
result.push(i)
}
}
result
} | pub fn prime_numbers(max: usize) -> Vec<usize> {
let mut result: Vec<usize> = Vec::new();
if max >= 2 {
result.push(2)
}
for i in (3..=max).step_by(2) {
let stop: usize = (i as f64).sqrt() as usize + 1;
let mut status = true;
for j in (3..stop).step_by(2) {
if i % j == 0 {
status = false;
break;
}
}
if status {
result.push(i)
}
}
result
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn basic() {
assert_eq!(prime_numbers(0), vec![]);
assert_eq!(prime_numbers(11), vec![2, 3, 5, 7, 11]);
assert_eq!(prime_numbers(25), vec![2, 3, 5, 7, 11, 13, 17, 19, 23]);
assert_eq!(
prime_numbers(33),
vec![2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31]
);
}
}
| rust | {
"argument_definitions": [],
"end_line": 23,
"name": "prime_numbers",
"signature": "pub fn prime_numbers(max: usize) -> Vec<usize>",
"start_line": 1
} | {
"class_name": "",
"class_signature": ""
} |
contains_cell | alacritty-master/alacritty_terminal/src/selection.rs | pub fn contains_cell(
&self,
indexed: &Indexed<&Cell>,
point: Point,
shape: CursorShape,
) -> bool {
// Do not invert block cursor at selection boundaries.
if shape == CursorShape::Block
&& point == indexed.point
&& (self.start == indexed.point
|| self.end == indexed.point
|| (self.is_block
&& ((self.start.line == indexed.point.line
&& self.end.column == indexed.point.column)
|| (self.end.line == indexed.point.line
&& self.start.column == indexed.point.column))))
{
return false;
}
// Point itself is selected.
if self.contains(indexed.point) {
return true;
}
// Check if a wide char's trailing spacer is selected.
indexed.cell.flags().contains(Flags::WIDE_CHAR)
&& self.contains(Point::new(indexed.point.line, indexed.point.column + 1))
} | //! State management for a selection in the grid.
//!
//! A selection should start when the mouse is clicked, and it should be
//! finalized when the button is released. The selection should be cleared
//! when text is added/removed/scrolled on the screen. The selection should
//! also be cleared if the user clicks off of the selection.
use std::cmp::min;
use std::mem;
use std::ops::{Bound, Range, RangeBounds};
use crate::grid::{Dimensions, GridCell, Indexed};
use crate::index::{Boundary, Column, Line, Point, Side};
use crate::term::cell::{Cell, Flags};
use crate::term::Term;
use crate::vte::ansi::CursorShape;
/// A Point and side within that point.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
struct Anchor {
point: Point,
side: Side,
}
impl Anchor {
fn new(point: Point, side: Side) -> Anchor {
Anchor { point, side }
}
}
/// Represents a range of selected cells.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub struct SelectionRange {
/// Start point, top left of the selection.
pub start: Point,
/// End point, bottom right of the selection.
pub end: Point,
/// Whether this selection is a block selection.
pub is_block: bool,
}
impl SelectionRange {
pub fn new(start: Point, end: Point, is_block: bool) -> Self {
assert!(start <= end);
Self { start, end, is_block }
}
}
impl SelectionRange {
/// Check if a point lies within the selection.
pub fn contains(&self, point: Point) -> bool {
self.start.line <= point.line
&& self.end.line >= point.line
&& (self.start.column <= point.column
|| (self.start.line != point.line && !self.is_block))
&& (self.end.column >= point.column || (self.end.line != point.line && !self.is_block))
}
/// Check if the cell at a point is part of the selection.
pub fn contains_cell(
&self,
indexed: &Indexed<&Cell>,
point: Point,
shape: CursorShape,
) -> bool {
// Do not invert block cursor at selection boundaries.
if shape == CursorShape::Block
&& point == indexed.point
&& (self.start == indexed.point
|| self.end == indexed.point
|| (self.is_block
&& ((self.start.line == indexed.point.line
&& self.end.column == indexed.point.column)
|| (self.end.line == indexed.point.line
&& self.start.column == indexed.point.column))))
{
return false;
}
// Point itself is selected.
if self.contains(indexed.point) {
return true;
}
// Check if a wide char's trailing spacer is selected.
indexed.cell.flags().contains(Flags::WIDE_CHAR)
&& self.contains(Point::new(indexed.point.line, indexed.point.column + 1))
}
}
/// Different kinds of selection.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum SelectionType {
Simple,
Block,
Semantic,
Lines,
}
/// Describes a region of a 2-dimensional area.
///
/// Used to track a text selection. There are four supported modes, each with its own constructor:
/// [`simple`], [`block`], [`semantic`], and [`lines`]. The [`simple`] mode precisely tracks which
/// cells are selected without any expansion. [`block`] will select rectangular regions.
/// [`semantic`] mode expands the initial selection to the nearest semantic escape char in either
/// direction. [`lines`] will always select entire lines.
///
/// Calls to [`update`] operate different based on the selection kind. The [`simple`] and [`block`]
/// mode do nothing special, simply track points and sides. [`semantic`] will continue to expand
/// out to semantic boundaries as the selection point changes. Similarly, [`lines`] will always
/// expand the new point to encompass entire lines.
///
/// [`simple`]: enum.Selection.html#method.simple
/// [`block`]: enum.Selection.html#method.block
/// [`semantic`]: enum.Selection.html#method.semantic
/// [`lines`]: enum.Selection.html#method.lines
/// [`update`]: enum.Selection.html#method.update
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct Selection {
pub ty: SelectionType,
region: Range<Anchor>,
}
impl Selection {
pub fn new(ty: SelectionType, location: Point, side: Side) -> Selection {
Self {
region: Range { start: Anchor::new(location, side), end: Anchor::new(location, side) },
ty,
}
}
/// Update the end of the selection.
pub fn update(&mut self, point: Point, side: Side) {
self.region.end = Anchor::new(point, side);
}
pub fn rotate<D: Dimensions>(
mut self,
dimensions: &D,
range: &Range<Line>,
delta: i32,
) -> Option<Selection> {
let bottommost_line = dimensions.bottommost_line();
let range_bottom = range.end;
let range_top = range.start;
let (mut start, mut end) = (&mut self.region.start, &mut self.region.end);
if start.point > end.point {
mem::swap(&mut start, &mut end);
}
// Rotate start of selection.
if (start.point.line >= range_top || range_top == 0) && start.point.line < range_bottom {
start.point.line = min(start.point.line - delta, bottommost_line);
// If end is within the same region, delete selection once start rotates out.
if start.point.line >= range_bottom && end.point.line < range_bottom {
return None;
}
// Clamp selection to start of region.
if start.point.line < range_top && range_top != 0 {
if self.ty != SelectionType::Block {
start.point.column = Column(0);
start.side = Side::Left;
}
start.point.line = range_top;
}
}
// Rotate end of selection.
if (end.point.line >= range_top || range_top == 0) && end.point.line < range_bottom {
end.point.line = min(end.point.line - delta, bottommost_line);
// Delete selection if end has overtaken the start.
if end.point.line < start.point.line {
return None;
}
// Clamp selection to end of region.
if end.point.line >= range_bottom {
if self.ty != SelectionType::Block {
end.point.column = dimensions.last_column();
end.side = Side::Right;
}
end.point.line = range_bottom - 1;
}
}
Some(self)
}
pub fn is_empty(&self) -> bool {
match self.ty {
SelectionType::Simple => {
let (mut start, mut end) = (self.region.start, self.region.end);
if start.point > end.point {
mem::swap(&mut start, &mut end);
}
// Simple selection is empty when the points are identical
// or two adjacent cells have the sides right -> left.
start == end
|| (start.side == Side::Right
&& end.side == Side::Left
&& (start.point.line == end.point.line)
&& start.point.column + 1 == end.point.column)
},
SelectionType::Block => {
let (start, end) = (self.region.start, self.region.end);
// Block selection is empty when the points' columns and sides are identical
// or two cells with adjacent columns have the sides right -> left,
// regardless of their lines
(start.point.column == end.point.column && start.side == end.side)
|| (start.point.column + 1 == end.point.column
&& start.side == Side::Right
&& end.side == Side::Left)
|| (end.point.column + 1 == start.point.column
&& start.side == Side::Left
&& end.side == Side::Right)
},
SelectionType::Semantic | SelectionType::Lines => false,
}
}
/// Check whether selection contains any point in a given range.
pub fn intersects_range<R: RangeBounds<Line>>(&self, range: R) -> bool {
let mut start = self.region.start.point.line;
let mut end = self.region.end.point.line;
if start > end {
mem::swap(&mut start, &mut end);
}
let range_top = match range.start_bound() {
Bound::Included(&range_start) => range_start,
Bound::Excluded(&range_start) => range_start + 1,
Bound::Unbounded => Line(i32::MIN),
};
let range_bottom = match range.end_bound() {
Bound::Included(&range_end) => range_end,
Bound::Excluded(&range_end) => range_end - 1,
Bound::Unbounded => Line(i32::MAX),
};
range_bottom >= start && range_top <= end
}
/// Expand selection sides to include all cells.
pub fn include_all(&mut self) {
let (start, end) = (self.region.start.point, self.region.end.point);
let (start_side, end_side) = match self.ty {
SelectionType::Block
if start.column > end.column
|| (start.column == end.column && start.line > end.line) =>
{
(Side::Right, Side::Left)
},
SelectionType::Block => (Side::Left, Side::Right),
_ if start > end => (Side::Right, Side::Left),
_ => (Side::Left, Side::Right),
};
self.region.start.side = start_side;
self.region.end.side = end_side;
}
/// Convert selection to grid coordinates.
pub fn to_range<T>(&self, term: &Term<T>) -> Option<SelectionRange> {
let grid = term.grid();
let columns = grid.columns();
// Order start above the end.
let mut start = self.region.start;
let mut end = self.region.end;
if start.point > end.point {
mem::swap(&mut start, &mut end);
}
// Clamp selection to within grid boundaries.
if end.point.line < term.topmost_line() {
return None;
}
start.point = start.point.grid_clamp(term, Boundary::Grid);
end.point = end.point.grid_clamp(term, Boundary::Grid);
match self.ty {
SelectionType::Simple => self.range_simple(start, end, columns),
SelectionType::Block => self.range_block(start, end),
SelectionType::Semantic => Some(Self::range_semantic(term, start.point, end.point)),
SelectionType::Lines => Some(Self::range_lines(term, start.point, end.point)),
}
}
fn range_semantic<T>(term: &Term<T>, mut start: Point, mut end: Point) -> SelectionRange {
if start == end {
if let Some(matching) = term.bracket_search(start) {
if (matching.line == start.line && matching.column < start.column)
|| (matching.line < start.line)
{
start = matching;
} else {
end = matching;
}
return SelectionRange { start, end, is_block: false };
}
}
let start = term.semantic_search_left(start);
let end = term.semantic_search_right(end);
SelectionRange { start, end, is_block: false }
}
fn range_lines<T>(term: &Term<T>, start: Point, end: Point) -> SelectionRange {
let start = term.line_search_left(start);
let end = term.line_search_right(end);
SelectionRange { start, end, is_block: false }
}
fn range_simple(
&self,
mut start: Anchor,
mut end: Anchor,
columns: usize,
) -> Option<SelectionRange> {
if self.is_empty() {
return None;
}
// Remove last cell if selection ends to the left of a cell.
if end.side == Side::Left && start.point != end.point {
// Special case when selection ends to left of first cell.
if end.point.column == 0 {
end.point.column = Column(columns - 1);
end.point.line -= 1;
} else {
end.point.column -= 1;
}
}
// Remove first cell if selection starts at the right of a cell.
if start.side == Side::Right && start.point != end.point {
start.point.column += 1;
// Wrap to next line when selection starts to the right of last column.
if start.point.column == columns {
start.point.column = Column(0);
start.point.line += 1;
}
}
Some(SelectionRange { start: start.point, end: end.point, is_block: false })
}
fn range_block(&self, mut start: Anchor, mut end: Anchor) -> Option<SelectionRange> {
if self.is_empty() {
return None;
}
// Always go top-left -> bottom-right.
if start.point.column > end.point.column {
mem::swap(&mut start.side, &mut end.side);
mem::swap(&mut start.point.column, &mut end.point.column);
}
// Remove last cell if selection ends to the left of a cell.
if end.side == Side::Left && start.point != end.point && end.point.column.0 > 0 {
end.point.column -= 1;
}
// Remove first cell if selection starts at the right of a cell.
if start.side == Side::Right && start.point != end.point {
start.point.column += 1;
}
Some(SelectionRange { start: start.point, end: end.point, is_block: true })
}
}
/// Tests for selection.
///
/// There are comments on all of the tests describing the selection. Pictograms
/// are used to avoid ambiguity. Grid cells are represented by a [ ]. Only
/// cells that are completely covered are counted in a selection. Ends are
/// represented by `B` and `E` for begin and end, respectively. A selected cell
/// looks like [XX], [BX] (at the start), [XB] (at the end), [XE] (at the end),
/// and [EX] (at the start), or [BE] for a single cell. Partially selected cells
/// look like [ B] and [E ].
#[cfg(test)]
mod tests {
use super::*;
use crate::index::{Column, Point, Side};
use crate::term::test::TermSize;
use crate::term::{Config, Term};
fn term(height: usize, width: usize) -> Term<()> {
let size = TermSize::new(width, height);
Term::new(Config::default(), &size, ())
}
/// Test case of single cell selection.
///
/// 1. [ ]
/// 2. [B ]
/// 3. [BE]
#[test]
fn single_cell_left_to_right() {
let location = Point::new(Line(0), Column(0));
let mut selection = Selection::new(SelectionType::Simple, location, Side::Left);
selection.update(location, Side::Right);
assert_eq!(selection.to_range(&term(1, 2)).unwrap(), SelectionRange {
start: location,
end: location,
is_block: false
});
}
/// Test case of single cell selection.
///
/// 1. [ ]
/// 2. [ B]
/// 3. [EB]
#[test]
fn single_cell_right_to_left() {
let location = Point::new(Line(0), Column(0));
let mut selection = Selection::new(SelectionType::Simple, location, Side::Right);
selection.update(location, Side::Left);
assert_eq!(selection.to_range(&term(1, 2)).unwrap(), SelectionRange {
start: location,
end: location,
is_block: false
});
}
/// Test adjacent cell selection from left to right.
///
/// 1. [ ][ ]
/// 2. [ B][ ]
/// 3. [ B][E ]
#[test]
fn between_adjacent_cells_left_to_right() {
let mut selection =
Selection::new(SelectionType::Simple, Point::new(Line(0), Column(0)), Side::Right);
selection.update(Point::new(Line(0), Column(1)), Side::Left);
assert_eq!(selection.to_range(&term(1, 2)), None);
}
/// Test adjacent cell selection from right to left.
///
/// 1. [ ][ ]
/// 2. [ ][B ]
/// 3. [ E][B ]
#[test]
fn between_adjacent_cells_right_to_left() {
let mut selection =
Selection::new(SelectionType::Simple, Point::new(Line(0), Column(1)), Side::Left);
selection.update(Point::new(Line(0), Column(0)), Side::Right);
assert_eq!(selection.to_range(&term(1, 2)), None);
}
#[rustfmt::skip]
/// Test selection across adjacent lines.
///
/// 1. [ ][ ][ ][ ][ ]
/// [ ][ ][ ][ ][ ]
/// 2. [ ][ B][ ][ ][ ]
/// [ ][ ][ ][ ][ ]
/// 3. [ ][ B][XX][XX][XX]
/// [XX][XE][ ][ ][ ]
#[test]
fn across_adjacent_lines_upward_final_cell_exclusive() {
let mut selection =
Selection::new(SelectionType::Simple, Point::new(Line(0), Column(1)), Side::Right);
selection.update(Point::new(Line(1), Column(1)), Side::Right);
assert_eq!(selection.to_range(&term(2, 5)).unwrap(), SelectionRange {
start: Point::new(Line(0), Column(2)),
end: Point::new(Line(1), Column(1)),
is_block: false,
});
}
#[rustfmt::skip]
/// Test selection across adjacent lines.
///
/// 1. [ ][ ][ ][ ][ ]
/// [ ][ ][ ][ ][ ]
/// 2. [ ][ ][ ][ ][ ]
/// [ ][ B][ ][ ][ ]
/// 3. [ ][ E][XX][XX][XX]
/// [XX][XB][ ][ ][ ]
/// 4. [ E][XX][XX][XX][XX]
/// [XX][XB][ ][ ][ ]
#[test]
fn selection_bigger_then_smaller() {
let mut selection =
Selection::new(SelectionType::Simple, Point::new(Line(1), Column(1)), Side::Right);
selection.update(Point::new(Line(0), Column(1)), Side::Right);
selection.update(Point::new(Line(0), Column(0)), Side::Right);
assert_eq!(selection.to_range(&term(2, 5)).unwrap(), SelectionRange {
start: Point::new(Line(0), Column(1)),
end: Point::new(Line(1), Column(1)),
is_block: false,
});
}
#[test]
fn line_selection() {
let size = (10, 5);
let mut selection =
Selection::new(SelectionType::Lines, Point::new(Line(9), Column(1)), Side::Left);
selection.update(Point::new(Line(4), Column(1)), Side::Right);
selection = selection.rotate(&size, &(Line(0)..Line(size.0 as i32)), 4).unwrap();
assert_eq!(selection.to_range(&term(size.0, size.1)).unwrap(), SelectionRange {
start: Point::new(Line(0), Column(0)),
end: Point::new(Line(5), Column(4)),
is_block: false,
});
}
#[test]
fn semantic_selection() {
let size = (10, 5);
let mut selection =
Selection::new(SelectionType::Semantic, Point::new(Line(9), Column(3)), Side::Left);
selection.update(Point::new(Line(4), Column(1)), Side::Right);
selection = selection.rotate(&size, &(Line(0)..Line(size.0 as i32)), 4).unwrap();
assert_eq!(selection.to_range(&term(size.0, size.1)).unwrap(), SelectionRange {
start: Point::new(Line(0), Column(1)),
end: Point::new(Line(5), Column(3)),
is_block: false,
});
}
#[test]
fn simple_selection() {
let size = (10, 5);
let mut selection =
Selection::new(SelectionType::Simple, Point::new(Line(9), Column(3)), Side::Right);
selection.update(Point::new(Line(4), Column(1)), Side::Right);
selection = selection.rotate(&size, &(Line(0)..Line(size.0 as i32)), 4).unwrap();
assert_eq!(selection.to_range(&term(size.0, size.1)).unwrap(), SelectionRange {
start: Point::new(Line(0), Column(2)),
end: Point::new(Line(5), Column(3)),
is_block: false,
});
}
#[test]
fn block_selection() {
let size = (10, 5);
let mut selection =
Selection::new(SelectionType::Block, Point::new(Line(9), Column(3)), Side::Right);
selection.update(Point::new(Line(4), Column(1)), Side::Right);
selection = selection.rotate(&size, &(Line(0)..Line(size.0 as i32)), 4).unwrap();
assert_eq!(selection.to_range(&term(size.0, size.1)).unwrap(), SelectionRange {
start: Point::new(Line(0), Column(2)),
end: Point::new(Line(5), Column(3)),
is_block: true
});
}
#[test]
fn simple_is_empty() {
let mut selection =
Selection::new(SelectionType::Simple, Point::new(Line(1), Column(0)), Side::Right);
assert!(selection.is_empty());
selection.update(Point::new(Line(1), Column(1)), Side::Left);
assert!(selection.is_empty());
selection.update(Point::new(Line(0), Column(0)), Side::Right);
assert!(!selection.is_empty());
}
#[test]
fn block_is_empty() {
let mut selection =
Selection::new(SelectionType::Block, Point::new(Line(1), Column(0)), Side::Right);
assert!(selection.is_empty());
selection.update(Point::new(Line(1), Column(1)), Side::Left);
assert!(selection.is_empty());
selection.update(Point::new(Line(1), Column(1)), Side::Right);
assert!(!selection.is_empty());
selection.update(Point::new(Line(0), Column(0)), Side::Right);
assert!(selection.is_empty());
selection.update(Point::new(Line(0), Column(1)), Side::Left);
assert!(selection.is_empty());
selection.update(Point::new(Line(0), Column(1)), Side::Right);
assert!(!selection.is_empty());
}
#[test]
fn rotate_in_region_up() {
let size = (10, 5);
let mut selection =
Selection::new(SelectionType::Simple, Point::new(Line(7), Column(3)), Side::Right);
selection.update(Point::new(Line(4), Column(1)), Side::Right);
selection = selection.rotate(&size, &(Line(1)..Line(size.0 as i32 - 1)), 4).unwrap();
assert_eq!(selection.to_range(&term(size.0, size.1)).unwrap(), SelectionRange {
start: Point::new(Line(1), Column(0)),
end: Point::new(Line(3), Column(3)),
is_block: false,
});
}
#[test]
fn rotate_in_region_down() {
let size = (10, 5);
let mut selection =
Selection::new(SelectionType::Simple, Point::new(Line(4), Column(3)), Side::Right);
selection.update(Point::new(Line(1), Column(1)), Side::Left);
selection = selection.rotate(&size, &(Line(1)..Line(size.0 as i32 - 1)), -5).unwrap();
assert_eq!(selection.to_range(&term(size.0, size.1)).unwrap(), SelectionRange {
start: Point::new(Line(6), Column(1)),
end: Point::new(Line(8), size.last_column()),
is_block: false,
});
}
#[test]
fn rotate_in_region_up_block() {
let size = (10, 5);
let mut selection =
Selection::new(SelectionType::Block, Point::new(Line(7), Column(3)), Side::Right);
selection.update(Point::new(Line(4), Column(1)), Side::Right);
selection = selection.rotate(&size, &(Line(1)..Line(size.0 as i32 - 1)), 4).unwrap();
assert_eq!(selection.to_range(&term(size.0, size.1)).unwrap(), SelectionRange {
start: Point::new(Line(1), Column(2)),
end: Point::new(Line(3), Column(3)),
is_block: true,
});
}
#[test]
fn range_intersection() {
let mut selection =
Selection::new(SelectionType::Lines, Point::new(Line(3), Column(1)), Side::Left);
selection.update(Point::new(Line(6), Column(1)), Side::Right);
assert!(selection.intersects_range(..));
assert!(selection.intersects_range(Line(2)..));
assert!(selection.intersects_range(Line(2)..=Line(4)));
assert!(selection.intersects_range(Line(2)..=Line(7)));
assert!(selection.intersects_range(Line(4)..=Line(5)));
assert!(selection.intersects_range(Line(5)..Line(8)));
assert!(!selection.intersects_range(..=Line(2)));
assert!(!selection.intersects_range(Line(7)..=Line(8)));
}
}
| rust | {
"argument_definitions": [
{
"definitions": [
"pub struct Indexed<T> {\n pub point: Point,\n pub cell: T,\n}",
"pub struct Cell {\n pub c: char,\n pub fg: Color,\n pub bg: Color,\n pub flags: Flags,\n pub extra: Option<Arc<CellExtra>>,\n}"
],
"name": "indexed",
"type": "&Indexed<&Cell>"
},
{
"definitions": [
"pub struct Point<L = Line, C = Column> {\n pub line: L,\n pub column: C,\n}"
],
"name": "point",
"type": "Point"
},
{
"definitions": [
"pub enum CursorShape {\n /// Cursor is a block like `▒`.\n #[default]\n Block,\n\n /// Cursor is an underscore like `_`.\n Underline,\n\n /// Cursor is a vertical bar `⎸`.\n Beam,\n\n /// Cursor is a box like `☐`.\n HollowBlock,\n\n /// Invisible cursor.\n Hidden,\n}"
],
"name": "shape",
"type": "CursorShape"
}
],
"end_line": 88,
"name": "contains_cell",
"signature": "pub fn contains_cell(\n &self,\n indexed: &Indexed<&Cell>,\n point: Point,\n shape: CursorShape,\n ) -> bool",
"start_line": 60
} | {
"class_name": "impl SelectionRange {\n /// Check if a point lies within the selection.\n pub fn contains(&self, point: Point) -> bool {\n self.start.line <= point.line\n && self.end.line >= point.line\n && (self.start.column <= point.column\n || (self.start.line != point.line && !self.is_block))\n && (self.end.column >= point.column || (self.end.line != point.line && !self.is_block))\n }\n\n /// Check if the cell at a point is part of the selection.\n pub fn contains_cell(\n &self,\n indexed: &Indexed<&Cell>,\n point: Point,\n shape: CursorShape,\n ) -> bool {\n // Do not invert block cursor at selection boundaries.\n if shape == CursorShape::Block\n && point == indexed.point\n && (self.start == indexed.point\n || self.end == indexed.point\n || (self.is_block\n && ((self.start.line == indexed.point.line\n && self.end.column == indexed.point.column)\n || (self.end.line == indexed.point.line\n && self.start.column == indexed.point.column))))\n {\n return false;\n }\n\n // Point itself is selected.\n if self.contains(indexed.point) {\n return true;\n }\n\n // Check if a wide char's trailing spacer is selected.\n indexed.cell.flags().contains(Flags::WIDE_CHAR)\n && self.contains(Point::new(indexed.point.line, indexed.point.column + 1))\n }\n}",
"class_signature": "impl SelectionRange"
} |
rotate | alacritty-master/alacritty_terminal/src/selection.rs | pub fn rotate(
        mut self,
        dimensions: &D,
        range: &Range<Line>,
        delta: i32,
    ) -> Option<Selection> {
        let bottommost_line = dimensions.bottommost_line();
        let range_bottom = range.end;
        let range_top = range.start;
        // Order the anchors so `start` is always the upper one before rotating.
        let (mut start, mut end) = (&mut self.region.start, &mut self.region.end);
        if start.point > end.point {
            mem::swap(&mut start, &mut end);
        }
        // Rotate start of selection. A region starting at line 0 also rotates
        // points above it (scrollback lines are negative).
        if (start.point.line >= range_top || range_top == 0) && start.point.line < range_bottom {
            // Positive `delta` moves the anchor towards smaller line numbers.
            start.point.line = min(start.point.line - delta, bottommost_line);
            // If end is within the same region, delete selection once start rotates out.
            if start.point.line >= range_bottom && end.point.line < range_bottom {
                return None;
            }
            // Clamp selection to start of region.
            if start.point.line < range_top && range_top != 0 {
                // Block selections keep their columns; others snap to the line start.
                if self.ty != SelectionType::Block {
                    start.point.column = Column(0);
                    start.side = Side::Left;
                }
                start.point.line = range_top;
            }
        }
        // Rotate end of selection.
        if (end.point.line >= range_top || range_top == 0) && end.point.line < range_bottom {
            end.point.line = min(end.point.line - delta, bottommost_line);
            // Delete selection if end has overtaken the start.
            if end.point.line < start.point.line {
                return None;
            }
            // Clamp selection to end of region.
            if end.point.line >= range_bottom {
                // Block selections keep their columns; others snap to the line end.
                if self.ty != SelectionType::Block {
                    end.point.column = dimensions.last_column();
                    end.side = Side::Right;
                }
                end.point.line = range_bottom - 1;
            }
        }
        Some(self)
    } | //! State management for a selection in the grid.
//!
//! A selection should start when the mouse is clicked, and it should be
//! finalized when the button is released. The selection should be cleared
//! when text is added/removed/scrolled on the screen. The selection should
//! also be cleared if the user clicks off of the selection.
use std::cmp::min;
use std::mem;
use std::ops::{Bound, Range, RangeBounds};
use crate::grid::{Dimensions, GridCell, Indexed};
use crate::index::{Boundary, Column, Line, Point, Side};
use crate::term::cell::{Cell, Flags};
use crate::term::Term;
use crate::vte::ansi::CursorShape;
/// A Point and side within that point.
///
/// The side records which half of the cell the anchor touches, so boundary
/// cells that are only half covered can be trimmed from the final range.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
struct Anchor {
    point: Point,
    side: Side,
}
impl Anchor {
fn new(point: Point, side: Side) -> Anchor {
Anchor { point, side }
}
}
/// Represents a range of selected cells.
///
/// Both endpoints are inclusive, and `start` is always ordered at or before
/// `end` (top-left to bottom-right).
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub struct SelectionRange {
    /// Start point, top left of the selection.
    pub start: Point,
    /// End point, bottom right of the selection.
    pub end: Point,
    /// Whether this selection is a block selection.
    pub is_block: bool,
}
impl SelectionRange {
    /// Create a new selection range from ordered endpoints.
    ///
    /// # Panics
    ///
    /// Panics if `start` is ordered after `end`.
    pub fn new(start: Point, end: Point, is_block: bool) -> Self {
        assert!(start <= end);
        Self { start, end, is_block }
    }
}
impl SelectionRange {
    /// Check if a point lies within the selection.
    ///
    /// For block selections the point's column must fall inside the rectangle
    /// on every line; for non-block selections the column only constrains the
    /// first and last line of the range.
    pub fn contains(&self, point: Point) -> bool {
        self.start.line <= point.line
            && self.end.line >= point.line
            && (self.start.column <= point.column
                || (self.start.line != point.line && !self.is_block))
            && (self.end.column >= point.column || (self.end.line != point.line && !self.is_block))
    }
    /// Check if the cell at a point is part of the selection.
    ///
    /// Unlike [`Self::contains`] this also handles a block-shaped cursor
    /// sitting on a selection boundary and wide characters whose trailing
    /// spacer cell falls inside the selection.
    pub fn contains_cell(
        &self,
        indexed: &Indexed<&Cell>,
        point: Point,
        shape: CursorShape,
    ) -> bool {
        // Do not invert block cursor at selection boundaries.
        if shape == CursorShape::Block
            && point == indexed.point
            && (self.start == indexed.point
                || self.end == indexed.point
                || (self.is_block
                    // For block selections, all four corners count as boundaries.
                    && ((self.start.line == indexed.point.line
                        && self.end.column == indexed.point.column)
                        || (self.end.line == indexed.point.line
                            && self.start.column == indexed.point.column))))
        {
            return false;
        }
        // Point itself is selected.
        if self.contains(indexed.point) {
            return true;
        }
        // Check if a wide char's trailing spacer is selected.
        indexed.cell.flags().contains(Flags::WIDE_CHAR)
            && self.contains(Point::new(indexed.point.line, indexed.point.column + 1))
    }
}
/// Different kinds of selection.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum SelectionType {
    /// Track exactly the cells between the two anchors.
    Simple,
    /// Select the rectangular region spanned by the anchors.
    Block,
    /// Expand both ends to the nearest semantic boundary.
    Semantic,
    /// Expand both ends to cover entire lines.
    Lines,
}
/// Describes a region of a 2-dimensional area.
///
/// Used to track a text selection. There are four supported modes, each with its own constructor:
/// [`simple`], [`block`], [`semantic`], and [`lines`]. The [`simple`] mode precisely tracks which
/// cells are selected without any expansion. [`block`] will select rectangular regions.
/// [`semantic`] mode expands the initial selection to the nearest semantic escape char in either
/// direction. [`lines`] will always select entire lines.
///
/// Calls to [`update`] operate differently based on the selection kind. The [`simple`] and [`block`]
/// modes do nothing special, simply tracking points and sides. [`semantic`] will continue to expand
/// out to semantic boundaries as the selection point changes. Similarly, [`lines`] will always
/// expand the new point to encompass entire lines.
///
/// [`simple`]: enum.Selection.html#method.simple
/// [`block`]: enum.Selection.html#method.block
/// [`semantic`]: enum.Selection.html#method.semantic
/// [`lines`]: enum.Selection.html#method.lines
/// [`update`]: enum.Selection.html#method.update
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct Selection {
    /// Which expansion strategy this selection uses.
    pub ty: SelectionType,
    // The two anchors: `start` is where the selection began, `end` tracks the
    // latest update. They are not necessarily ordered top-to-bottom.
    region: Range<Anchor>,
}
impl Selection {
    /// Create a selection of the given kind with both anchors collapsed onto
    /// `location`; call [`Self::update`] afterwards to grow it.
    pub fn new(ty: SelectionType, location: Point, side: Side) -> Selection {
        Self {
            region: Range { start: Anchor::new(location, side), end: Anchor::new(location, side) },
            ty,
        }
    }
    /// Update the end of the selection.
    pub fn update(&mut self, point: Point, side: Side) {
        self.region.end = Anchor::new(point, side);
    }
    /// Move the selection along with the content of a scrolling region.
    ///
    /// `range` is the half-open region of rotated lines; a positive `delta`
    /// moves anchors towards smaller line numbers. Anchors leaving the region
    /// are clamped to its edges, and `None` is returned once the selection
    /// has been rotated out of the region entirely.
    pub fn rotate<D: Dimensions>(
        mut self,
        dimensions: &D,
        range: &Range<Line>,
        delta: i32,
    ) -> Option<Selection> {
        let bottommost_line = dimensions.bottommost_line();
        let range_bottom = range.end;
        let range_top = range.start;
        // Order the anchors so `start` is always the upper one before rotating.
        let (mut start, mut end) = (&mut self.region.start, &mut self.region.end);
        if start.point > end.point {
            mem::swap(&mut start, &mut end);
        }
        // Rotate start of selection. A region starting at line 0 also rotates
        // points above it (scrollback lines are negative).
        if (start.point.line >= range_top || range_top == 0) && start.point.line < range_bottom {
            start.point.line = min(start.point.line - delta, bottommost_line);
            // If end is within the same region, delete selection once start rotates out.
            if start.point.line >= range_bottom && end.point.line < range_bottom {
                return None;
            }
            // Clamp selection to start of region.
            if start.point.line < range_top && range_top != 0 {
                // Block selections keep their columns; others snap to the line start.
                if self.ty != SelectionType::Block {
                    start.point.column = Column(0);
                    start.side = Side::Left;
                }
                start.point.line = range_top;
            }
        }
        // Rotate end of selection.
        if (end.point.line >= range_top || range_top == 0) && end.point.line < range_bottom {
            end.point.line = min(end.point.line - delta, bottommost_line);
            // Delete selection if end has overtaken the start.
            if end.point.line < start.point.line {
                return None;
            }
            // Clamp selection to end of region.
            if end.point.line >= range_bottom {
                // Block selections keep their columns; others snap to the line end.
                if self.ty != SelectionType::Block {
                    end.point.column = dimensions.last_column();
                    end.side = Side::Right;
                }
                end.point.line = range_bottom - 1;
            }
        }
        Some(self)
    }
    /// Whether the selection covers no full cell.
    ///
    /// Only `Simple` and `Block` selections can be empty; `Semantic` and
    /// `Lines` selections are never reported as empty.
    pub fn is_empty(&self) -> bool {
        match self.ty {
            SelectionType::Simple => {
                let (mut start, mut end) = (self.region.start, self.region.end);
                if start.point > end.point {
                    mem::swap(&mut start, &mut end);
                }
                // Simple selection is empty when the points are identical
                // or two adjacent cells have the sides right -> left.
                start == end
                    || (start.side == Side::Right
                        && end.side == Side::Left
                        && (start.point.line == end.point.line)
                        && start.point.column + 1 == end.point.column)
            },
            SelectionType::Block => {
                let (start, end) = (self.region.start, self.region.end);
                // Block selection is empty when the points' columns and sides are identical
                // or two cells with adjacent columns have the sides right -> left,
                // regardless of their lines
                (start.point.column == end.point.column && start.side == end.side)
                    || (start.point.column + 1 == end.point.column
                        && start.side == Side::Right
                        && end.side == Side::Left)
                    || (end.point.column + 1 == start.point.column
                        && start.side == Side::Left
                        && end.side == Side::Right)
            },
            SelectionType::Semantic | SelectionType::Lines => false,
        }
    }
    /// Check whether selection contains any point in a given range.
    pub fn intersects_range<R: RangeBounds<Line>>(&self, range: R) -> bool {
        let mut start = self.region.start.point.line;
        let mut end = self.region.end.point.line;
        if start > end {
            mem::swap(&mut start, &mut end);
        }
        // Normalize the bounds into inclusive top/bottom lines.
        let range_top = match range.start_bound() {
            Bound::Included(&range_start) => range_start,
            Bound::Excluded(&range_start) => range_start + 1,
            Bound::Unbounded => Line(i32::MIN),
        };
        let range_bottom = match range.end_bound() {
            Bound::Included(&range_end) => range_end,
            Bound::Excluded(&range_end) => range_end - 1,
            Bound::Unbounded => Line(i32::MAX),
        };
        // Two inclusive intervals overlap iff neither lies fully past the other.
        range_bottom >= start && range_top <= end
    }
    /// Expand selection sides to include all cells.
    pub fn include_all(&mut self) {
        let (start, end) = (self.region.start.point, self.region.end.point);
        // Pick outward-facing sides depending on how the anchors are ordered.
        let (start_side, end_side) = match self.ty {
            SelectionType::Block
                if start.column > end.column
                    || (start.column == end.column && start.line > end.line) =>
            {
                (Side::Right, Side::Left)
            },
            SelectionType::Block => (Side::Left, Side::Right),
            _ if start > end => (Side::Right, Side::Left),
            _ => (Side::Left, Side::Right),
        };
        self.region.start.side = start_side;
        self.region.end.side = end_side;
    }
    /// Convert selection to grid coordinates.
    ///
    /// Returns `None` when the selection is empty or lies entirely above the
    /// terminal's topmost line.
    pub fn to_range<T>(&self, term: &Term<T>) -> Option<SelectionRange> {
        let grid = term.grid();
        let columns = grid.columns();
        // Order start above the end.
        let mut start = self.region.start;
        let mut end = self.region.end;
        if start.point > end.point {
            mem::swap(&mut start, &mut end);
        }
        // Clamp selection to within grid boundaries.
        if end.point.line < term.topmost_line() {
            return None;
        }
        start.point = start.point.grid_clamp(term, Boundary::Grid);
        end.point = end.point.grid_clamp(term, Boundary::Grid);
        match self.ty {
            SelectionType::Simple => self.range_simple(start, end, columns),
            SelectionType::Block => self.range_block(start, end),
            SelectionType::Semantic => Some(Self::range_semantic(term, start.point, end.point)),
            SelectionType::Lines => Some(Self::range_lines(term, start.point, end.point)),
        }
    }
    /// Expand `start`/`end` to semantic boundaries, preferring a matching
    /// bracket when the selection is a single point.
    fn range_semantic<T>(term: &Term<T>, mut start: Point, mut end: Point) -> SelectionRange {
        if start == end {
            if let Some(matching) = term.bracket_search(start) {
                // Order the bracket pair so the earlier bracket becomes `start`.
                if (matching.line == start.line && matching.column < start.column)
                    || (matching.line < start.line)
                {
                    start = matching;
                } else {
                    end = matching;
                }
                return SelectionRange { start, end, is_block: false };
            }
        }
        let start = term.semantic_search_left(start);
        let end = term.semantic_search_right(end);
        SelectionRange { start, end, is_block: false }
    }
    /// Expand the selection to cover entire lines.
    fn range_lines<T>(term: &Term<T>, start: Point, end: Point) -> SelectionRange {
        let start = term.line_search_left(start);
        let end = term.line_search_right(end);
        SelectionRange { start, end, is_block: false }
    }
    /// Convert simple-selection anchors to an inclusive cell range, dropping
    /// boundary cells that are only half covered.
    fn range_simple(
        &self,
        mut start: Anchor,
        mut end: Anchor,
        columns: usize,
    ) -> Option<SelectionRange> {
        if self.is_empty() {
            return None;
        }
        // Remove last cell if selection ends to the left of a cell.
        if end.side == Side::Left && start.point != end.point {
            // Special case when selection ends to left of first cell.
            if end.point.column == 0 {
                end.point.column = Column(columns - 1);
                end.point.line -= 1;
            } else {
                end.point.column -= 1;
            }
        }
        // Remove first cell if selection starts at the right of a cell.
        if start.side == Side::Right && start.point != end.point {
            start.point.column += 1;
            // Wrap to next line when selection starts to the right of last column.
            if start.point.column == columns {
                start.point.column = Column(0);
                start.point.line += 1;
            }
        }
        Some(SelectionRange { start: start.point, end: end.point, is_block: false })
    }
    /// Convert block-selection anchors to an inclusive rectangular range.
    fn range_block(&self, mut start: Anchor, mut end: Anchor) -> Option<SelectionRange> {
        if self.is_empty() {
            return None;
        }
        // Always go top-left -> bottom-right.
        if start.point.column > end.point.column {
            mem::swap(&mut start.side, &mut end.side);
            mem::swap(&mut start.point.column, &mut end.point.column);
        }
        // Remove last cell if selection ends to the left of a cell.
        if end.side == Side::Left && start.point != end.point && end.point.column.0 > 0 {
            end.point.column -= 1;
        }
        // Remove first cell if selection starts at the right of a cell.
        if start.side == Side::Right && start.point != end.point {
            start.point.column += 1;
        }
        Some(SelectionRange { start: start.point, end: end.point, is_block: true })
    }
}
/// Tests for selection.
///
/// There are comments on all of the tests describing the selection. Pictograms
/// are used to avoid ambiguity. Grid cells are represented by a [ ]. Only
/// cells that are completely covered are counted in a selection. Ends are
/// represented by `B` and `E` for begin and end, respectively. A selected cell
/// looks like [XX], [BX] (at the start), [XB] (at the end), [XE] (at the end),
/// and [EX] (at the start), or [BE] for a single cell. Partially selected cells
/// look like [ B] and [E ].
#[cfg(test)]
mod tests {
    use super::*;
    use crate::index::{Column, Point, Side};
    use crate::term::test::TermSize;
    use crate::term::{Config, Term};
    /// Build a dummy terminal of `height` x `width` cells for range conversion.
    fn term(height: usize, width: usize) -> Term<()> {
        let size = TermSize::new(width, height);
        Term::new(Config::default(), &size, ())
    }
    /// Test case of single cell selection.
    ///
    /// 1. [ ]
    /// 2. [B ]
    /// 3. [BE]
    #[test]
    fn single_cell_left_to_right() {
        let location = Point::new(Line(0), Column(0));
        let mut selection = Selection::new(SelectionType::Simple, location, Side::Left);
        selection.update(location, Side::Right);
        assert_eq!(selection.to_range(&term(1, 2)).unwrap(), SelectionRange {
            start: location,
            end: location,
            is_block: false
        });
    }
    /// Test case of single cell selection.
    ///
    /// 1. [ ]
    /// 2. [ B]
    /// 3. [EB]
    #[test]
    fn single_cell_right_to_left() {
        let location = Point::new(Line(0), Column(0));
        let mut selection = Selection::new(SelectionType::Simple, location, Side::Right);
        selection.update(location, Side::Left);
        assert_eq!(selection.to_range(&term(1, 2)).unwrap(), SelectionRange {
            start: location,
            end: location,
            is_block: false
        });
    }
    /// Test adjacent cell selection from left to right.
    ///
    /// 1. [ ][ ]
    /// 2. [ B][ ]
    /// 3. [ B][E ]
    #[test]
    fn between_adjacent_cells_left_to_right() {
        let mut selection =
            Selection::new(SelectionType::Simple, Point::new(Line(0), Column(0)), Side::Right);
        selection.update(Point::new(Line(0), Column(1)), Side::Left);
        assert_eq!(selection.to_range(&term(1, 2)), None);
    }
    /// Test adjacent cell selection from right to left.
    ///
    /// 1. [ ][ ]
    /// 2. [ ][B ]
    /// 3. [ E][B ]
    #[test]
    fn between_adjacent_cells_right_to_left() {
        let mut selection =
            Selection::new(SelectionType::Simple, Point::new(Line(0), Column(1)), Side::Left);
        selection.update(Point::new(Line(0), Column(0)), Side::Right);
        assert_eq!(selection.to_range(&term(1, 2)), None);
    }
    #[rustfmt::skip]
    /// Test selection across adjacent lines.
    ///
    /// 1.  [ ][ ][ ][ ][ ]
    ///     [ ][ ][ ][ ][ ]
    /// 2.  [ ][ B][ ][ ][ ]
    ///     [ ][ ][ ][ ][ ]
    /// 3.  [ ][ B][XX][XX][XX]
    ///     [XX][XE][ ][ ][ ]
    #[test]
    fn across_adjacent_lines_upward_final_cell_exclusive() {
        let mut selection =
            Selection::new(SelectionType::Simple, Point::new(Line(0), Column(1)), Side::Right);
        selection.update(Point::new(Line(1), Column(1)), Side::Right);
        assert_eq!(selection.to_range(&term(2, 5)).unwrap(), SelectionRange {
            start: Point::new(Line(0), Column(2)),
            end: Point::new(Line(1), Column(1)),
            is_block: false,
        });
    }
    #[rustfmt::skip]
    /// Test selection across adjacent lines.
    ///
    /// 1.  [ ][ ][ ][ ][ ]
    ///     [ ][ ][ ][ ][ ]
    /// 2.  [ ][ ][ ][ ][ ]
    ///     [ ][ B][ ][ ][ ]
    /// 3.  [ ][ E][XX][XX][XX]
    ///     [XX][XB][ ][ ][ ]
    /// 4.  [ E][XX][XX][XX][XX]
    ///     [XX][XB][ ][ ][ ]
    #[test]
    fn selection_bigger_then_smaller() {
        let mut selection =
            Selection::new(SelectionType::Simple, Point::new(Line(1), Column(1)), Side::Right);
        selection.update(Point::new(Line(0), Column(1)), Side::Right);
        selection.update(Point::new(Line(0), Column(0)), Side::Right);
        assert_eq!(selection.to_range(&term(2, 5)).unwrap(), SelectionRange {
            start: Point::new(Line(0), Column(1)),
            end: Point::new(Line(1), Column(1)),
            is_block: false,
        });
    }
    /// A `Lines` selection expands to full lines and survives rotation.
    #[test]
    fn line_selection() {
        let size = (10, 5);
        let mut selection =
            Selection::new(SelectionType::Lines, Point::new(Line(9), Column(1)), Side::Left);
        selection.update(Point::new(Line(4), Column(1)), Side::Right);
        selection = selection.rotate(&size, &(Line(0)..Line(size.0 as i32)), 4).unwrap();
        assert_eq!(selection.to_range(&term(size.0, size.1)).unwrap(), SelectionRange {
            start: Point::new(Line(0), Column(0)),
            end: Point::new(Line(5), Column(4)),
            is_block: false,
        });
    }
    /// A `Semantic` selection expands to semantic boundaries and survives rotation.
    #[test]
    fn semantic_selection() {
        let size = (10, 5);
        let mut selection =
            Selection::new(SelectionType::Semantic, Point::new(Line(9), Column(3)), Side::Left);
        selection.update(Point::new(Line(4), Column(1)), Side::Right);
        selection = selection.rotate(&size, &(Line(0)..Line(size.0 as i32)), 4).unwrap();
        assert_eq!(selection.to_range(&term(size.0, size.1)).unwrap(), SelectionRange {
            start: Point::new(Line(0), Column(1)),
            end: Point::new(Line(5), Column(3)),
            is_block: false,
        });
    }
    /// A `Simple` selection keeps its exact cells across a full-screen rotation.
    #[test]
    fn simple_selection() {
        let size = (10, 5);
        let mut selection =
            Selection::new(SelectionType::Simple, Point::new(Line(9), Column(3)), Side::Right);
        selection.update(Point::new(Line(4), Column(1)), Side::Right);
        selection = selection.rotate(&size, &(Line(0)..Line(size.0 as i32)), 4).unwrap();
        assert_eq!(selection.to_range(&term(size.0, size.1)).unwrap(), SelectionRange {
            start: Point::new(Line(0), Column(2)),
            end: Point::new(Line(5), Column(3)),
            is_block: false,
        });
    }
    /// A `Block` selection keeps its rectangle across a full-screen rotation.
    #[test]
    fn block_selection() {
        let size = (10, 5);
        let mut selection =
            Selection::new(SelectionType::Block, Point::new(Line(9), Column(3)), Side::Right);
        selection.update(Point::new(Line(4), Column(1)), Side::Right);
        selection = selection.rotate(&size, &(Line(0)..Line(size.0 as i32)), 4).unwrap();
        assert_eq!(selection.to_range(&term(size.0, size.1)).unwrap(), SelectionRange {
            start: Point::new(Line(0), Column(2)),
            end: Point::new(Line(5), Column(3)),
            is_block: true
        });
    }
    /// `Simple` selections are empty until at least one full cell is covered.
    #[test]
    fn simple_is_empty() {
        let mut selection =
            Selection::new(SelectionType::Simple, Point::new(Line(1), Column(0)), Side::Right);
        assert!(selection.is_empty());
        selection.update(Point::new(Line(1), Column(1)), Side::Left);
        assert!(selection.is_empty());
        selection.update(Point::new(Line(0), Column(0)), Side::Right);
        assert!(!selection.is_empty());
    }
    /// `Block` selections are empty while no full column is covered.
    #[test]
    fn block_is_empty() {
        let mut selection =
            Selection::new(SelectionType::Block, Point::new(Line(1), Column(0)), Side::Right);
        assert!(selection.is_empty());
        selection.update(Point::new(Line(1), Column(1)), Side::Left);
        assert!(selection.is_empty());
        selection.update(Point::new(Line(1), Column(1)), Side::Right);
        assert!(!selection.is_empty());
        selection.update(Point::new(Line(0), Column(0)), Side::Right);
        assert!(selection.is_empty());
        selection.update(Point::new(Line(0), Column(1)), Side::Left);
        assert!(selection.is_empty());
        selection.update(Point::new(Line(0), Column(1)), Side::Right);
        assert!(!selection.is_empty());
    }
    /// Rotating up inside a region clamps the selection start to the region top.
    #[test]
    fn rotate_in_region_up() {
        let size = (10, 5);
        let mut selection =
            Selection::new(SelectionType::Simple, Point::new(Line(7), Column(3)), Side::Right);
        selection.update(Point::new(Line(4), Column(1)), Side::Right);
        selection = selection.rotate(&size, &(Line(1)..Line(size.0 as i32 - 1)), 4).unwrap();
        assert_eq!(selection.to_range(&term(size.0, size.1)).unwrap(), SelectionRange {
            start: Point::new(Line(1), Column(0)),
            end: Point::new(Line(3), Column(3)),
            is_block: false,
        });
    }
    /// Rotating down inside a region clamps the selection end to the region bottom.
    #[test]
    fn rotate_in_region_down() {
        let size = (10, 5);
        let mut selection =
            Selection::new(SelectionType::Simple, Point::new(Line(4), Column(3)), Side::Right);
        selection.update(Point::new(Line(1), Column(1)), Side::Left);
        selection = selection.rotate(&size, &(Line(1)..Line(size.0 as i32 - 1)), -5).unwrap();
        assert_eq!(selection.to_range(&term(size.0, size.1)).unwrap(), SelectionRange {
            start: Point::new(Line(6), Column(1)),
            end: Point::new(Line(8), size.last_column()),
            is_block: false,
        });
    }
    /// Rotating a block selection in a region preserves its columns.
    #[test]
    fn rotate_in_region_up_block() {
        let size = (10, 5);
        let mut selection =
            Selection::new(SelectionType::Block, Point::new(Line(7), Column(3)), Side::Right);
        selection.update(Point::new(Line(4), Column(1)), Side::Right);
        selection = selection.rotate(&size, &(Line(1)..Line(size.0 as i32 - 1)), 4).unwrap();
        assert_eq!(selection.to_range(&term(size.0, size.1)).unwrap(), SelectionRange {
            start: Point::new(Line(1), Column(2)),
            end: Point::new(Line(3), Column(3)),
            is_block: true,
        });
    }
    /// Overlap checks for every kind of range bound.
    #[test]
    fn range_intersection() {
        let mut selection =
            Selection::new(SelectionType::Lines, Point::new(Line(3), Column(1)), Side::Left);
        selection.update(Point::new(Line(6), Column(1)), Side::Right);
        assert!(selection.intersects_range(..));
        assert!(selection.intersects_range(Line(2)..));
        assert!(selection.intersects_range(Line(2)..=Line(4)));
        assert!(selection.intersects_range(Line(2)..=Line(7)));
        assert!(selection.intersects_range(Line(4)..=Line(5)));
        assert!(selection.intersects_range(Line(5)..Line(8)));
        assert!(!selection.intersects_range(..=Line(2)));
        assert!(!selection.intersects_range(Line(7)..=Line(8)));
    }
}
| rust | {
"argument_definitions": [
{
"definitions": [
"pub struct Range<Idx> {\n /// The lower bound of the range (inclusive).\n #[stable(feature = \"rust1\", since = \"1.0.0\")]\n pub start: Idx,\n /// The upper bound of the range (exclusive).\n #[stable(feature = \"rust1\", since = \"1.0.0\")]\n pub end: Idx,\n}",
"impl Line {\n /// Clamp a line to a grid boundary.\n #[must_use]\n pub fn grid_clamp<D: Dimensions>(self, dimensions: &D, boundary: Boundary) -> Self {\n match boundary {\n Boundary::Cursor => max(Line(0), min(dimensions.bottommost_line(), self)),\n Boundary::Grid => {\n let bottommost_line = dimensions.bottommost_line();\n let topmost_line = dimensions.topmost_line();\n max(topmost_line, min(bottommost_line, self))\n },\n Boundary::None => {\n let screen_lines = dimensions.screen_lines() as i32;\n let total_lines = dimensions.total_lines() as i32;\n\n if self >= screen_lines {\n let topmost_line = dimensions.topmost_line();\n let extra = (self.0 - screen_lines) % total_lines;\n topmost_line + extra\n } else {\n let bottommost_line = dimensions.bottommost_line();\n let extra = (self.0 - screen_lines + 1) % total_lines;\n bottommost_line + extra\n }\n },\n }\n }\n}"
],
"name": "range",
"type": "&Range<Line>"
}
],
"end_line": 191,
"name": "rotate",
"signature": "pub fn rotate(\n mut self,\n dimensions: &D,\n range: &Range<Line>,\n delta: i32,\n ) -> Option<Selection>",
"start_line": 137
} | {
"class_name": "impl Selection {\n pub fn new(ty: SelectionType, location: Point, side: Side) -> Selection {\n Self {\n region: Range { start: Anchor::new(location, side), end: Anchor::new(location, side) },\n ty,\n }\n }\n\n /// Update the end of the selection.\n pub fn update(&mut self, point: Point, side: Side) {\n self.region.end = Anchor::new(point, side);\n }\n\n pub fn rotate<D: Dimensions>(\n mut self,\n dimensions: &D,\n range: &Range<Line>,\n delta: i32,\n ) -> Option<Selection> {\n let bottommost_line = dimensions.bottommost_line();\n let range_bottom = range.end;\n let range_top = range.start;\n\n let (mut start, mut end) = (&mut self.region.start, &mut self.region.end);\n if start.point > end.point {\n mem::swap(&mut start, &mut end);\n }\n\n // Rotate start of selection.\n if (start.point.line >= range_top || range_top == 0) && start.point.line < range_bottom {\n start.point.line = min(start.point.line - delta, bottommost_line);\n\n // If end is within the same region, delete selection once start rotates out.\n if start.point.line >= range_bottom && end.point.line < range_bottom {\n return None;\n }\n\n // Clamp selection to start of region.\n if start.point.line < range_top && range_top != 0 {\n if self.ty != SelectionType::Block {\n start.point.column = Column(0);\n start.side = Side::Left;\n }\n start.point.line = range_top;\n }\n }\n\n // Rotate end of selection.\n if (end.point.line >= range_top || range_top == 0) && end.point.line < range_bottom {\n end.point.line = min(end.point.line - delta, bottommost_line);\n\n // Delete selection if end has overtaken the start.\n if end.point.line < start.point.line {\n return None;\n }\n\n // Clamp selection to end of region.\n if end.point.line >= range_bottom {\n if self.ty != SelectionType::Block {\n end.point.column = dimensions.last_column();\n end.side = Side::Right;\n }\n end.point.line = range_bottom - 1;\n }\n }\n\n Some(self)\n }\n\n pub fn is_empty(&self) -> bool {\n match self.ty {\n 
SelectionType::Simple => {\n let (mut start, mut end) = (self.region.start, self.region.end);\n if start.point > end.point {\n mem::swap(&mut start, &mut end);\n }\n\n // Simple selection is empty when the points are identical\n // or two adjacent cells have the sides right -> left.\n start == end\n || (start.side == Side::Right\n && end.side == Side::Left\n && (start.point.line == end.point.line)\n && start.point.column + 1 == end.point.column)\n },\n SelectionType::Block => {\n let (start, end) = (self.region.start, self.region.end);\n\n // Block selection is empty when the points' columns and sides are identical\n // or two cells with adjacent columns have the sides right -> left,\n // regardless of their lines\n (start.point.column == end.point.column && start.side == end.side)\n || (start.point.column + 1 == end.point.column\n && start.side == Side::Right\n && end.side == Side::Left)\n || (end.point.column + 1 == start.point.column\n && start.side == Side::Left\n && end.side == Side::Right)\n },\n SelectionType::Semantic | SelectionType::Lines => false,\n }\n }\n\n /// Check whether selection contains any point in a given range.\n pub fn intersects_range<R: RangeBounds<Line>>(&self, range: R) -> bool {\n let mut start = self.region.start.point.line;\n let mut end = self.region.end.point.line;\n\n if start > end {\n mem::swap(&mut start, &mut end);\n }\n\n let range_top = match range.start_bound() {\n Bound::Included(&range_start) => range_start,\n Bound::Excluded(&range_start) => range_start + 1,\n Bound::Unbounded => Line(i32::MIN),\n };\n\n let range_bottom = match range.end_bound() {\n Bound::Included(&range_end) => range_end,\n Bound::Excluded(&range_end) => range_end - 1,\n Bound::Unbounded => Line(i32::MAX),\n };\n\n range_bottom >= start && range_top <= end\n }\n\n /// Expand selection sides to include all cells.\n pub fn include_all(&mut self) {\n let (start, end) = (self.region.start.point, self.region.end.point);\n let (start_side, end_side) = match 
self.ty {\n SelectionType::Block\n if start.column > end.column\n || (start.column == end.column && start.line > end.line) =>\n {\n (Side::Right, Side::Left)\n },\n SelectionType::Block => (Side::Left, Side::Right),\n _ if start > end => (Side::Right, Side::Left),\n _ => (Side::Left, Side::Right),\n };\n\n self.region.start.side = start_side;\n self.region.end.side = end_side;\n }\n\n /// Convert selection to grid coordinates.\n pub fn to_range<T>(&self, term: &Term<T>) -> Option<SelectionRange> {\n let grid = term.grid();\n let columns = grid.columns();\n\n // Order start above the end.\n let mut start = self.region.start;\n let mut end = self.region.end;\n\n if start.point > end.point {\n mem::swap(&mut start, &mut end);\n }\n\n // Clamp selection to within grid boundaries.\n if end.point.line < term.topmost_line() {\n return None;\n }\n start.point = start.point.grid_clamp(term, Boundary::Grid);\n end.point = end.point.grid_clamp(term, Boundary::Grid);\n\n match self.ty {\n SelectionType::Simple => self.range_simple(start, end, columns),\n SelectionType::Block => self.range_block(start, end),\n SelectionType::Semantic => Some(Self::range_semantic(term, start.point, end.point)),\n SelectionType::Lines => Some(Self::range_lines(term, start.point, end.point)),\n }\n }\n\n fn range_semantic<T>(term: &Term<T>, mut start: Point, mut end: Point) -> SelectionRange {\n if start == end {\n if let Some(matching) = term.bracket_search(start) {\n if (matching.line == start.line && matching.column < start.column)\n || (matching.line < start.line)\n {\n start = matching;\n } else {\n end = matching;\n }\n\n return SelectionRange { start, end, is_block: false };\n }\n }\n\n let start = term.semantic_search_left(start);\n let end = term.semantic_search_right(end);\n\n SelectionRange { start, end, is_block: false }\n }\n\n fn range_lines<T>(term: &Term<T>, start: Point, end: Point) -> SelectionRange {\n let start = term.line_search_left(start);\n let end = 
term.line_search_right(end);\n\n SelectionRange { start, end, is_block: false }\n }\n\n fn range_simple(\n &self,\n mut start: Anchor,\n mut end: Anchor,\n columns: usize,\n ) -> Option<SelectionRange> {\n if self.is_empty() {\n return None;\n }\n\n // Remove last cell if selection ends to the left of a cell.\n if end.side == Side::Left && start.point != end.point {\n // Special case when selection ends to left of first cell.\n if end.point.column == 0 {\n end.point.column = Column(columns - 1);\n end.point.line -= 1;\n } else {\n end.point.column -= 1;\n }\n }\n\n // Remove first cell if selection starts at the right of a cell.\n if start.side == Side::Right && start.point != end.point {\n start.point.column += 1;\n\n // Wrap to next line when selection starts to the right of last column.\n if start.point.column == columns {\n start.point.column = Column(0);\n start.point.line += 1;\n }\n }\n\n Some(SelectionRange { start: start.point, end: end.point, is_block: false })\n }\n\n fn range_block(&self, mut start: Anchor, mut end: Anchor) -> Option<SelectionRange> {\n if self.is_empty() {\n return None;\n }\n\n // Always go top-left -> bottom-right.\n if start.point.column > end.point.column {\n mem::swap(&mut start.side, &mut end.side);\n mem::swap(&mut start.point.column, &mut end.point.column);\n }\n\n // Remove last cell if selection ends to the left of a cell.\n if end.side == Side::Left && start.point != end.point && end.point.column.0 > 0 {\n end.point.column -= 1;\n }\n\n // Remove first cell if selection starts at the right of a cell.\n if start.side == Side::Right && start.point != end.point {\n start.point.column += 1;\n }\n\n Some(SelectionRange { start: start.point, end: end.point, is_block: true })\n }\n}",
"class_signature": "impl Selection"
} |
intersects_range | alacritty-master/alacritty_terminal/src/selection.rs | pub fn intersects_range(&self, range: R) -> bool {
        // Order the anchor lines so `start` is the upper one.
        let mut start = self.region.start.point.line;
        let mut end = self.region.end.point.line;
        if start > end {
            mem::swap(&mut start, &mut end);
        }
        // Normalize the bounds into inclusive top/bottom lines.
        let range_top = match range.start_bound() {
            Bound::Included(&range_start) => range_start,
            Bound::Excluded(&range_start) => range_start + 1,
            Bound::Unbounded => Line(i32::MIN),
        };
        let range_bottom = match range.end_bound() {
            Bound::Included(&range_end) => range_end,
            Bound::Excluded(&range_end) => range_end - 1,
            Bound::Unbounded => Line(i32::MAX),
        };
        // Two inclusive intervals overlap iff neither lies fully past the other.
        range_bottom >= start && range_top <= end
    } | //! State management for a selection in the grid.
//!
//! A selection should start when the mouse is clicked, and it should be
//! finalized when the button is released. The selection should be cleared
//! when text is added/removed/scrolled on the screen. The selection should
//! also be cleared if the user clicks off of the selection.
use std::cmp::min;
use std::mem;
use std::ops::{Bound, Range, RangeBounds};
use crate::grid::{Dimensions, GridCell, Indexed};
use crate::index::{Boundary, Column, Line, Point, Side};
use crate::term::cell::{Cell, Flags};
use crate::term::Term;
use crate::vte::ansi::CursorShape;
/// A Point and side within that point.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
struct Anchor {
    /// Cell the anchor is attached to.
    point: Point,
    /// Half of the cell (left or right) the anchor sits on.
    side: Side,
}
impl Anchor {
fn new(point: Point, side: Side) -> Anchor {
Anchor { point, side }
}
}
/// Represents a range of selected cells.
///
/// `start` is expected to be ordered at or before `end`; see
/// [`SelectionRange::new`], which asserts this invariant.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub struct SelectionRange {
    /// Start point, top left of the selection.
    pub start: Point,
    /// End point, bottom right of the selection.
    pub end: Point,
    /// Whether this selection is a block selection.
    pub is_block: bool,
}
impl SelectionRange {
    /// Create a new selection range from ordered points.
    ///
    /// # Panics
    ///
    /// Panics if `start` is ordered after `end`.
    pub fn new(start: Point, end: Point, is_block: bool) -> Self {
        // Descriptive message so an invariant violation is diagnosable from the panic alone.
        assert!(start <= end, "selection range start must not be after end");
        Self { start, end, is_block }
    }
}
impl SelectionRange {
    /// Check if a point lies within the selection.
    pub fn contains(&self, point: Point) -> bool {
        // For non-block selections, the column bounds only apply on the first
        // and last line; interior lines are fully covered. Block selections
        // enforce the column bounds on every line.
        self.start.line <= point.line
            && self.end.line >= point.line
            && (self.start.column <= point.column
                || (self.start.line != point.line && !self.is_block))
            && (self.end.column >= point.column || (self.end.line != point.line && !self.is_block))
    }
    /// Check if the cell at a point is part of the selection.
    pub fn contains_cell(
        &self,
        indexed: &Indexed<&Cell>,
        point: Point,
        shape: CursorShape,
    ) -> bool {
        // Do not invert block cursor at selection boundaries.
        if shape == CursorShape::Block
            && point == indexed.point
            && (self.start == indexed.point
                || self.end == indexed.point
                || (self.is_block
                    && ((self.start.line == indexed.point.line
                        && self.end.column == indexed.point.column)
                        || (self.end.line == indexed.point.line
                            && self.start.column == indexed.point.column))))
        {
            return false;
        }
        // Point itself is selected.
        if self.contains(indexed.point) {
            return true;
        }
        // Check if a wide char's trailing spacer is selected.
        indexed.cell.flags().contains(Flags::WIDE_CHAR)
            && self.contains(Point::new(indexed.point.line, indexed.point.column + 1))
    }
}
/// Different kinds of selection.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum SelectionType {
    /// Track exactly the cells between the anchors, without expansion.
    Simple,
    /// Select a rectangular (column-aligned) region.
    Block,
    /// Expand the selection to semantic boundaries in both directions.
    Semantic,
    /// Always select entire lines.
    Lines,
}
/// Describes a region of a 2-dimensional area.
///
/// Used to track a text selection. There are four supported modes, each with its own constructor:
/// [`simple`], [`block`], [`semantic`], and [`lines`]. The [`simple`] mode precisely tracks which
/// cells are selected without any expansion. [`block`] will select rectangular regions.
/// [`semantic`] mode expands the initial selection to the nearest semantic escape char in either
/// direction. [`lines`] will always select entire lines.
///
/// Calls to [`update`] operate differently based on the selection kind. The [`simple`] and [`block`]
/// modes do nothing special, simply track points and sides. [`semantic`] will continue to expand
/// out to semantic boundaries as the selection point changes. Similarly, [`lines`] will always
/// expand the new point to encompass entire lines.
///
/// [`simple`]: enum.Selection.html#method.simple
/// [`block`]: enum.Selection.html#method.block
/// [`semantic`]: enum.Selection.html#method.semantic
/// [`lines`]: enum.Selection.html#method.lines
/// [`update`]: enum.Selection.html#method.update
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct Selection {
    /// Kind of selection (simple, block, semantic, or lines).
    pub ty: SelectionType,
    /// The two anchors delimiting the selection; not necessarily ordered —
    /// consumers swap them when needed (see `to_range`).
    region: Range<Anchor>,
}
impl Selection {
    /// Create a new selection with both anchors at the given point and side.
    pub fn new(ty: SelectionType, location: Point, side: Side) -> Selection {
        Self {
            region: Range { start: Anchor::new(location, side), end: Anchor::new(location, side) },
            ty,
        }
    }
    /// Update the end of the selection.
    pub fn update(&mut self, point: Point, side: Side) {
        self.region.end = Anchor::new(point, side);
    }
    /// Shift the selection by `delta` lines to follow a rotation of the
    /// scrolling region `range`.
    ///
    /// Returns `None` when the selection is rotated entirely out of the
    /// region, consuming `self` otherwise.
    pub fn rotate<D: Dimensions>(
        mut self,
        dimensions: &D,
        range: &Range<Line>,
        delta: i32,
    ) -> Option<Selection> {
        let bottommost_line = dimensions.bottommost_line();
        let range_bottom = range.end;
        let range_top = range.start;
        // Work on ordered references so "start" is always the upper anchor.
        let (mut start, mut end) = (&mut self.region.start, &mut self.region.end);
        if start.point > end.point {
            mem::swap(&mut start, &mut end);
        }
        // Rotate start of selection.
        if (start.point.line >= range_top || range_top == 0) && start.point.line < range_bottom {
            start.point.line = min(start.point.line - delta, bottommost_line);
            // If end is within the same region, delete selection once start rotates out.
            if start.point.line >= range_bottom && end.point.line < range_bottom {
                return None;
            }
            // Clamp selection to start of region.
            if start.point.line < range_top && range_top != 0 {
                if self.ty != SelectionType::Block {
                    start.point.column = Column(0);
                    start.side = Side::Left;
                }
                start.point.line = range_top;
            }
        }
        // Rotate end of selection.
        if (end.point.line >= range_top || range_top == 0) && end.point.line < range_bottom {
            end.point.line = min(end.point.line - delta, bottommost_line);
            // Delete selection if end has overtaken the start.
            if end.point.line < start.point.line {
                return None;
            }
            // Clamp selection to end of region.
            if end.point.line >= range_bottom {
                if self.ty != SelectionType::Block {
                    end.point.column = dimensions.last_column();
                    end.side = Side::Right;
                }
                end.point.line = range_bottom - 1;
            }
        }
        Some(self)
    }
    /// Whether the selection covers no complete cell.
    pub fn is_empty(&self) -> bool {
        match self.ty {
            SelectionType::Simple => {
                let (mut start, mut end) = (self.region.start, self.region.end);
                if start.point > end.point {
                    mem::swap(&mut start, &mut end);
                }
                // Simple selection is empty when the points are identical
                // or two adjacent cells have the sides right -> left.
                start == end
                    || (start.side == Side::Right
                        && end.side == Side::Left
                        && (start.point.line == end.point.line)
                        && start.point.column + 1 == end.point.column)
            },
            SelectionType::Block => {
                let (start, end) = (self.region.start, self.region.end);
                // Block selection is empty when the points' columns and sides are identical
                // or two cells with adjacent columns have the sides right -> left,
                // regardless of their lines
                (start.point.column == end.point.column && start.side == end.side)
                    || (start.point.column + 1 == end.point.column
                        && start.side == Side::Right
                        && end.side == Side::Left)
                    || (end.point.column + 1 == start.point.column
                        && start.side == Side::Left
                        && end.side == Side::Right)
            },
            // Semantic/line selections always expand to at least one full
            // word/line, so they are never empty.
            SelectionType::Semantic | SelectionType::Lines => false,
        }
    }
    /// Check whether selection contains any point in a given range.
    pub fn intersects_range<R: RangeBounds<Line>>(&self, range: R) -> bool {
        let mut start = self.region.start.point.line;
        let mut end = self.region.end.point.line;
        if start > end {
            mem::swap(&mut start, &mut end);
        }
        // Normalize the bounds into an inclusive [range_top, range_bottom].
        let range_top = match range.start_bound() {
            Bound::Included(&range_start) => range_start,
            Bound::Excluded(&range_start) => range_start + 1,
            Bound::Unbounded => Line(i32::MIN),
        };
        let range_bottom = match range.end_bound() {
            Bound::Included(&range_end) => range_end,
            Bound::Excluded(&range_end) => range_end - 1,
            Bound::Unbounded => Line(i32::MAX),
        };
        // Two inclusive intervals overlap iff neither ends before the other starts.
        range_bottom >= start && range_top <= end
    }
    /// Expand selection sides to include all cells.
    pub fn include_all(&mut self) {
        let (start, end) = (self.region.start.point, self.region.end.point);
        let (start_side, end_side) = match self.ty {
            // Block selections order by column first, so the side assignment
            // depends on the column (then line) ordering of the anchors.
            SelectionType::Block
                if start.column > end.column
                    || (start.column == end.column && start.line > end.line) =>
            {
                (Side::Right, Side::Left)
            },
            SelectionType::Block => (Side::Left, Side::Right),
            _ if start > end => (Side::Right, Side::Left),
            _ => (Side::Left, Side::Right),
        };
        self.region.start.side = start_side;
        self.region.end.side = end_side;
    }
    /// Convert selection to grid coordinates.
    pub fn to_range<T>(&self, term: &Term<T>) -> Option<SelectionRange> {
        let grid = term.grid();
        let columns = grid.columns();
        // Order start above the end.
        let mut start = self.region.start;
        let mut end = self.region.end;
        if start.point > end.point {
            mem::swap(&mut start, &mut end);
        }
        // Clamp selection to within grid boundaries.
        if end.point.line < term.topmost_line() {
            return None;
        }
        start.point = start.point.grid_clamp(term, Boundary::Grid);
        end.point = end.point.grid_clamp(term, Boundary::Grid);
        match self.ty {
            SelectionType::Simple => self.range_simple(start, end, columns),
            SelectionType::Block => self.range_block(start, end),
            SelectionType::Semantic => Some(Self::range_semantic(term, start.point, end.point)),
            SelectionType::Lines => Some(Self::range_lines(term, start.point, end.point)),
        }
    }
    /// Expand a semantic selection to word boundaries, or to a matching
    /// bracket when the selection is a single point on a bracket.
    fn range_semantic<T>(term: &Term<T>, mut start: Point, mut end: Point) -> SelectionRange {
        if start == end {
            if let Some(matching) = term.bracket_search(start) {
                // Order the match and the origin so start stays top-left.
                if (matching.line == start.line && matching.column < start.column)
                    || (matching.line < start.line)
                {
                    start = matching;
                } else {
                    end = matching;
                }
                return SelectionRange { start, end, is_block: false };
            }
        }
        let start = term.semantic_search_left(start);
        let end = term.semantic_search_right(end);
        SelectionRange { start, end, is_block: false }
    }
    /// Expand both ends of a line selection to full-line boundaries.
    fn range_lines<T>(term: &Term<T>, start: Point, end: Point) -> SelectionRange {
        let start = term.line_search_left(start);
        let end = term.line_search_right(end);
        SelectionRange { start, end, is_block: false }
    }
    /// Resolve a simple selection's anchors (point + side) into whole cells.
    fn range_simple(
        &self,
        mut start: Anchor,
        mut end: Anchor,
        columns: usize,
    ) -> Option<SelectionRange> {
        if self.is_empty() {
            return None;
        }
        // Remove last cell if selection ends to the left of a cell.
        if end.side == Side::Left && start.point != end.point {
            // Special case when selection ends to left of first cell.
            if end.point.column == 0 {
                end.point.column = Column(columns - 1);
                end.point.line -= 1;
            } else {
                end.point.column -= 1;
            }
        }
        // Remove first cell if selection starts at the right of a cell.
        if start.side == Side::Right && start.point != end.point {
            start.point.column += 1;
            // Wrap to next line when selection starts to the right of last column.
            if start.point.column == columns {
                start.point.column = Column(0);
                start.point.line += 1;
            }
        }
        Some(SelectionRange { start: start.point, end: end.point, is_block: false })
    }
    /// Resolve a block selection's anchors into a rectangular cell range.
    fn range_block(&self, mut start: Anchor, mut end: Anchor) -> Option<SelectionRange> {
        if self.is_empty() {
            return None;
        }
        // Always go top-left -> bottom-right.
        if start.point.column > end.point.column {
            mem::swap(&mut start.side, &mut end.side);
            mem::swap(&mut start.point.column, &mut end.point.column);
        }
        // Remove last cell if selection ends to the left of a cell.
        if end.side == Side::Left && start.point != end.point && end.point.column.0 > 0 {
            end.point.column -= 1;
        }
        // Remove first cell if selection starts at the right of a cell.
        if start.side == Side::Right && start.point != end.point {
            start.point.column += 1;
        }
        Some(SelectionRange { start: start.point, end: end.point, is_block: true })
    }
}
/// Tests for selection.
///
/// There are comments on all of the tests describing the selection. Pictograms
/// are used to avoid ambiguity. Grid cells are represented by a [ ]. Only
/// cells that are completely covered are counted in a selection. Ends are
/// represented by `B` and `E` for begin and end, respectively. A selected cell
/// looks like [XX], [BX] (at the start), [XB] (at the end), [XE] (at the end),
/// and [EX] (at the start), or [BE] for a single cell. Partially selected cells
/// look like [ B] and [E ].
#[cfg(test)]
mod tests {
    use super::*;
    use crate::index::{Column, Point, Side};
    use crate::term::test::TermSize;
    use crate::term::{Config, Term};
    /// Construct a default terminal of the given dimensions for tests.
    fn term(height: usize, width: usize) -> Term<()> {
        let size = TermSize::new(width, height);
        Term::new(Config::default(), &size, ())
    }
    /// Test case of single cell selection.
    ///
    /// 1. [ ]
    /// 2. [B ]
    /// 3. [BE]
    #[test]
    fn single_cell_left_to_right() {
        let location = Point::new(Line(0), Column(0));
        let mut selection = Selection::new(SelectionType::Simple, location, Side::Left);
        selection.update(location, Side::Right);
        assert_eq!(selection.to_range(&term(1, 2)).unwrap(), SelectionRange {
            start: location,
            end: location,
            is_block: false
        });
    }
    /// Test case of single cell selection.
    ///
    /// 1. [ ]
    /// 2. [ B]
    /// 3. [EB]
    #[test]
    fn single_cell_right_to_left() {
        let location = Point::new(Line(0), Column(0));
        let mut selection = Selection::new(SelectionType::Simple, location, Side::Right);
        selection.update(location, Side::Left);
        assert_eq!(selection.to_range(&term(1, 2)).unwrap(), SelectionRange {
            start: location,
            end: location,
            is_block: false
        });
    }
    /// Test adjacent cell selection from left to right.
    ///
    /// 1. [ ][ ]
    /// 2. [ B][ ]
    /// 3. [ B][E ]
    #[test]
    fn between_adjacent_cells_left_to_right() {
        let mut selection =
            Selection::new(SelectionType::Simple, Point::new(Line(0), Column(0)), Side::Right);
        selection.update(Point::new(Line(0), Column(1)), Side::Left);
        assert_eq!(selection.to_range(&term(1, 2)), None);
    }
    /// Test adjacent cell selection from right to left.
    ///
    /// 1. [ ][ ]
    /// 2. [ ][B ]
    /// 3. [ E][B ]
    #[test]
    fn between_adjacent_cells_right_to_left() {
        let mut selection =
            Selection::new(SelectionType::Simple, Point::new(Line(0), Column(1)), Side::Left);
        selection.update(Point::new(Line(0), Column(0)), Side::Right);
        assert_eq!(selection.to_range(&term(1, 2)), None);
    }
    #[rustfmt::skip]
    /// Test selection across adjacent lines.
    ///
    /// 1. [ ][ ][ ][ ][ ]
    ///    [ ][ ][ ][ ][ ]
    /// 2. [ ][ B][ ][ ][ ]
    ///    [ ][ ][ ][ ][ ]
    /// 3. [ ][ B][XX][XX][XX]
    ///    [XX][XE][ ][ ][ ]
    #[test]
    fn across_adjacent_lines_upward_final_cell_exclusive() {
        let mut selection =
            Selection::new(SelectionType::Simple, Point::new(Line(0), Column(1)), Side::Right);
        selection.update(Point::new(Line(1), Column(1)), Side::Right);
        assert_eq!(selection.to_range(&term(2, 5)).unwrap(), SelectionRange {
            start: Point::new(Line(0), Column(2)),
            end: Point::new(Line(1), Column(1)),
            is_block: false,
        });
    }
    #[rustfmt::skip]
    /// Test selection across adjacent lines.
    ///
    /// 1. [ ][ ][ ][ ][ ]
    ///    [ ][ ][ ][ ][ ]
    /// 2. [ ][ ][ ][ ][ ]
    ///    [ ][ B][ ][ ][ ]
    /// 3. [ ][ E][XX][XX][XX]
    ///    [XX][XB][ ][ ][ ]
    /// 4. [ E][XX][XX][XX][XX]
    ///    [XX][XB][ ][ ][ ]
    #[test]
    fn selection_bigger_then_smaller() {
        let mut selection =
            Selection::new(SelectionType::Simple, Point::new(Line(1), Column(1)), Side::Right);
        selection.update(Point::new(Line(0), Column(1)), Side::Right);
        selection.update(Point::new(Line(0), Column(0)), Side::Right);
        assert_eq!(selection.to_range(&term(2, 5)).unwrap(), SelectionRange {
            start: Point::new(Line(0), Column(1)),
            end: Point::new(Line(1), Column(1)),
            is_block: false,
        });
    }
    /// Line selection should survive a rotation and expand to full lines.
    #[test]
    fn line_selection() {
        let size = (10, 5);
        let mut selection =
            Selection::new(SelectionType::Lines, Point::new(Line(9), Column(1)), Side::Left);
        selection.update(Point::new(Line(4), Column(1)), Side::Right);
        selection = selection.rotate(&size, &(Line(0)..Line(size.0 as i32)), 4).unwrap();
        assert_eq!(selection.to_range(&term(size.0, size.1)).unwrap(), SelectionRange {
            start: Point::new(Line(0), Column(0)),
            end: Point::new(Line(5), Column(4)),
            is_block: false,
        });
    }
    /// Semantic selection should survive a rotation and expand to word bounds.
    #[test]
    fn semantic_selection() {
        let size = (10, 5);
        let mut selection =
            Selection::new(SelectionType::Semantic, Point::new(Line(9), Column(3)), Side::Left);
        selection.update(Point::new(Line(4), Column(1)), Side::Right);
        selection = selection.rotate(&size, &(Line(0)..Line(size.0 as i32)), 4).unwrap();
        assert_eq!(selection.to_range(&term(size.0, size.1)).unwrap(), SelectionRange {
            start: Point::new(Line(0), Column(1)),
            end: Point::new(Line(5), Column(3)),
            is_block: false,
        });
    }
    /// Simple selection should survive a full-region rotation.
    #[test]
    fn simple_selection() {
        let size = (10, 5);
        let mut selection =
            Selection::new(SelectionType::Simple, Point::new(Line(9), Column(3)), Side::Right);
        selection.update(Point::new(Line(4), Column(1)), Side::Right);
        selection = selection.rotate(&size, &(Line(0)..Line(size.0 as i32)), 4).unwrap();
        assert_eq!(selection.to_range(&term(size.0, size.1)).unwrap(), SelectionRange {
            start: Point::new(Line(0), Column(2)),
            end: Point::new(Line(5), Column(3)),
            is_block: false,
        });
    }
    /// Block selection should survive a full-region rotation.
    #[test]
    fn block_selection() {
        let size = (10, 5);
        let mut selection =
            Selection::new(SelectionType::Block, Point::new(Line(9), Column(3)), Side::Right);
        selection.update(Point::new(Line(4), Column(1)), Side::Right);
        selection = selection.rotate(&size, &(Line(0)..Line(size.0 as i32)), 4).unwrap();
        assert_eq!(selection.to_range(&term(size.0, size.1)).unwrap(), SelectionRange {
            start: Point::new(Line(0), Column(2)),
            end: Point::new(Line(5), Column(3)),
            is_block: true
        });
    }
    /// Emptiness of simple selections: identical points and right->left
    /// adjacent cells are empty.
    #[test]
    fn simple_is_empty() {
        let mut selection =
            Selection::new(SelectionType::Simple, Point::new(Line(1), Column(0)), Side::Right);
        assert!(selection.is_empty());
        selection.update(Point::new(Line(1), Column(1)), Side::Left);
        assert!(selection.is_empty());
        selection.update(Point::new(Line(0), Column(0)), Side::Right);
        assert!(!selection.is_empty());
    }
    /// Emptiness of block selections: depends on columns/sides only,
    /// regardless of lines.
    #[test]
    fn block_is_empty() {
        let mut selection =
            Selection::new(SelectionType::Block, Point::new(Line(1), Column(0)), Side::Right);
        assert!(selection.is_empty());
        selection.update(Point::new(Line(1), Column(1)), Side::Left);
        assert!(selection.is_empty());
        selection.update(Point::new(Line(1), Column(1)), Side::Right);
        assert!(!selection.is_empty());
        selection.update(Point::new(Line(0), Column(0)), Side::Right);
        assert!(selection.is_empty());
        selection.update(Point::new(Line(0), Column(1)), Side::Left);
        assert!(selection.is_empty());
        selection.update(Point::new(Line(0), Column(1)), Side::Right);
        assert!(!selection.is_empty());
    }
    /// Rotating up inside a sub-region clamps the start to the region top.
    #[test]
    fn rotate_in_region_up() {
        let size = (10, 5);
        let mut selection =
            Selection::new(SelectionType::Simple, Point::new(Line(7), Column(3)), Side::Right);
        selection.update(Point::new(Line(4), Column(1)), Side::Right);
        selection = selection.rotate(&size, &(Line(1)..Line(size.0 as i32 - 1)), 4).unwrap();
        assert_eq!(selection.to_range(&term(size.0, size.1)).unwrap(), SelectionRange {
            start: Point::new(Line(1), Column(0)),
            end: Point::new(Line(3), Column(3)),
            is_block: false,
        });
    }
    /// Rotating down inside a sub-region clamps the end to the region bottom.
    #[test]
    fn rotate_in_region_down() {
        let size = (10, 5);
        let mut selection =
            Selection::new(SelectionType::Simple, Point::new(Line(4), Column(3)), Side::Right);
        selection.update(Point::new(Line(1), Column(1)), Side::Left);
        selection = selection.rotate(&size, &(Line(1)..Line(size.0 as i32 - 1)), -5).unwrap();
        assert_eq!(selection.to_range(&term(size.0, size.1)).unwrap(), SelectionRange {
            start: Point::new(Line(6), Column(1)),
            end: Point::new(Line(8), size.last_column()),
            is_block: false,
        });
    }
    /// Block selections keep their columns when clamped by a region rotation.
    #[test]
    fn rotate_in_region_up_block() {
        let size = (10, 5);
        let mut selection =
            Selection::new(SelectionType::Block, Point::new(Line(7), Column(3)), Side::Right);
        selection.update(Point::new(Line(4), Column(1)), Side::Right);
        selection = selection.rotate(&size, &(Line(1)..Line(size.0 as i32 - 1)), 4).unwrap();
        assert_eq!(selection.to_range(&term(size.0, size.1)).unwrap(), SelectionRange {
            start: Point::new(Line(1), Column(2)),
            end: Point::new(Line(3), Column(3)),
            is_block: true,
        });
    }
    /// `intersects_range` with every flavor of range bound.
    #[test]
    fn range_intersection() {
        let mut selection =
            Selection::new(SelectionType::Lines, Point::new(Line(3), Column(1)), Side::Left);
        selection.update(Point::new(Line(6), Column(1)), Side::Right);
        assert!(selection.intersects_range(..));
        assert!(selection.intersects_range(Line(2)..));
        assert!(selection.intersects_range(Line(2)..=Line(4)));
        assert!(selection.intersects_range(Line(2)..=Line(7)));
        assert!(selection.intersects_range(Line(4)..=Line(5)));
        assert!(selection.intersects_range(Line(5)..Line(8)));
        assert!(!selection.intersects_range(..=Line(2)));
        assert!(!selection.intersects_range(Line(7)..=Line(8)));
    }
}
}
| rust | {
"argument_definitions": [],
"end_line": 249,
"name": "intersects_range",
"signature": "pub fn intersects_range(&self, range: R) -> bool",
"start_line": 228
} | {
"class_name": "impl Selection {\n pub fn new(ty: SelectionType, location: Point, side: Side) -> Selection {\n Self {\n region: Range { start: Anchor::new(location, side), end: Anchor::new(location, side) },\n ty,\n }\n }\n\n /// Update the end of the selection.\n pub fn update(&mut self, point: Point, side: Side) {\n self.region.end = Anchor::new(point, side);\n }\n\n pub fn rotate<D: Dimensions>(\n mut self,\n dimensions: &D,\n range: &Range<Line>,\n delta: i32,\n ) -> Option<Selection> {\n let bottommost_line = dimensions.bottommost_line();\n let range_bottom = range.end;\n let range_top = range.start;\n\n let (mut start, mut end) = (&mut self.region.start, &mut self.region.end);\n if start.point > end.point {\n mem::swap(&mut start, &mut end);\n }\n\n // Rotate start of selection.\n if (start.point.line >= range_top || range_top == 0) && start.point.line < range_bottom {\n start.point.line = min(start.point.line - delta, bottommost_line);\n\n // If end is within the same region, delete selection once start rotates out.\n if start.point.line >= range_bottom && end.point.line < range_bottom {\n return None;\n }\n\n // Clamp selection to start of region.\n if start.point.line < range_top && range_top != 0 {\n if self.ty != SelectionType::Block {\n start.point.column = Column(0);\n start.side = Side::Left;\n }\n start.point.line = range_top;\n }\n }\n\n // Rotate end of selection.\n if (end.point.line >= range_top || range_top == 0) && end.point.line < range_bottom {\n end.point.line = min(end.point.line - delta, bottommost_line);\n\n // Delete selection if end has overtaken the start.\n if end.point.line < start.point.line {\n return None;\n }\n\n // Clamp selection to end of region.\n if end.point.line >= range_bottom {\n if self.ty != SelectionType::Block {\n end.point.column = dimensions.last_column();\n end.side = Side::Right;\n }\n end.point.line = range_bottom - 1;\n }\n }\n\n Some(self)\n }\n\n pub fn is_empty(&self) -> bool {\n match self.ty {\n 
SelectionType::Simple => {\n let (mut start, mut end) = (self.region.start, self.region.end);\n if start.point > end.point {\n mem::swap(&mut start, &mut end);\n }\n\n // Simple selection is empty when the points are identical\n // or two adjacent cells have the sides right -> left.\n start == end\n || (start.side == Side::Right\n && end.side == Side::Left\n && (start.point.line == end.point.line)\n && start.point.column + 1 == end.point.column)\n },\n SelectionType::Block => {\n let (start, end) = (self.region.start, self.region.end);\n\n // Block selection is empty when the points' columns and sides are identical\n // or two cells with adjacent columns have the sides right -> left,\n // regardless of their lines\n (start.point.column == end.point.column && start.side == end.side)\n || (start.point.column + 1 == end.point.column\n && start.side == Side::Right\n && end.side == Side::Left)\n || (end.point.column + 1 == start.point.column\n && start.side == Side::Left\n && end.side == Side::Right)\n },\n SelectionType::Semantic | SelectionType::Lines => false,\n }\n }\n\n /// Check whether selection contains any point in a given range.\n pub fn intersects_range<R: RangeBounds<Line>>(&self, range: R) -> bool {\n let mut start = self.region.start.point.line;\n let mut end = self.region.end.point.line;\n\n if start > end {\n mem::swap(&mut start, &mut end);\n }\n\n let range_top = match range.start_bound() {\n Bound::Included(&range_start) => range_start,\n Bound::Excluded(&range_start) => range_start + 1,\n Bound::Unbounded => Line(i32::MIN),\n };\n\n let range_bottom = match range.end_bound() {\n Bound::Included(&range_end) => range_end,\n Bound::Excluded(&range_end) => range_end - 1,\n Bound::Unbounded => Line(i32::MAX),\n };\n\n range_bottom >= start && range_top <= end\n }\n\n /// Expand selection sides to include all cells.\n pub fn include_all(&mut self) {\n let (start, end) = (self.region.start.point, self.region.end.point);\n let (start_side, end_side) = match 
self.ty {\n SelectionType::Block\n if start.column > end.column\n || (start.column == end.column && start.line > end.line) =>\n {\n (Side::Right, Side::Left)\n },\n SelectionType::Block => (Side::Left, Side::Right),\n _ if start > end => (Side::Right, Side::Left),\n _ => (Side::Left, Side::Right),\n };\n\n self.region.start.side = start_side;\n self.region.end.side = end_side;\n }\n\n /// Convert selection to grid coordinates.\n pub fn to_range<T>(&self, term: &Term<T>) -> Option<SelectionRange> {\n let grid = term.grid();\n let columns = grid.columns();\n\n // Order start above the end.\n let mut start = self.region.start;\n let mut end = self.region.end;\n\n if start.point > end.point {\n mem::swap(&mut start, &mut end);\n }\n\n // Clamp selection to within grid boundaries.\n if end.point.line < term.topmost_line() {\n return None;\n }\n start.point = start.point.grid_clamp(term, Boundary::Grid);\n end.point = end.point.grid_clamp(term, Boundary::Grid);\n\n match self.ty {\n SelectionType::Simple => self.range_simple(start, end, columns),\n SelectionType::Block => self.range_block(start, end),\n SelectionType::Semantic => Some(Self::range_semantic(term, start.point, end.point)),\n SelectionType::Lines => Some(Self::range_lines(term, start.point, end.point)),\n }\n }\n\n fn range_semantic<T>(term: &Term<T>, mut start: Point, mut end: Point) -> SelectionRange {\n if start == end {\n if let Some(matching) = term.bracket_search(start) {\n if (matching.line == start.line && matching.column < start.column)\n || (matching.line < start.line)\n {\n start = matching;\n } else {\n end = matching;\n }\n\n return SelectionRange { start, end, is_block: false };\n }\n }\n\n let start = term.semantic_search_left(start);\n let end = term.semantic_search_right(end);\n\n SelectionRange { start, end, is_block: false }\n }\n\n fn range_lines<T>(term: &Term<T>, start: Point, end: Point) -> SelectionRange {\n let start = term.line_search_left(start);\n let end = 
term.line_search_right(end);\n\n SelectionRange { start, end, is_block: false }\n }\n\n fn range_simple(\n &self,\n mut start: Anchor,\n mut end: Anchor,\n columns: usize,\n ) -> Option<SelectionRange> {\n if self.is_empty() {\n return None;\n }\n\n // Remove last cell if selection ends to the left of a cell.\n if end.side == Side::Left && start.point != end.point {\n // Special case when selection ends to left of first cell.\n if end.point.column == 0 {\n end.point.column = Column(columns - 1);\n end.point.line -= 1;\n } else {\n end.point.column -= 1;\n }\n }\n\n // Remove first cell if selection starts at the right of a cell.\n if start.side == Side::Right && start.point != end.point {\n start.point.column += 1;\n\n // Wrap to next line when selection starts to the right of last column.\n if start.point.column == columns {\n start.point.column = Column(0);\n start.point.line += 1;\n }\n }\n\n Some(SelectionRange { start: start.point, end: end.point, is_block: false })\n }\n\n fn range_block(&self, mut start: Anchor, mut end: Anchor) -> Option<SelectionRange> {\n if self.is_empty() {\n return None;\n }\n\n // Always go top-left -> bottom-right.\n if start.point.column > end.point.column {\n mem::swap(&mut start.side, &mut end.side);\n mem::swap(&mut start.point.column, &mut end.point.column);\n }\n\n // Remove last cell if selection ends to the left of a cell.\n if end.side == Side::Left && start.point != end.point && end.point.column.0 > 0 {\n end.point.column -= 1;\n }\n\n // Remove first cell if selection starts at the right of a cell.\n if start.side == Side::Right && start.point != end.point {\n start.point.column += 1;\n }\n\n Some(SelectionRange { start: start.point, end: end.point, is_block: true })\n }\n}",
"class_signature": "impl Selection"
} |
range_semantic | alacritty-master/alacritty_terminal/src/selection.rs | fn range_semantic(term: &Term<T>, mut start: Point, mut end: Point) -> SelectionRange {
if start == end {
if let Some(matching) = term.bracket_search(start) {
if (matching.line == start.line && matching.column < start.column)
|| (matching.line < start.line)
{
start = matching;
} else {
end = matching;
}
return SelectionRange { start, end, is_block: false };
}
}
let start = term.semantic_search_left(start);
let end = term.semantic_search_right(end);
SelectionRange { start, end, is_block: false }
} | //! State management for a selection in the grid.
//!
//! A selection should start when the mouse is clicked, and it should be
//! finalized when the button is released. The selection should be cleared
//! when text is added/removed/scrolled on the screen. The selection should
//! also be cleared if the user clicks off of the selection.
use std::cmp::min;
use std::mem;
use std::ops::{Bound, Range, RangeBounds};
use crate::grid::{Dimensions, GridCell, Indexed};
use crate::index::{Boundary, Column, Line, Point, Side};
use crate::term::cell::{Cell, Flags};
use crate::term::Term;
use crate::vte::ansi::CursorShape;
/// A Point and side within that point.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
struct Anchor {
    /// Cell the anchor is attached to.
    point: Point,
    /// Half of the cell (left or right) the anchor sits on.
    side: Side,
}
impl Anchor {
    /// Create an anchor from a cell position and the side of that cell.
    fn new(point: Point, side: Side) -> Anchor {
        Anchor { point, side }
    }
}
/// Represents a range of selected cells.
///
/// `start` is expected to be ordered at or before `end`; see
/// [`SelectionRange::new`], which asserts this invariant.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub struct SelectionRange {
    /// Start point, top left of the selection.
    pub start: Point,
    /// End point, bottom right of the selection.
    pub end: Point,
    /// Whether this selection is a block selection.
    pub is_block: bool,
}
impl SelectionRange {
    /// Create a new selection range from ordered points.
    ///
    /// # Panics
    ///
    /// Panics if `start` is ordered after `end`.
    pub fn new(start: Point, end: Point, is_block: bool) -> Self {
        assert!(start <= end);
        Self { start, end, is_block }
    }
}
impl SelectionRange {
    /// Check if a point lies within the selection.
    pub fn contains(&self, point: Point) -> bool {
        // For non-block selections, the column bounds only apply on the first
        // and last line; interior lines are fully covered. Block selections
        // enforce the column bounds on every line.
        self.start.line <= point.line
            && self.end.line >= point.line
            && (self.start.column <= point.column
                || (self.start.line != point.line && !self.is_block))
            && (self.end.column >= point.column || (self.end.line != point.line && !self.is_block))
    }
    /// Check if the cell at a point is part of the selection.
    pub fn contains_cell(
        &self,
        indexed: &Indexed<&Cell>,
        point: Point,
        shape: CursorShape,
    ) -> bool {
        // Do not invert block cursor at selection boundaries.
        if shape == CursorShape::Block
            && point == indexed.point
            && (self.start == indexed.point
                || self.end == indexed.point
                || (self.is_block
                    && ((self.start.line == indexed.point.line
                        && self.end.column == indexed.point.column)
                        || (self.end.line == indexed.point.line
                            && self.start.column == indexed.point.column))))
        {
            return false;
        }
        // Point itself is selected.
        if self.contains(indexed.point) {
            return true;
        }
        // Check if a wide char's trailing spacer is selected.
        indexed.cell.flags().contains(Flags::WIDE_CHAR)
            && self.contains(Point::new(indexed.point.line, indexed.point.column + 1))
    }
}
/// Different kinds of selection.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum SelectionType {
    /// Track exactly the cells between the anchors, without expansion.
    Simple,
    /// Select a rectangular (column-aligned) region.
    Block,
    /// Expand the selection to semantic boundaries in both directions.
    Semantic,
    /// Always select entire lines.
    Lines,
}
/// Describes a region of a 2-dimensional area.
///
/// Used to track a text selection. There are four supported modes, each with its own constructor:
/// [`simple`], [`block`], [`semantic`], and [`lines`]. The [`simple`] mode precisely tracks which
/// cells are selected without any expansion. [`block`] will select rectangular regions.
/// [`semantic`] mode expands the initial selection to the nearest semantic escape char in either
/// direction. [`lines`] will always select entire lines.
///
/// Calls to [`update`] operate differently based on the selection kind. The [`simple`] and [`block`]
/// modes do nothing special, simply track points and sides. [`semantic`] will continue to expand
/// out to semantic boundaries as the selection point changes. Similarly, [`lines`] will always
/// expand the new point to encompass entire lines.
///
/// [`simple`]: enum.Selection.html#method.simple
/// [`block`]: enum.Selection.html#method.block
/// [`semantic`]: enum.Selection.html#method.semantic
/// [`lines`]: enum.Selection.html#method.lines
/// [`update`]: enum.Selection.html#method.update
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct Selection {
pub ty: SelectionType,
region: Range<Anchor>,
}
impl Selection {
    /// Create a new selection with both ends anchored at `location` on the given `side`.
    pub fn new(ty: SelectionType, location: Point, side: Side) -> Selection {
        Self {
            region: Range { start: Anchor::new(location, side), end: Anchor::new(location, side) },
            ty,
        }
    }
    /// Update the end of the selection.
    pub fn update(&mut self, point: Point, side: Side) {
        self.region.end = Anchor::new(point, side);
    }
    /// Shift the selection by `delta` lines to follow a rotation of the region `range`.
    ///
    /// Anchors that would leave the region are clamped to its edges. Returns `None`
    /// when the rotation leaves no visible selection.
    pub fn rotate<D: Dimensions>(
        mut self,
        dimensions: &D,
        range: &Range<Line>,
        delta: i32,
    ) -> Option<Selection> {
        let bottommost_line = dimensions.bottommost_line();
        let range_bottom = range.end;
        let range_top = range.start;
        // Order the anchors so `start` is the topmost point.
        let (mut start, mut end) = (&mut self.region.start, &mut self.region.end);
        if start.point > end.point {
            mem::swap(&mut start, &mut end);
        }
        // Rotate start of selection.
        if (start.point.line >= range_top || range_top == 0) && start.point.line < range_bottom {
            start.point.line = min(start.point.line - delta, bottommost_line);
            // If end is within the same region, delete selection once start rotates out.
            if start.point.line >= range_bottom && end.point.line < range_bottom {
                return None;
            }
            // Clamp selection to start of region.
            if start.point.line < range_top && range_top != 0 {
                if self.ty != SelectionType::Block {
                    start.point.column = Column(0);
                    start.side = Side::Left;
                }
                start.point.line = range_top;
            }
        }
        // Rotate end of selection.
        if (end.point.line >= range_top || range_top == 0) && end.point.line < range_bottom {
            end.point.line = min(end.point.line - delta, bottommost_line);
            // Delete selection if end has overtaken the start.
            if end.point.line < start.point.line {
                return None;
            }
            // Clamp selection to end of region.
            if end.point.line >= range_bottom {
                if self.ty != SelectionType::Block {
                    end.point.column = dimensions.last_column();
                    end.side = Side::Right;
                }
                end.point.line = range_bottom - 1;
            }
        }
        Some(self)
    }
    /// Whether the selection covers no whole cell.
    pub fn is_empty(&self) -> bool {
        match self.ty {
            SelectionType::Simple => {
                let (mut start, mut end) = (self.region.start, self.region.end);
                if start.point > end.point {
                    mem::swap(&mut start, &mut end);
                }
                // Simple selection is empty when the points are identical
                // or two adjacent cells have the sides right -> left.
                start == end
                    || (start.side == Side::Right
                        && end.side == Side::Left
                        && (start.point.line == end.point.line)
                        && start.point.column + 1 == end.point.column)
            },
            SelectionType::Block => {
                let (start, end) = (self.region.start, self.region.end);
                // Block selection is empty when the points' columns and sides are identical
                // or two cells with adjacent columns have the sides right -> left,
                // regardless of their lines
                (start.point.column == end.point.column && start.side == end.side)
                    || (start.point.column + 1 == end.point.column
                        && start.side == Side::Right
                        && end.side == Side::Left)
                    || (end.point.column + 1 == start.point.column
                        && start.side == Side::Left
                        && end.side == Side::Right)
            },
            // Semantic and line selections always cover at least one cell.
            SelectionType::Semantic | SelectionType::Lines => false,
        }
    }
    /// Check whether selection contains any point in a given range.
    pub fn intersects_range<R: RangeBounds<Line>>(&self, range: R) -> bool {
        let mut start = self.region.start.point.line;
        let mut end = self.region.end.point.line;
        if start > end {
            mem::swap(&mut start, &mut end);
        }
        // Normalize the bounds into an inclusive [range_top, range_bottom] line interval.
        let range_top = match range.start_bound() {
            Bound::Included(&range_start) => range_start,
            Bound::Excluded(&range_start) => range_start + 1,
            Bound::Unbounded => Line(i32::MIN),
        };
        let range_bottom = match range.end_bound() {
            Bound::Included(&range_end) => range_end,
            Bound::Excluded(&range_end) => range_end - 1,
            Bound::Unbounded => Line(i32::MAX),
        };
        range_bottom >= start && range_top <= end
    }
    /// Expand selection sides to include all cells.
    pub fn include_all(&mut self) {
        let (start, end) = (self.region.start.point, self.region.end.point);
        let (start_side, end_side) = match self.ty {
            // Block selections order their anchors by column first, then by line.
            SelectionType::Block
                if start.column > end.column
                    || (start.column == end.column && start.line > end.line) =>
            {
                (Side::Right, Side::Left)
            },
            SelectionType::Block => (Side::Left, Side::Right),
            _ if start > end => (Side::Right, Side::Left),
            _ => (Side::Left, Side::Right),
        };
        self.region.start.side = start_side;
        self.region.end.side = end_side;
    }
    /// Convert selection to grid coordinates.
    ///
    /// Returns `None` when the selection is empty or lies entirely above the scrollback.
    pub fn to_range<T>(&self, term: &Term<T>) -> Option<SelectionRange> {
        let grid = term.grid();
        let columns = grid.columns();
        // Order start above the end.
        let mut start = self.region.start;
        let mut end = self.region.end;
        if start.point > end.point {
            mem::swap(&mut start, &mut end);
        }
        // Clamp selection to within grid boundaries.
        if end.point.line < term.topmost_line() {
            return None;
        }
        start.point = start.point.grid_clamp(term, Boundary::Grid);
        end.point = end.point.grid_clamp(term, Boundary::Grid);
        match self.ty {
            SelectionType::Simple => self.range_simple(start, end, columns),
            SelectionType::Block => self.range_block(start, end),
            SelectionType::Semantic => Some(Self::range_semantic(term, start.point, end.point)),
            SelectionType::Lines => Some(Self::range_lines(term, start.point, end.point)),
        }
    }
    /// Expand a semantic selection to the surrounding semantic boundaries.
    fn range_semantic<T>(term: &Term<T>, mut start: Point, mut end: Point) -> SelectionRange {
        // A single-point selection tries to pair up with a matching bracket instead.
        if start == end {
            if let Some(matching) = term.bracket_search(start) {
                if (matching.line == start.line && matching.column < start.column)
                    || (matching.line < start.line)
                {
                    start = matching;
                } else {
                    end = matching;
                }
                return SelectionRange { start, end, is_block: false };
            }
        }
        let start = term.semantic_search_left(start);
        let end = term.semantic_search_right(end);
        SelectionRange { start, end, is_block: false }
    }
    /// Expand a line selection to the full extent of the surrounding lines.
    fn range_lines<T>(term: &Term<T>, start: Point, end: Point) -> SelectionRange {
        let start = term.line_search_left(start);
        let end = term.line_search_right(end);
        SelectionRange { start, end, is_block: false }
    }
    /// Resolve a simple selection to the exact cells it covers.
    fn range_simple(
        &self,
        mut start: Anchor,
        mut end: Anchor,
        columns: usize,
    ) -> Option<SelectionRange> {
        if self.is_empty() {
            return None;
        }
        // Remove last cell if selection ends to the left of a cell.
        if end.side == Side::Left && start.point != end.point {
            // Special case when selection ends to left of first cell.
            if end.point.column == 0 {
                end.point.column = Column(columns - 1);
                end.point.line -= 1;
            } else {
                end.point.column -= 1;
            }
        }
        // Remove first cell if selection starts at the right of a cell.
        if start.side == Side::Right && start.point != end.point {
            start.point.column += 1;
            // Wrap to next line when selection starts to the right of last column.
            if start.point.column == columns {
                start.point.column = Column(0);
                start.point.line += 1;
            }
        }
        Some(SelectionRange { start: start.point, end: end.point, is_block: false })
    }
    /// Resolve a block selection to its rectangular cell range.
    fn range_block(&self, mut start: Anchor, mut end: Anchor) -> Option<SelectionRange> {
        if self.is_empty() {
            return None;
        }
        // Always go top-left -> bottom-right.
        if start.point.column > end.point.column {
            mem::swap(&mut start.side, &mut end.side);
            mem::swap(&mut start.point.column, &mut end.point.column);
        }
        // Remove last cell if selection ends to the left of a cell.
        if end.side == Side::Left && start.point != end.point && end.point.column.0 > 0 {
            end.point.column -= 1;
        }
        // Remove first cell if selection starts at the right of a cell.
        if start.side == Side::Right && start.point != end.point {
            start.point.column += 1;
        }
        Some(SelectionRange { start: start.point, end: end.point, is_block: true })
    }
}
/// Tests for selection.
///
/// There are comments on all of the tests describing the selection. Pictograms
/// are used to avoid ambiguity. Grid cells are represented by a [ ]. Only
/// cells that are completely covered are counted in a selection. Ends are
/// represented by `B` and `E` for begin and end, respectively. A selected cell
/// looks like [XX], [BX] (at the start), [XB] (at the end), [XE] (at the end),
/// and [EX] (at the start), or [BE] for a single cell. Partially selected cells
/// look like [ B] and [E ].
#[cfg(test)]
mod tests {
    use super::*;
    use crate::index::{Column, Point, Side};
    use crate::term::test::TermSize;
    use crate::term::{Config, Term};
    /// Create a blank terminal with `height` lines and `width` columns.
    fn term(height: usize, width: usize) -> Term<()> {
        let size = TermSize::new(width, height);
        Term::new(Config::default(), &size, ())
    }
    /// Test case of single cell selection.
    ///
    /// 1. [ ]
    /// 2. [B ]
    /// 3. [BE]
    #[test]
    fn single_cell_left_to_right() {
        let location = Point::new(Line(0), Column(0));
        let mut selection = Selection::new(SelectionType::Simple, location, Side::Left);
        selection.update(location, Side::Right);
        assert_eq!(selection.to_range(&term(1, 2)).unwrap(), SelectionRange {
            start: location,
            end: location,
            is_block: false
        });
    }
    /// Test case of single cell selection.
    ///
    /// 1. [ ]
    /// 2. [ B]
    /// 3. [EB]
    #[test]
    fn single_cell_right_to_left() {
        let location = Point::new(Line(0), Column(0));
        let mut selection = Selection::new(SelectionType::Simple, location, Side::Right);
        selection.update(location, Side::Left);
        assert_eq!(selection.to_range(&term(1, 2)).unwrap(), SelectionRange {
            start: location,
            end: location,
            is_block: false
        });
    }
    /// Test adjacent cell selection from left to right.
    ///
    /// 1. [ ][ ]
    /// 2. [ B][ ]
    /// 3. [ B][E ]
    #[test]
    fn between_adjacent_cells_left_to_right() {
        let mut selection =
            Selection::new(SelectionType::Simple, Point::new(Line(0), Column(0)), Side::Right);
        selection.update(Point::new(Line(0), Column(1)), Side::Left);
        // No whole cell is covered, so the selection resolves to nothing.
        assert_eq!(selection.to_range(&term(1, 2)), None);
    }
    /// Test adjacent cell selection from right to left.
    ///
    /// 1. [ ][ ]
    /// 2. [ ][B ]
    /// 3. [ E][B ]
    #[test]
    fn between_adjacent_cells_right_to_left() {
        let mut selection =
            Selection::new(SelectionType::Simple, Point::new(Line(0), Column(1)), Side::Left);
        selection.update(Point::new(Line(0), Column(0)), Side::Right);
        assert_eq!(selection.to_range(&term(1, 2)), None);
    }
    #[rustfmt::skip]
    /// Test selection across adjacent lines.
    ///
    /// 1. [ ][ ][ ][ ][ ]
    /// [ ][ ][ ][ ][ ]
    /// 2. [ ][ B][ ][ ][ ]
    /// [ ][ ][ ][ ][ ]
    /// 3. [ ][ B][XX][XX][XX]
    /// [XX][XE][ ][ ][ ]
    #[test]
    fn across_adjacent_lines_upward_final_cell_exclusive() {
        let mut selection =
            Selection::new(SelectionType::Simple, Point::new(Line(0), Column(1)), Side::Right);
        selection.update(Point::new(Line(1), Column(1)), Side::Right);
        assert_eq!(selection.to_range(&term(2, 5)).unwrap(), SelectionRange {
            start: Point::new(Line(0), Column(2)),
            end: Point::new(Line(1), Column(1)),
            is_block: false,
        });
    }
    #[rustfmt::skip]
    /// Test selection across adjacent lines.
    ///
    /// 1. [ ][ ][ ][ ][ ]
    /// [ ][ ][ ][ ][ ]
    /// 2. [ ][ ][ ][ ][ ]
    /// [ ][ B][ ][ ][ ]
    /// 3. [ ][ E][XX][XX][XX]
    /// [XX][XB][ ][ ][ ]
    /// 4. [ E][XX][XX][XX][XX]
    /// [XX][XB][ ][ ][ ]
    #[test]
    fn selection_bigger_then_smaller() {
        let mut selection =
            Selection::new(SelectionType::Simple, Point::new(Line(1), Column(1)), Side::Right);
        selection.update(Point::new(Line(0), Column(1)), Side::Right);
        selection.update(Point::new(Line(0), Column(0)), Side::Right);
        assert_eq!(selection.to_range(&term(2, 5)).unwrap(), SelectionRange {
            start: Point::new(Line(0), Column(1)),
            end: Point::new(Line(1), Column(1)),
            is_block: false,
        });
    }
    #[test]
    fn line_selection() {
        let size = (10, 5);
        let mut selection =
            Selection::new(SelectionType::Lines, Point::new(Line(9), Column(1)), Side::Left);
        selection.update(Point::new(Line(4), Column(1)), Side::Right);
        selection = selection.rotate(&size, &(Line(0)..Line(size.0 as i32)), 4).unwrap();
        assert_eq!(selection.to_range(&term(size.0, size.1)).unwrap(), SelectionRange {
            start: Point::new(Line(0), Column(0)),
            end: Point::new(Line(5), Column(4)),
            is_block: false,
        });
    }
    #[test]
    fn semantic_selection() {
        let size = (10, 5);
        let mut selection =
            Selection::new(SelectionType::Semantic, Point::new(Line(9), Column(3)), Side::Left);
        selection.update(Point::new(Line(4), Column(1)), Side::Right);
        selection = selection.rotate(&size, &(Line(0)..Line(size.0 as i32)), 4).unwrap();
        assert_eq!(selection.to_range(&term(size.0, size.1)).unwrap(), SelectionRange {
            start: Point::new(Line(0), Column(1)),
            end: Point::new(Line(5), Column(3)),
            is_block: false,
        });
    }
    #[test]
    fn simple_selection() {
        let size = (10, 5);
        let mut selection =
            Selection::new(SelectionType::Simple, Point::new(Line(9), Column(3)), Side::Right);
        selection.update(Point::new(Line(4), Column(1)), Side::Right);
        selection = selection.rotate(&size, &(Line(0)..Line(size.0 as i32)), 4).unwrap();
        assert_eq!(selection.to_range(&term(size.0, size.1)).unwrap(), SelectionRange {
            start: Point::new(Line(0), Column(2)),
            end: Point::new(Line(5), Column(3)),
            is_block: false,
        });
    }
    #[test]
    fn block_selection() {
        let size = (10, 5);
        let mut selection =
            Selection::new(SelectionType::Block, Point::new(Line(9), Column(3)), Side::Right);
        selection.update(Point::new(Line(4), Column(1)), Side::Right);
        selection = selection.rotate(&size, &(Line(0)..Line(size.0 as i32)), 4).unwrap();
        assert_eq!(selection.to_range(&term(size.0, size.1)).unwrap(), SelectionRange {
            start: Point::new(Line(0), Column(2)),
            end: Point::new(Line(5), Column(3)),
            is_block: true
        });
    }
    #[test]
    fn simple_is_empty() {
        let mut selection =
            Selection::new(SelectionType::Simple, Point::new(Line(1), Column(0)), Side::Right);
        assert!(selection.is_empty());
        selection.update(Point::new(Line(1), Column(1)), Side::Left);
        assert!(selection.is_empty());
        selection.update(Point::new(Line(0), Column(0)), Side::Right);
        assert!(!selection.is_empty());
    }
    #[test]
    fn block_is_empty() {
        let mut selection =
            Selection::new(SelectionType::Block, Point::new(Line(1), Column(0)), Side::Right);
        assert!(selection.is_empty());
        selection.update(Point::new(Line(1), Column(1)), Side::Left);
        assert!(selection.is_empty());
        selection.update(Point::new(Line(1), Column(1)), Side::Right);
        assert!(!selection.is_empty());
        selection.update(Point::new(Line(0), Column(0)), Side::Right);
        assert!(selection.is_empty());
        selection.update(Point::new(Line(0), Column(1)), Side::Left);
        assert!(selection.is_empty());
        selection.update(Point::new(Line(0), Column(1)), Side::Right);
        assert!(!selection.is_empty());
    }
    #[test]
    fn rotate_in_region_up() {
        let size = (10, 5);
        let mut selection =
            Selection::new(SelectionType::Simple, Point::new(Line(7), Column(3)), Side::Right);
        selection.update(Point::new(Line(4), Column(1)), Side::Right);
        selection = selection.rotate(&size, &(Line(1)..Line(size.0 as i32 - 1)), 4).unwrap();
        assert_eq!(selection.to_range(&term(size.0, size.1)).unwrap(), SelectionRange {
            start: Point::new(Line(1), Column(0)),
            end: Point::new(Line(3), Column(3)),
            is_block: false,
        });
    }
    #[test]
    fn rotate_in_region_down() {
        let size = (10, 5);
        let mut selection =
            Selection::new(SelectionType::Simple, Point::new(Line(4), Column(3)), Side::Right);
        selection.update(Point::new(Line(1), Column(1)), Side::Left);
        selection = selection.rotate(&size, &(Line(1)..Line(size.0 as i32 - 1)), -5).unwrap();
        assert_eq!(selection.to_range(&term(size.0, size.1)).unwrap(), SelectionRange {
            start: Point::new(Line(6), Column(1)),
            end: Point::new(Line(8), size.last_column()),
            is_block: false,
        });
    }
    #[test]
    fn rotate_in_region_up_block() {
        let size = (10, 5);
        let mut selection =
            Selection::new(SelectionType::Block, Point::new(Line(7), Column(3)), Side::Right);
        selection.update(Point::new(Line(4), Column(1)), Side::Right);
        selection = selection.rotate(&size, &(Line(1)..Line(size.0 as i32 - 1)), 4).unwrap();
        assert_eq!(selection.to_range(&term(size.0, size.1)).unwrap(), SelectionRange {
            start: Point::new(Line(1), Column(2)),
            end: Point::new(Line(3), Column(3)),
            is_block: true,
        });
    }
    #[test]
    fn range_intersection() {
        let mut selection =
            Selection::new(SelectionType::Lines, Point::new(Line(3), Column(1)), Side::Left);
        selection.update(Point::new(Line(6), Column(1)), Side::Right);
        assert!(selection.intersects_range(..));
        assert!(selection.intersects_range(Line(2)..));
        assert!(selection.intersects_range(Line(2)..=Line(4)));
        assert!(selection.intersects_range(Line(2)..=Line(7)));
        assert!(selection.intersects_range(Line(4)..=Line(5)));
        assert!(selection.intersects_range(Line(5)..Line(8)));
        assert!(!selection.intersects_range(..=Line(2)));
        assert!(!selection.intersects_range(Line(7)..=Line(8)));
    }
}
| rust | {
"argument_definitions": [
{
"definitions": [
"pub struct Term<T> {\n /// Terminal focus controlling the cursor shape.\n pub is_focused: bool,\n\n /// Cursor for keyboard selection.\n pub vi_mode_cursor: ViModeCursor,\n\n pub selection: Option<Selection>,\n\n /// Currently active grid.\n ///\n /// Tracks the screen buffer currently in use. While the alternate screen buffer is active,\n /// this will be the alternate grid. Otherwise it is the primary screen buffer.\n grid: Grid<Cell>,\n\n /// Currently inactive grid.\n ///\n /// Opposite of the active grid. While the alternate screen buffer is active, this will be the\n /// primary grid. Otherwise it is the alternate screen buffer.\n inactive_grid: Grid<Cell>,\n\n /// Index into `charsets`, pointing to what ASCII is currently being mapped to.\n active_charset: CharsetIndex,\n\n /// Tabstops.\n tabs: TabStops,\n\n /// Mode flags.\n mode: TermMode,\n\n /// Scroll region.\n ///\n /// Range going from top to bottom of the terminal, indexed from the top of the viewport.\n scroll_region: Range<Line>,\n\n /// Modified terminal colors.\n colors: Colors,\n\n /// Current style of the cursor.\n cursor_style: Option<CursorStyle>,\n\n /// Proxy for sending events to the event loop.\n event_proxy: T,\n\n /// Current title of the window.\n title: Option<String>,\n\n /// Stack of saved window titles. When a title is popped from this stack, the `title` for the\n /// term is set.\n title_stack: Vec<Option<String>>,\n\n /// The stack for the keyboard modes.\n keyboard_mode_stack: Vec<KeyboardModes>,\n\n /// Currently inactive keyboard mode stack.\n inactive_keyboard_mode_stack: Vec<KeyboardModes>,\n\n /// Information about damaged cells.\n damage: TermDamageState,\n\n /// Config directly for the terminal.\n config: Config,\n}"
],
"name": "term",
"type": "&Term<T>"
},
{
"definitions": [
"pub struct Point<L = Line, C = Column> {\n pub line: L,\n pub column: C,\n}"
],
"name": "start",
"type": "Point"
},
{
"definitions": [
"pub struct Point<L = Line, C = Column> {\n pub line: L,\n pub column: C,\n}"
],
"name": "end",
"type": "Point"
}
],
"end_line": 317,
"name": "range_semantic",
"signature": "fn range_semantic(term: &Term<T>, mut start: Point, mut end: Point) -> SelectionRange",
"start_line": 298
} | {
"class_name": "impl Selection {\n pub fn new(ty: SelectionType, location: Point, side: Side) -> Selection {\n Self {\n region: Range { start: Anchor::new(location, side), end: Anchor::new(location, side) },\n ty,\n }\n }\n\n /// Update the end of the selection.\n pub fn update(&mut self, point: Point, side: Side) {\n self.region.end = Anchor::new(point, side);\n }\n\n pub fn rotate<D: Dimensions>(\n mut self,\n dimensions: &D,\n range: &Range<Line>,\n delta: i32,\n ) -> Option<Selection> {\n let bottommost_line = dimensions.bottommost_line();\n let range_bottom = range.end;\n let range_top = range.start;\n\n let (mut start, mut end) = (&mut self.region.start, &mut self.region.end);\n if start.point > end.point {\n mem::swap(&mut start, &mut end);\n }\n\n // Rotate start of selection.\n if (start.point.line >= range_top || range_top == 0) && start.point.line < range_bottom {\n start.point.line = min(start.point.line - delta, bottommost_line);\n\n // If end is within the same region, delete selection once start rotates out.\n if start.point.line >= range_bottom && end.point.line < range_bottom {\n return None;\n }\n\n // Clamp selection to start of region.\n if start.point.line < range_top && range_top != 0 {\n if self.ty != SelectionType::Block {\n start.point.column = Column(0);\n start.side = Side::Left;\n }\n start.point.line = range_top;\n }\n }\n\n // Rotate end of selection.\n if (end.point.line >= range_top || range_top == 0) && end.point.line < range_bottom {\n end.point.line = min(end.point.line - delta, bottommost_line);\n\n // Delete selection if end has overtaken the start.\n if end.point.line < start.point.line {\n return None;\n }\n\n // Clamp selection to end of region.\n if end.point.line >= range_bottom {\n if self.ty != SelectionType::Block {\n end.point.column = dimensions.last_column();\n end.side = Side::Right;\n }\n end.point.line = range_bottom - 1;\n }\n }\n\n Some(self)\n }\n\n pub fn is_empty(&self) -> bool {\n match self.ty {\n 
SelectionType::Simple => {\n let (mut start, mut end) = (self.region.start, self.region.end);\n if start.point > end.point {\n mem::swap(&mut start, &mut end);\n }\n\n // Simple selection is empty when the points are identical\n // or two adjacent cells have the sides right -> left.\n start == end\n || (start.side == Side::Right\n && end.side == Side::Left\n && (start.point.line == end.point.line)\n && start.point.column + 1 == end.point.column)\n },\n SelectionType::Block => {\n let (start, end) = (self.region.start, self.region.end);\n\n // Block selection is empty when the points' columns and sides are identical\n // or two cells with adjacent columns have the sides right -> left,\n // regardless of their lines\n (start.point.column == end.point.column && start.side == end.side)\n || (start.point.column + 1 == end.point.column\n && start.side == Side::Right\n && end.side == Side::Left)\n || (end.point.column + 1 == start.point.column\n && start.side == Side::Left\n && end.side == Side::Right)\n },\n SelectionType::Semantic | SelectionType::Lines => false,\n }\n }\n\n /// Check whether selection contains any point in a given range.\n pub fn intersects_range<R: RangeBounds<Line>>(&self, range: R) -> bool {\n let mut start = self.region.start.point.line;\n let mut end = self.region.end.point.line;\n\n if start > end {\n mem::swap(&mut start, &mut end);\n }\n\n let range_top = match range.start_bound() {\n Bound::Included(&range_start) => range_start,\n Bound::Excluded(&range_start) => range_start + 1,\n Bound::Unbounded => Line(i32::MIN),\n };\n\n let range_bottom = match range.end_bound() {\n Bound::Included(&range_end) => range_end,\n Bound::Excluded(&range_end) => range_end - 1,\n Bound::Unbounded => Line(i32::MAX),\n };\n\n range_bottom >= start && range_top <= end\n }\n\n /// Expand selection sides to include all cells.\n pub fn include_all(&mut self) {\n let (start, end) = (self.region.start.point, self.region.end.point);\n let (start_side, end_side) = match 
self.ty {\n SelectionType::Block\n if start.column > end.column\n || (start.column == end.column && start.line > end.line) =>\n {\n (Side::Right, Side::Left)\n },\n SelectionType::Block => (Side::Left, Side::Right),\n _ if start > end => (Side::Right, Side::Left),\n _ => (Side::Left, Side::Right),\n };\n\n self.region.start.side = start_side;\n self.region.end.side = end_side;\n }\n\n /// Convert selection to grid coordinates.\n pub fn to_range<T>(&self, term: &Term<T>) -> Option<SelectionRange> {\n let grid = term.grid();\n let columns = grid.columns();\n\n // Order start above the end.\n let mut start = self.region.start;\n let mut end = self.region.end;\n\n if start.point > end.point {\n mem::swap(&mut start, &mut end);\n }\n\n // Clamp selection to within grid boundaries.\n if end.point.line < term.topmost_line() {\n return None;\n }\n start.point = start.point.grid_clamp(term, Boundary::Grid);\n end.point = end.point.grid_clamp(term, Boundary::Grid);\n\n match self.ty {\n SelectionType::Simple => self.range_simple(start, end, columns),\n SelectionType::Block => self.range_block(start, end),\n SelectionType::Semantic => Some(Self::range_semantic(term, start.point, end.point)),\n SelectionType::Lines => Some(Self::range_lines(term, start.point, end.point)),\n }\n }\n\n fn range_semantic<T>(term: &Term<T>, mut start: Point, mut end: Point) -> SelectionRange {\n if start == end {\n if let Some(matching) = term.bracket_search(start) {\n if (matching.line == start.line && matching.column < start.column)\n || (matching.line < start.line)\n {\n start = matching;\n } else {\n end = matching;\n }\n\n return SelectionRange { start, end, is_block: false };\n }\n }\n\n let start = term.semantic_search_left(start);\n let end = term.semantic_search_right(end);\n\n SelectionRange { start, end, is_block: false }\n }\n\n fn range_lines<T>(term: &Term<T>, start: Point, end: Point) -> SelectionRange {\n let start = term.line_search_left(start);\n let end = 
term.line_search_right(end);\n\n SelectionRange { start, end, is_block: false }\n }\n\n fn range_simple(\n &self,\n mut start: Anchor,\n mut end: Anchor,\n columns: usize,\n ) -> Option<SelectionRange> {\n if self.is_empty() {\n return None;\n }\n\n // Remove last cell if selection ends to the left of a cell.\n if end.side == Side::Left && start.point != end.point {\n // Special case when selection ends to left of first cell.\n if end.point.column == 0 {\n end.point.column = Column(columns - 1);\n end.point.line -= 1;\n } else {\n end.point.column -= 1;\n }\n }\n\n // Remove first cell if selection starts at the right of a cell.\n if start.side == Side::Right && start.point != end.point {\n start.point.column += 1;\n\n // Wrap to next line when selection starts to the right of last column.\n if start.point.column == columns {\n start.point.column = Column(0);\n start.point.line += 1;\n }\n }\n\n Some(SelectionRange { start: start.point, end: end.point, is_block: false })\n }\n\n fn range_block(&self, mut start: Anchor, mut end: Anchor) -> Option<SelectionRange> {\n if self.is_empty() {\n return None;\n }\n\n // Always go top-left -> bottom-right.\n if start.point.column > end.point.column {\n mem::swap(&mut start.side, &mut end.side);\n mem::swap(&mut start.point.column, &mut end.point.column);\n }\n\n // Remove last cell if selection ends to the left of a cell.\n if end.side == Side::Left && start.point != end.point && end.point.column.0 > 0 {\n end.point.column -= 1;\n }\n\n // Remove first cell if selection starts at the right of a cell.\n if start.side == Side::Right && start.point != end.point {\n start.point.column += 1;\n }\n\n Some(SelectionRange { start: start.point, end: end.point, is_block: true })\n }\n}",
"class_signature": "impl Selection"
} |
range_simple | alacritty-master/alacritty_terminal/src/selection.rs | fn range_simple(
&self,
mut start: Anchor,
mut end: Anchor,
columns: usize,
) -> Option<SelectionRange> {
if self.is_empty() {
return None;
}
// Remove last cell if selection ends to the left of a cell.
if end.side == Side::Left && start.point != end.point {
// Special case when selection ends to left of first cell.
if end.point.column == 0 {
end.point.column = Column(columns - 1);
end.point.line -= 1;
} else {
end.point.column -= 1;
}
}
// Remove first cell if selection starts at the right of a cell.
if start.side == Side::Right && start.point != end.point {
start.point.column += 1;
// Wrap to next line when selection starts to the right of last column.
if start.point.column == columns {
start.point.column = Column(0);
start.point.line += 1;
}
}
Some(SelectionRange { start: start.point, end: end.point, is_block: false })
} | //! State management for a selection in the grid.
//!
//! A selection should start when the mouse is clicked, and it should be
//! finalized when the button is released. The selection should be cleared
//! when text is added/removed/scrolled on the screen. The selection should
//! also be cleared if the user clicks off of the selection.
use std::cmp::min;
use std::mem;
use std::ops::{Bound, Range, RangeBounds};
use crate::grid::{Dimensions, GridCell, Indexed};
use crate::index::{Boundary, Column, Line, Point, Side};
use crate::term::cell::{Cell, Flags};
use crate::term::Term;
use crate::vte::ansi::CursorShape;
/// A Point and side within that point.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
struct Anchor {
    // Cell position of the anchor.
    point: Point,
    // Which side of the cell the anchor rests on.
    side: Side,
}
impl Anchor {
fn new(point: Point, side: Side) -> Anchor {
Anchor { point, side }
}
}
/// Represents a range of selected cells.
///
/// Both ends are inclusive: the cells at `start` and `end` are part of the selection.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub struct SelectionRange {
    /// Start point, top left of the selection.
    pub start: Point,
    /// End point, bottom right of the selection.
    pub end: Point,
    /// Whether this selection is a block selection.
    pub is_block: bool,
}
impl SelectionRange {
pub fn new(start: Point, end: Point, is_block: bool) -> Self {
assert!(start <= end);
Self { start, end, is_block }
}
}
impl SelectionRange {
    /// Check if a point lies within the selection.
    pub fn contains(&self, point: Point) -> bool {
        // The column limits only constrain the boundary lines, unless this is a
        // block (rectangular) selection, where they apply on every line.
        self.start.line <= point.line
            && self.end.line >= point.line
            && (self.start.column <= point.column
                || (self.start.line != point.line && !self.is_block))
            && (self.end.column >= point.column || (self.end.line != point.line && !self.is_block))
    }
    /// Check if the cell at a point is part of the selection.
    ///
    /// Also accounts for the block cursor sitting on a selection boundary (where it
    /// is not inverted) and for wide characters whose trailing spacer cell falls
    /// inside the selection.
    pub fn contains_cell(
        &self,
        indexed: &Indexed<&Cell>,
        point: Point,
        shape: CursorShape,
    ) -> bool {
        // Do not invert block cursor at selection boundaries.
        if shape == CursorShape::Block
            && point == indexed.point
            && (self.start == indexed.point
                || self.end == indexed.point
                // For block selections, all four corners of the rectangle are boundaries.
                || (self.is_block
                    && ((self.start.line == indexed.point.line
                        && self.end.column == indexed.point.column)
                        || (self.end.line == indexed.point.line
                            && self.start.column == indexed.point.column))))
        {
            return false;
        }
        // Point itself is selected.
        if self.contains(indexed.point) {
            return true;
        }
        // Check if a wide char's trailing spacer is selected.
        indexed.cell.flags().contains(Flags::WIDE_CHAR)
            && self.contains(Point::new(indexed.point.line, indexed.point.column + 1))
    }
}
/// Different kinds of selection.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum SelectionType {
    /// Track exactly the cells between the two anchors, without any expansion.
    Simple,
    /// Select the rectangular region spanned by the two anchors.
    Block,
    /// Expand both ends to the nearest semantic escape characters.
    Semantic,
    /// Expand both ends to cover entire lines.
    Lines,
}
/// Describes a region of a 2-dimensional area.
///
/// Used to track a text selection. There are four supported modes, selected through the
/// [`SelectionType`] passed to [`Selection::new`]: [`SelectionType::Simple`] precisely tracks
/// which cells are selected without any expansion, [`SelectionType::Block`] selects rectangular
/// regions, [`SelectionType::Semantic`] expands the initial selection to the nearest semantic
/// escape char in either direction, and [`SelectionType::Lines`] always selects entire lines.
///
/// Calls to [`update`] operate differently based on the selection kind. The simple and block
/// modes do nothing special, simply tracking points and sides. Semantic selections will
/// continue to expand out to semantic boundaries as the selection point changes. Similarly,
/// line selections will always expand the new point to encompass entire lines.
///
/// [`update`]: Selection::update
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct Selection {
    /// Kind of selection tracked by this region.
    pub ty: SelectionType,
    // Anchors for both ends of the selection; `start` is where the selection was created
    // and `end` is the anchor moved by `update`.
    region: Range<Anchor>,
}
impl Selection {
    /// Create a new selection with both ends anchored at `location`/`side`.
    pub fn new(ty: SelectionType, location: Point, side: Side) -> Selection {
        Self {
            region: Range { start: Anchor::new(location, side), end: Anchor::new(location, side) },
            ty,
        }
    }
    /// Update the end of the selection.
    pub fn update(&mut self, point: Point, side: Side) {
        self.region.end = Anchor::new(point, side);
    }
    /// Move the selection to follow content scrolled by `delta` lines within `range`.
    ///
    /// Returns `None` when the rotation leaves nothing selected, signalling
    /// that the caller should drop the selection entirely.
    pub fn rotate<D: Dimensions>(
        mut self,
        dimensions: &D,
        range: &Range<Line>,
        delta: i32,
    ) -> Option<Selection> {
        let bottommost_line = dimensions.bottommost_line();
        let range_bottom = range.end;
        let range_top = range.start;
        // Order anchors so `start` is the topmost point before adjusting lines.
        let (mut start, mut end) = (&mut self.region.start, &mut self.region.end);
        if start.point > end.point {
            mem::swap(&mut start, &mut end);
        }
        // Rotate start of selection.
        if (start.point.line >= range_top || range_top == 0) && start.point.line < range_bottom {
            start.point.line = min(start.point.line - delta, bottommost_line);
            // If end is within the same region, delete selection once start rotates out.
            if start.point.line >= range_bottom && end.point.line < range_bottom {
                return None;
            }
            // Clamp selection to start of region.
            if start.point.line < range_top && range_top != 0 {
                // Non-block selections snap to the leftmost cell when clamped.
                if self.ty != SelectionType::Block {
                    start.point.column = Column(0);
                    start.side = Side::Left;
                }
                start.point.line = range_top;
            }
        }
        // Rotate end of selection.
        if (end.point.line >= range_top || range_top == 0) && end.point.line < range_bottom {
            end.point.line = min(end.point.line - delta, bottommost_line);
            // Delete selection if end has overtaken the start.
            if end.point.line < start.point.line {
                return None;
            }
            // Clamp selection to end of region.
            if end.point.line >= range_bottom {
                // Non-block selections snap to the rightmost cell when clamped.
                if self.ty != SelectionType::Block {
                    end.point.column = dimensions.last_column();
                    end.side = Side::Right;
                }
                end.point.line = range_bottom - 1;
            }
        }
        Some(self)
    }
    /// Whether the selection covers zero complete cells.
    pub fn is_empty(&self) -> bool {
        match self.ty {
            SelectionType::Simple => {
                let (mut start, mut end) = (self.region.start, self.region.end);
                if start.point > end.point {
                    mem::swap(&mut start, &mut end);
                }
                // Simple selection is empty when the points are identical
                // or two adjacent cells have the sides right -> left.
                start == end
                    || (start.side == Side::Right
                        && end.side == Side::Left
                        && (start.point.line == end.point.line)
                        && start.point.column + 1 == end.point.column)
            },
            SelectionType::Block => {
                let (start, end) = (self.region.start, self.region.end);
                // Block selection is empty when the points' columns and sides are identical
                // or two cells with adjacent columns have the sides right -> left,
                // regardless of their lines
                (start.point.column == end.point.column && start.side == end.side)
                    || (start.point.column + 1 == end.point.column
                        && start.side == Side::Right
                        && end.side == Side::Left)
                    || (end.point.column + 1 == start.point.column
                        && start.side == Side::Left
                        && end.side == Side::Right)
            },
            // Semantic/lines selections always expand to at least one cell.
            SelectionType::Semantic | SelectionType::Lines => false,
        }
    }
    /// Check whether selection contains any point in a given range.
    pub fn intersects_range<R: RangeBounds<Line>>(&self, range: R) -> bool {
        let mut start = self.region.start.point.line;
        let mut end = self.region.end.point.line;
        if start > end {
            mem::swap(&mut start, &mut end);
        }
        // Normalize both bounds to inclusive top/bottom lines.
        let range_top = match range.start_bound() {
            Bound::Included(&range_start) => range_start,
            Bound::Excluded(&range_start) => range_start + 1,
            Bound::Unbounded => Line(i32::MIN),
        };
        let range_bottom = match range.end_bound() {
            Bound::Included(&range_end) => range_end,
            Bound::Excluded(&range_end) => range_end - 1,
            Bound::Unbounded => Line(i32::MAX),
        };
        range_bottom >= start && range_top <= end
    }
    /// Expand selection sides to include all cells.
    pub fn include_all(&mut self) {
        let (start, end) = (self.region.start.point, self.region.end.point);
        // Pick the outward-facing side for each anchor based on anchor order;
        // block selections order by column first, then line.
        let (start_side, end_side) = match self.ty {
            SelectionType::Block
                if start.column > end.column
                    || (start.column == end.column && start.line > end.line) =>
            {
                (Side::Right, Side::Left)
            },
            SelectionType::Block => (Side::Left, Side::Right),
            _ if start > end => (Side::Right, Side::Left),
            _ => (Side::Left, Side::Right),
        };
        self.region.start.side = start_side;
        self.region.end.side = end_side;
    }
    /// Convert selection to grid coordinates.
    pub fn to_range<T>(&self, term: &Term<T>) -> Option<SelectionRange> {
        let grid = term.grid();
        let columns = grid.columns();
        // Order start above the end.
        let mut start = self.region.start;
        let mut end = self.region.end;
        if start.point > end.point {
            mem::swap(&mut start, &mut end);
        }
        // Clamp selection to within grid boundaries.
        if end.point.line < term.topmost_line() {
            return None;
        }
        start.point = start.point.grid_clamp(term, Boundary::Grid);
        end.point = end.point.grid_clamp(term, Boundary::Grid);
        match self.ty {
            SelectionType::Simple => self.range_simple(start, end, columns),
            SelectionType::Block => self.range_block(start, end),
            SelectionType::Semantic => Some(Self::range_semantic(term, start.point, end.point)),
            SelectionType::Lines => Some(Self::range_lines(term, start.point, end.point)),
        }
    }
    /// Resolve a semantic selection by expanding both ends to semantic boundaries.
    fn range_semantic<T>(term: &Term<T>, mut start: Point, mut end: Point) -> SelectionRange {
        // A single-cell selection on a bracket extends to the matching bracket.
        if start == end {
            if let Some(matching) = term.bracket_search(start) {
                if (matching.line == start.line && matching.column < start.column)
                    || (matching.line < start.line)
                {
                    start = matching;
                } else {
                    end = matching;
                }
                return SelectionRange { start, end, is_block: false };
            }
        }
        let start = term.semantic_search_left(start);
        let end = term.semantic_search_right(end);
        SelectionRange { start, end, is_block: false }
    }
    /// Resolve a lines selection by expanding both ends to cover entire lines.
    fn range_lines<T>(term: &Term<T>, start: Point, end: Point) -> SelectionRange {
        let start = term.line_search_left(start);
        let end = term.line_search_right(end);
        SelectionRange { start, end, is_block: false }
    }
    /// Resolve a simple selection into an inclusive cell range.
    ///
    /// Anchors must already be ordered with `start` at or before `end`;
    /// returns `None` for empty selections.
    fn range_simple(
        &self,
        mut start: Anchor,
        mut end: Anchor,
        columns: usize,
    ) -> Option<SelectionRange> {
        if self.is_empty() {
            return None;
        }
        // Remove last cell if selection ends to the left of a cell.
        if end.side == Side::Left && start.point != end.point {
            // Special case when selection ends to left of first cell.
            if end.point.column == 0 {
                end.point.column = Column(columns - 1);
                end.point.line -= 1;
            } else {
                end.point.column -= 1;
            }
        }
        // Remove first cell if selection starts at the right of a cell.
        if start.side == Side::Right && start.point != end.point {
            start.point.column += 1;
            // Wrap to next line when selection starts to the right of last column.
            if start.point.column == columns {
                start.point.column = Column(0);
                start.point.line += 1;
            }
        }
        Some(SelectionRange { start: start.point, end: end.point, is_block: false })
    }
    /// Resolve a block selection into an inclusive rectangular range.
    ///
    /// Lines were already ordered by the caller, so only columns may still
    /// need swapping here. Returns `None` for empty selections.
    fn range_block(&self, mut start: Anchor, mut end: Anchor) -> Option<SelectionRange> {
        if self.is_empty() {
            return None;
        }
        // Always go top-left -> bottom-right.
        if start.point.column > end.point.column {
            mem::swap(&mut start.side, &mut end.side);
            mem::swap(&mut start.point.column, &mut end.point.column);
        }
        // Remove last cell if selection ends to the left of a cell.
        if end.side == Side::Left && start.point != end.point && end.point.column.0 > 0 {
            end.point.column -= 1;
        }
        // Remove first cell if selection starts at the right of a cell.
        if start.side == Side::Right && start.point != end.point {
            start.point.column += 1;
        }
        Some(SelectionRange { start: start.point, end: end.point, is_block: true })
    }
}
/// Tests for selection.
///
/// There are comments on all of the tests describing the selection. Pictograms
/// are used to avoid ambiguity. Grid cells are represented by a [ ]. Only
/// cells that are completely covered are counted in a selection. Ends are
/// represented by `B` and `E` for begin and end, respectively. A selected cell
/// looks like [XX], [BX] (at the start), [XB] (at the end), [XE] (at the end),
/// and [EX] (at the start), or [BE] for a single cell. Partially selected cells
/// look like [ B] and [E ].
#[cfg(test)]
mod tests {
    use super::*;
    use crate::index::{Column, Point, Side};
    use crate::term::test::TermSize;
    use crate::term::{Config, Term};
    /// Construct a terminal with default config and the given dimensions.
    fn term(height: usize, width: usize) -> Term<()> {
        let size = TermSize::new(width, height);
        Term::new(Config::default(), &size, ())
    }
    /// Test case of single cell selection.
    ///
    /// 1. [ ]
    /// 2. [B ]
    /// 3. [BE]
    #[test]
    fn single_cell_left_to_right() {
        let location = Point::new(Line(0), Column(0));
        let mut selection = Selection::new(SelectionType::Simple, location, Side::Left);
        selection.update(location, Side::Right);
        assert_eq!(selection.to_range(&term(1, 2)).unwrap(), SelectionRange {
            start: location,
            end: location,
            is_block: false
        });
    }
    /// Test case of single cell selection.
    ///
    /// 1. [ ]
    /// 2. [ B]
    /// 3. [EB]
    #[test]
    fn single_cell_right_to_left() {
        let location = Point::new(Line(0), Column(0));
        let mut selection = Selection::new(SelectionType::Simple, location, Side::Right);
        selection.update(location, Side::Left);
        assert_eq!(selection.to_range(&term(1, 2)).unwrap(), SelectionRange {
            start: location,
            end: location,
            is_block: false
        });
    }
    /// Test adjacent cell selection from left to right.
    ///
    /// 1. [ ][ ]
    /// 2. [ B][ ]
    /// 3. [ B][E ]
    #[test]
    fn between_adjacent_cells_left_to_right() {
        let mut selection =
            Selection::new(SelectionType::Simple, Point::new(Line(0), Column(0)), Side::Right);
        selection.update(Point::new(Line(0), Column(1)), Side::Left);
        assert_eq!(selection.to_range(&term(1, 2)), None);
    }
    /// Test adjacent cell selection from right to left.
    ///
    /// 1. [ ][ ]
    /// 2. [ ][B ]
    /// 3. [ E][B ]
    #[test]
    fn between_adjacent_cells_right_to_left() {
        let mut selection =
            Selection::new(SelectionType::Simple, Point::new(Line(0), Column(1)), Side::Left);
        selection.update(Point::new(Line(0), Column(0)), Side::Right);
        assert_eq!(selection.to_range(&term(1, 2)), None);
    }
    #[rustfmt::skip]
    /// Test selection across adjacent lines.
    ///
    /// 1.  [ ][ ][ ][ ][ ]
    ///     [ ][ ][ ][ ][ ]
    /// 2.  [ ][ B][ ][ ][ ]
    ///     [ ][ ][ ][ ][ ]
    /// 3.  [ ][ B][XX][XX][XX]
    ///     [XX][XE][ ][ ][ ]
    #[test]
    fn across_adjacent_lines_upward_final_cell_exclusive() {
        let mut selection =
            Selection::new(SelectionType::Simple, Point::new(Line(0), Column(1)), Side::Right);
        selection.update(Point::new(Line(1), Column(1)), Side::Right);
        assert_eq!(selection.to_range(&term(2, 5)).unwrap(), SelectionRange {
            start: Point::new(Line(0), Column(2)),
            end: Point::new(Line(1), Column(1)),
            is_block: false,
        });
    }
    #[rustfmt::skip]
    /// Test selection across adjacent lines.
    ///
    /// 1.  [ ][ ][ ][ ][ ]
    ///     [ ][ ][ ][ ][ ]
    /// 2.  [ ][ ][ ][ ][ ]
    ///     [ ][ B][ ][ ][ ]
    /// 3.  [ ][ E][XX][XX][XX]
    ///     [XX][XB][ ][ ][ ]
    /// 4.  [ E][XX][XX][XX][XX]
    ///     [XX][XB][ ][ ][ ]
    #[test]
    fn selection_bigger_then_smaller() {
        let mut selection =
            Selection::new(SelectionType::Simple, Point::new(Line(1), Column(1)), Side::Right);
        selection.update(Point::new(Line(0), Column(1)), Side::Right);
        selection.update(Point::new(Line(0), Column(0)), Side::Right);
        assert_eq!(selection.to_range(&term(2, 5)).unwrap(), SelectionRange {
            start: Point::new(Line(0), Column(1)),
            end: Point::new(Line(1), Column(1)),
            is_block: false,
        });
    }
    /// Lines selection still expands to full lines after a full-screen rotation.
    #[test]
    fn line_selection() {
        let size = (10, 5);
        let mut selection =
            Selection::new(SelectionType::Lines, Point::new(Line(9), Column(1)), Side::Left);
        selection.update(Point::new(Line(4), Column(1)), Side::Right);
        selection = selection.rotate(&size, &(Line(0)..Line(size.0 as i32)), 4).unwrap();
        assert_eq!(selection.to_range(&term(size.0, size.1)).unwrap(), SelectionRange {
            start: Point::new(Line(0), Column(0)),
            end: Point::new(Line(5), Column(4)),
            is_block: false,
        });
    }
    /// Semantic selection endpoints survive a full-screen rotation.
    #[test]
    fn semantic_selection() {
        let size = (10, 5);
        let mut selection =
            Selection::new(SelectionType::Semantic, Point::new(Line(9), Column(3)), Side::Left);
        selection.update(Point::new(Line(4), Column(1)), Side::Right);
        selection = selection.rotate(&size, &(Line(0)..Line(size.0 as i32)), 4).unwrap();
        assert_eq!(selection.to_range(&term(size.0, size.1)).unwrap(), SelectionRange {
            start: Point::new(Line(0), Column(1)),
            end: Point::new(Line(5), Column(3)),
            is_block: false,
        });
    }
    /// Simple selection endpoints survive a full-screen rotation.
    #[test]
    fn simple_selection() {
        let size = (10, 5);
        let mut selection =
            Selection::new(SelectionType::Simple, Point::new(Line(9), Column(3)), Side::Right);
        selection.update(Point::new(Line(4), Column(1)), Side::Right);
        selection = selection.rotate(&size, &(Line(0)..Line(size.0 as i32)), 4).unwrap();
        assert_eq!(selection.to_range(&term(size.0, size.1)).unwrap(), SelectionRange {
            start: Point::new(Line(0), Column(2)),
            end: Point::new(Line(5), Column(3)),
            is_block: false,
        });
    }
    /// Block selection stays rectangular after a full-screen rotation.
    #[test]
    fn block_selection() {
        let size = (10, 5);
        let mut selection =
            Selection::new(SelectionType::Block, Point::new(Line(9), Column(3)), Side::Right);
        selection.update(Point::new(Line(4), Column(1)), Side::Right);
        selection = selection.rotate(&size, &(Line(0)..Line(size.0 as i32)), 4).unwrap();
        assert_eq!(selection.to_range(&term(size.0, size.1)).unwrap(), SelectionRange {
            start: Point::new(Line(0), Column(2)),
            end: Point::new(Line(5), Column(3)),
            is_block: true
        });
    }
    /// Simple selections spanning zero complete cells are reported empty.
    #[test]
    fn simple_is_empty() {
        let mut selection =
            Selection::new(SelectionType::Simple, Point::new(Line(1), Column(0)), Side::Right);
        assert!(selection.is_empty());
        selection.update(Point::new(Line(1), Column(1)), Side::Left);
        assert!(selection.is_empty());
        selection.update(Point::new(Line(0), Column(0)), Side::Right);
        assert!(!selection.is_empty());
    }
    /// Block selections compare only columns/sides for emptiness,
    /// regardless of the lines involved.
    #[test]
    fn block_is_empty() {
        let mut selection =
            Selection::new(SelectionType::Block, Point::new(Line(1), Column(0)), Side::Right);
        assert!(selection.is_empty());
        selection.update(Point::new(Line(1), Column(1)), Side::Left);
        assert!(selection.is_empty());
        selection.update(Point::new(Line(1), Column(1)), Side::Right);
        assert!(!selection.is_empty());
        selection.update(Point::new(Line(0), Column(0)), Side::Right);
        assert!(selection.is_empty());
        selection.update(Point::new(Line(0), Column(1)), Side::Left);
        assert!(selection.is_empty());
        selection.update(Point::new(Line(0), Column(1)), Side::Right);
        assert!(!selection.is_empty());
    }
    /// Rotating up inside a sub-region clamps the selection to the region's top.
    #[test]
    fn rotate_in_region_up() {
        let size = (10, 5);
        let mut selection =
            Selection::new(SelectionType::Simple, Point::new(Line(7), Column(3)), Side::Right);
        selection.update(Point::new(Line(4), Column(1)), Side::Right);
        selection = selection.rotate(&size, &(Line(1)..Line(size.0 as i32 - 1)), 4).unwrap();
        assert_eq!(selection.to_range(&term(size.0, size.1)).unwrap(), SelectionRange {
            start: Point::new(Line(1), Column(0)),
            end: Point::new(Line(3), Column(3)),
            is_block: false,
        });
    }
    /// Rotating down inside a sub-region clamps the selection to the region's bottom.
    #[test]
    fn rotate_in_region_down() {
        let size = (10, 5);
        let mut selection =
            Selection::new(SelectionType::Simple, Point::new(Line(4), Column(3)), Side::Right);
        selection.update(Point::new(Line(1), Column(1)), Side::Left);
        selection = selection.rotate(&size, &(Line(1)..Line(size.0 as i32 - 1)), -5).unwrap();
        assert_eq!(selection.to_range(&term(size.0, size.1)).unwrap(), SelectionRange {
            start: Point::new(Line(6), Column(1)),
            end: Point::new(Line(8), size.last_column()),
            is_block: false,
        });
    }
    /// Block selections keep their columns when clamped during rotation.
    #[test]
    fn rotate_in_region_up_block() {
        let size = (10, 5);
        let mut selection =
            Selection::new(SelectionType::Block, Point::new(Line(7), Column(3)), Side::Right);
        selection.update(Point::new(Line(4), Column(1)), Side::Right);
        selection = selection.rotate(&size, &(Line(1)..Line(size.0 as i32 - 1)), 4).unwrap();
        assert_eq!(selection.to_range(&term(size.0, size.1)).unwrap(), SelectionRange {
            start: Point::new(Line(1), Column(2)),
            end: Point::new(Line(3), Column(3)),
            is_block: true,
        });
    }
    /// `intersects_range` handles inclusive, exclusive, and unbounded bounds.
    #[test]
    fn range_intersection() {
        let mut selection =
            Selection::new(SelectionType::Lines, Point::new(Line(3), Column(1)), Side::Left);
        selection.update(Point::new(Line(6), Column(1)), Side::Right);
        assert!(selection.intersects_range(..));
        assert!(selection.intersects_range(Line(2)..));
        assert!(selection.intersects_range(Line(2)..=Line(4)));
        assert!(selection.intersects_range(Line(2)..=Line(7)));
        assert!(selection.intersects_range(Line(4)..=Line(5)));
        assert!(selection.intersects_range(Line(5)..Line(8)));
        assert!(!selection.intersects_range(..=Line(2)));
        assert!(!selection.intersects_range(Line(7)..=Line(8)));
    }
}
| rust | {
"argument_definitions": [
{
"definitions": [
"struct Anchor {\n point: Point,\n side: Side,\n}"
],
"name": "start",
"type": "Anchor"
},
{
"definitions": [
"struct Anchor {\n point: Point,\n side: Side,\n}"
],
"name": "end",
"type": "Anchor"
}
],
"end_line": 359,
"name": "range_simple",
"signature": "fn range_simple(\n &self,\n mut start: Anchor,\n mut end: Anchor,\n columns: usize,\n ) -> Option<SelectionRange>",
"start_line": 326
} | {
"class_name": "impl Selection {\n pub fn new(ty: SelectionType, location: Point, side: Side) -> Selection {\n Self {\n region: Range { start: Anchor::new(location, side), end: Anchor::new(location, side) },\n ty,\n }\n }\n\n /// Update the end of the selection.\n pub fn update(&mut self, point: Point, side: Side) {\n self.region.end = Anchor::new(point, side);\n }\n\n pub fn rotate<D: Dimensions>(\n mut self,\n dimensions: &D,\n range: &Range<Line>,\n delta: i32,\n ) -> Option<Selection> {\n let bottommost_line = dimensions.bottommost_line();\n let range_bottom = range.end;\n let range_top = range.start;\n\n let (mut start, mut end) = (&mut self.region.start, &mut self.region.end);\n if start.point > end.point {\n mem::swap(&mut start, &mut end);\n }\n\n // Rotate start of selection.\n if (start.point.line >= range_top || range_top == 0) && start.point.line < range_bottom {\n start.point.line = min(start.point.line - delta, bottommost_line);\n\n // If end is within the same region, delete selection once start rotates out.\n if start.point.line >= range_bottom && end.point.line < range_bottom {\n return None;\n }\n\n // Clamp selection to start of region.\n if start.point.line < range_top && range_top != 0 {\n if self.ty != SelectionType::Block {\n start.point.column = Column(0);\n start.side = Side::Left;\n }\n start.point.line = range_top;\n }\n }\n\n // Rotate end of selection.\n if (end.point.line >= range_top || range_top == 0) && end.point.line < range_bottom {\n end.point.line = min(end.point.line - delta, bottommost_line);\n\n // Delete selection if end has overtaken the start.\n if end.point.line < start.point.line {\n return None;\n }\n\n // Clamp selection to end of region.\n if end.point.line >= range_bottom {\n if self.ty != SelectionType::Block {\n end.point.column = dimensions.last_column();\n end.side = Side::Right;\n }\n end.point.line = range_bottom - 1;\n }\n }\n\n Some(self)\n }\n\n pub fn is_empty(&self) -> bool {\n match self.ty {\n 
SelectionType::Simple => {\n let (mut start, mut end) = (self.region.start, self.region.end);\n if start.point > end.point {\n mem::swap(&mut start, &mut end);\n }\n\n // Simple selection is empty when the points are identical\n // or two adjacent cells have the sides right -> left.\n start == end\n || (start.side == Side::Right\n && end.side == Side::Left\n && (start.point.line == end.point.line)\n && start.point.column + 1 == end.point.column)\n },\n SelectionType::Block => {\n let (start, end) = (self.region.start, self.region.end);\n\n // Block selection is empty when the points' columns and sides are identical\n // or two cells with adjacent columns have the sides right -> left,\n // regardless of their lines\n (start.point.column == end.point.column && start.side == end.side)\n || (start.point.column + 1 == end.point.column\n && start.side == Side::Right\n && end.side == Side::Left)\n || (end.point.column + 1 == start.point.column\n && start.side == Side::Left\n && end.side == Side::Right)\n },\n SelectionType::Semantic | SelectionType::Lines => false,\n }\n }\n\n /// Check whether selection contains any point in a given range.\n pub fn intersects_range<R: RangeBounds<Line>>(&self, range: R) -> bool {\n let mut start = self.region.start.point.line;\n let mut end = self.region.end.point.line;\n\n if start > end {\n mem::swap(&mut start, &mut end);\n }\n\n let range_top = match range.start_bound() {\n Bound::Included(&range_start) => range_start,\n Bound::Excluded(&range_start) => range_start + 1,\n Bound::Unbounded => Line(i32::MIN),\n };\n\n let range_bottom = match range.end_bound() {\n Bound::Included(&range_end) => range_end,\n Bound::Excluded(&range_end) => range_end - 1,\n Bound::Unbounded => Line(i32::MAX),\n };\n\n range_bottom >= start && range_top <= end\n }\n\n /// Expand selection sides to include all cells.\n pub fn include_all(&mut self) {\n let (start, end) = (self.region.start.point, self.region.end.point);\n let (start_side, end_side) = match 
self.ty {\n SelectionType::Block\n if start.column > end.column\n || (start.column == end.column && start.line > end.line) =>\n {\n (Side::Right, Side::Left)\n },\n SelectionType::Block => (Side::Left, Side::Right),\n _ if start > end => (Side::Right, Side::Left),\n _ => (Side::Left, Side::Right),\n };\n\n self.region.start.side = start_side;\n self.region.end.side = end_side;\n }\n\n /// Convert selection to grid coordinates.\n pub fn to_range<T>(&self, term: &Term<T>) -> Option<SelectionRange> {\n let grid = term.grid();\n let columns = grid.columns();\n\n // Order start above the end.\n let mut start = self.region.start;\n let mut end = self.region.end;\n\n if start.point > end.point {\n mem::swap(&mut start, &mut end);\n }\n\n // Clamp selection to within grid boundaries.\n if end.point.line < term.topmost_line() {\n return None;\n }\n start.point = start.point.grid_clamp(term, Boundary::Grid);\n end.point = end.point.grid_clamp(term, Boundary::Grid);\n\n match self.ty {\n SelectionType::Simple => self.range_simple(start, end, columns),\n SelectionType::Block => self.range_block(start, end),\n SelectionType::Semantic => Some(Self::range_semantic(term, start.point, end.point)),\n SelectionType::Lines => Some(Self::range_lines(term, start.point, end.point)),\n }\n }\n\n fn range_semantic<T>(term: &Term<T>, mut start: Point, mut end: Point) -> SelectionRange {\n if start == end {\n if let Some(matching) = term.bracket_search(start) {\n if (matching.line == start.line && matching.column < start.column)\n || (matching.line < start.line)\n {\n start = matching;\n } else {\n end = matching;\n }\n\n return SelectionRange { start, end, is_block: false };\n }\n }\n\n let start = term.semantic_search_left(start);\n let end = term.semantic_search_right(end);\n\n SelectionRange { start, end, is_block: false }\n }\n\n fn range_lines<T>(term: &Term<T>, start: Point, end: Point) -> SelectionRange {\n let start = term.line_search_left(start);\n let end = 
term.line_search_right(end);\n\n SelectionRange { start, end, is_block: false }\n }\n\n fn range_simple(\n &self,\n mut start: Anchor,\n mut end: Anchor,\n columns: usize,\n ) -> Option<SelectionRange> {\n if self.is_empty() {\n return None;\n }\n\n // Remove last cell if selection ends to the left of a cell.\n if end.side == Side::Left && start.point != end.point {\n // Special case when selection ends to left of first cell.\n if end.point.column == 0 {\n end.point.column = Column(columns - 1);\n end.point.line -= 1;\n } else {\n end.point.column -= 1;\n }\n }\n\n // Remove first cell if selection starts at the right of a cell.\n if start.side == Side::Right && start.point != end.point {\n start.point.column += 1;\n\n // Wrap to next line when selection starts to the right of last column.\n if start.point.column == columns {\n start.point.column = Column(0);\n start.point.line += 1;\n }\n }\n\n Some(SelectionRange { start: start.point, end: end.point, is_block: false })\n }\n\n fn range_block(&self, mut start: Anchor, mut end: Anchor) -> Option<SelectionRange> {\n if self.is_empty() {\n return None;\n }\n\n // Always go top-left -> bottom-right.\n if start.point.column > end.point.column {\n mem::swap(&mut start.side, &mut end.side);\n mem::swap(&mut start.point.column, &mut end.point.column);\n }\n\n // Remove last cell if selection ends to the left of a cell.\n if end.side == Side::Left && start.point != end.point && end.point.column.0 > 0 {\n end.point.column -= 1;\n }\n\n // Remove first cell if selection starts at the right of a cell.\n if start.side == Side::Right && start.point != end.point {\n start.point.column += 1;\n }\n\n Some(SelectionRange { start: start.point, end: end.point, is_block: true })\n }\n}",
"class_signature": "impl Selection"
} |
range_block | alacritty-master/alacritty_terminal/src/selection.rs | fn range_block(&self, mut start: Anchor, mut end: Anchor) -> Option<SelectionRange> {
if self.is_empty() {
return None;
}
// Always go top-left -> bottom-right.
if start.point.column > end.point.column {
mem::swap(&mut start.side, &mut end.side);
mem::swap(&mut start.point.column, &mut end.point.column);
}
// Remove last cell if selection ends to the left of a cell.
if end.side == Side::Left && start.point != end.point && end.point.column.0 > 0 {
end.point.column -= 1;
}
// Remove first cell if selection starts at the right of a cell.
if start.side == Side::Right && start.point != end.point {
start.point.column += 1;
}
Some(SelectionRange { start: start.point, end: end.point, is_block: true })
} | //! State management for a selection in the grid.
//!
//! A selection should start when the mouse is clicked, and it should be
//! finalized when the button is released. The selection should be cleared
//! when text is added/removed/scrolled on the screen. The selection should
//! also be cleared if the user clicks off of the selection.
use std::cmp::min;
use std::mem;
use std::ops::{Bound, Range, RangeBounds};
use crate::grid::{Dimensions, GridCell, Indexed};
use crate::index::{Boundary, Column, Line, Point, Side};
use crate::term::cell::{Cell, Flags};
use crate::term::Term;
use crate::vte::ansi::CursorShape;
/// A Point and side within that point.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
struct Anchor {
    /// Cell position of the anchor.
    point: Point,
    /// Which side of the cell the anchor sits on.
    side: Side,
}
impl Anchor {
fn new(point: Point, side: Side) -> Anchor {
Anchor { point, side }
}
}
/// Represents a range of selected cells.
///
/// [`SelectionRange::new`] asserts that `start` does not come after `end`.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub struct SelectionRange {
    /// Start point, top left of the selection.
    pub start: Point,
    /// End point, bottom right of the selection.
    pub end: Point,
    /// Whether this selection is a block selection.
    pub is_block: bool,
}
impl SelectionRange {
pub fn new(start: Point, end: Point, is_block: bool) -> Self {
assert!(start <= end);
Self { start, end, is_block }
}
}
impl SelectionRange {
    /// Check if a point lies within the selection.
    pub fn contains(&self, point: Point) -> bool {
        // Column bounds only apply on the boundary lines for non-block
        // selections; block selections enforce them on every line.
        self.start.line <= point.line
            && self.end.line >= point.line
            && (self.start.column <= point.column
                || (self.start.line != point.line && !self.is_block))
            && (self.end.column >= point.column || (self.end.line != point.line && !self.is_block))
    }
    /// Check if the cell at a point is part of the selection.
    ///
    /// Unlike [`Self::contains`], this also counts a wide char's trailing
    /// spacer as selected and leaves a block cursor uninverted at the
    /// selection's boundary cells.
    pub fn contains_cell(
        &self,
        indexed: &Indexed<&Cell>,
        point: Point,
        shape: CursorShape,
    ) -> bool {
        // Do not invert block cursor at selection boundaries.
        if shape == CursorShape::Block
            && point == indexed.point
            && (self.start == indexed.point
                || self.end == indexed.point
                || (self.is_block
                    && ((self.start.line == indexed.point.line
                        && self.end.column == indexed.point.column)
                        || (self.end.line == indexed.point.line
                            && self.start.column == indexed.point.column))))
        {
            return false;
        }
        // Point itself is selected.
        if self.contains(indexed.point) {
            return true;
        }
        // Check if a wide char's trailing spacer is selected.
        indexed.cell.flags().contains(Flags::WIDE_CHAR)
            && self.contains(Point::new(indexed.point.line, indexed.point.column + 1))
    }
}
/// Different kinds of selection.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum SelectionType {
    /// Track exactly the cells between the two anchors.
    Simple,
    /// Select the rectangular region spanned by the anchors.
    Block,
    /// Expand the anchors to the nearest semantic boundaries.
    Semantic,
    /// Expand the anchors to cover entire lines.
    Lines,
}
/// Describes a region of a 2-dimensional area.
///
/// Used to track a text selection. There are four supported modes, each with its own constructor:
/// [`simple`], [`block`], [`semantic`], and [`lines`]. The [`simple`] mode precisely tracks which
/// cells are selected without any expansion. [`block`] will select rectangular regions.
/// [`semantic`] mode expands the initial selection to the nearest semantic escape char in either
/// direction. [`lines`] will always select entire lines.
///
/// Calls to [`update`] operate differently based on the selection kind. The [`simple`] and
/// [`block`] modes do nothing special, they simply track points and sides. [`semantic`] will continue to expand
/// out to semantic boundaries as the selection point changes. Similarly, [`lines`] will always
/// expand the new point to encompass entire lines.
///
/// [`simple`]: enum.Selection.html#method.simple
/// [`block`]: enum.Selection.html#method.block
/// [`semantic`]: enum.Selection.html#method.semantic
/// [`lines`]: enum.Selection.html#method.lines
/// [`update`]: enum.Selection.html#method.update
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct Selection {
    /// Kind of selection; determines how the anchors are expanded into a range.
    pub ty: SelectionType,
    /// Selection anchors; both are set at creation, `end` follows `update` calls.
    region: Range<Anchor>,
}
impl Selection {
pub fn new(ty: SelectionType, location: Point, side: Side) -> Selection {
Self {
region: Range { start: Anchor::new(location, side), end: Anchor::new(location, side) },
ty,
}
}
/// Update the end of the selection.
pub fn update(&mut self, point: Point, side: Side) {
self.region.end = Anchor::new(point, side);
}
pub fn rotate<D: Dimensions>(
mut self,
dimensions: &D,
range: &Range<Line>,
delta: i32,
) -> Option<Selection> {
let bottommost_line = dimensions.bottommost_line();
let range_bottom = range.end;
let range_top = range.start;
let (mut start, mut end) = (&mut self.region.start, &mut self.region.end);
if start.point > end.point {
mem::swap(&mut start, &mut end);
}
// Rotate start of selection.
if (start.point.line >= range_top || range_top == 0) && start.point.line < range_bottom {
start.point.line = min(start.point.line - delta, bottommost_line);
// If end is within the same region, delete selection once start rotates out.
if start.point.line >= range_bottom && end.point.line < range_bottom {
return None;
}
// Clamp selection to start of region.
if start.point.line < range_top && range_top != 0 {
if self.ty != SelectionType::Block {
start.point.column = Column(0);
start.side = Side::Left;
}
start.point.line = range_top;
}
}
// Rotate end of selection.
if (end.point.line >= range_top || range_top == 0) && end.point.line < range_bottom {
end.point.line = min(end.point.line - delta, bottommost_line);
// Delete selection if end has overtaken the start.
if end.point.line < start.point.line {
return None;
}
// Clamp selection to end of region.
if end.point.line >= range_bottom {
if self.ty != SelectionType::Block {
end.point.column = dimensions.last_column();
end.side = Side::Right;
}
end.point.line = range_bottom - 1;
}
}
Some(self)
}
pub fn is_empty(&self) -> bool {
match self.ty {
SelectionType::Simple => {
let (mut start, mut end) = (self.region.start, self.region.end);
if start.point > end.point {
mem::swap(&mut start, &mut end);
}
// Simple selection is empty when the points are identical
// or two adjacent cells have the sides right -> left.
start == end
|| (start.side == Side::Right
&& end.side == Side::Left
&& (start.point.line == end.point.line)
&& start.point.column + 1 == end.point.column)
},
SelectionType::Block => {
let (start, end) = (self.region.start, self.region.end);
// Block selection is empty when the points' columns and sides are identical
// or two cells with adjacent columns have the sides right -> left,
// regardless of their lines
(start.point.column == end.point.column && start.side == end.side)
|| (start.point.column + 1 == end.point.column
&& start.side == Side::Right
&& end.side == Side::Left)
|| (end.point.column + 1 == start.point.column
&& start.side == Side::Left
&& end.side == Side::Right)
},
SelectionType::Semantic | SelectionType::Lines => false,
}
}
/// Check whether selection contains any point in a given range.
pub fn intersects_range<R: RangeBounds<Line>>(&self, range: R) -> bool {
let mut start = self.region.start.point.line;
let mut end = self.region.end.point.line;
if start > end {
mem::swap(&mut start, &mut end);
}
let range_top = match range.start_bound() {
Bound::Included(&range_start) => range_start,
Bound::Excluded(&range_start) => range_start + 1,
Bound::Unbounded => Line(i32::MIN),
};
let range_bottom = match range.end_bound() {
Bound::Included(&range_end) => range_end,
Bound::Excluded(&range_end) => range_end - 1,
Bound::Unbounded => Line(i32::MAX),
};
range_bottom >= start && range_top <= end
}
/// Expand selection sides to include all cells.
pub fn include_all(&mut self) {
let (start, end) = (self.region.start.point, self.region.end.point);
let (start_side, end_side) = match self.ty {
SelectionType::Block
if start.column > end.column
|| (start.column == end.column && start.line > end.line) =>
{
(Side::Right, Side::Left)
},
SelectionType::Block => (Side::Left, Side::Right),
_ if start > end => (Side::Right, Side::Left),
_ => (Side::Left, Side::Right),
};
self.region.start.side = start_side;
self.region.end.side = end_side;
}
/// Convert selection to grid coordinates.
///
/// Returns `None` when the selection is empty (see `range_simple` /
/// `range_block`) or lies entirely above the terminal's topmost line.
pub fn to_range<T>(&self, term: &Term<T>) -> Option<SelectionRange> {
    let grid = term.grid();
    let columns = grid.columns();

    // Order start above the end.
    let mut start = self.region.start;
    let mut end = self.region.end;
    if start.point > end.point {
        mem::swap(&mut start, &mut end);
    }

    // Clamp selection to within grid boundaries.
    if end.point.line < term.topmost_line() {
        return None;
    }
    start.point = start.point.grid_clamp(term, Boundary::Grid);
    end.point = end.point.grid_clamp(term, Boundary::Grid);

    // Delegate to the type-specific expansion/trimming logic.
    match self.ty {
        SelectionType::Simple => self.range_simple(start, end, columns),
        SelectionType::Block => self.range_block(start, end),
        SelectionType::Semantic => Some(Self::range_semantic(term, start.point, end.point)),
        SelectionType::Lines => Some(Self::range_lines(term, start.point, end.point)),
    }
}
/// Convert a semantic selection into a grid range.
///
/// When both endpoints sit on the same cell, a matching bracket (if any)
/// becomes the opposite endpoint; otherwise both ends are expanded to the
/// surrounding semantic word boundaries.
fn range_semantic<T>(term: &Term<T>, mut start: Point, mut end: Point) -> SelectionRange {
    if start == end {
        if let Some(matching) = term.bracket_search(start) {
            // Keep the range ordered: the match replaces whichever endpoint
            // keeps `start` at or before `end`.
            if (matching.line == start.line && matching.column < start.column)
                || (matching.line < start.line)
            {
                start = matching;
            } else {
                end = matching;
            }

            return SelectionRange { start, end, is_block: false };
        }
    }

    // No bracket pair involved: snap outward to semantic word boundaries.
    let start = term.semantic_search_left(start);
    let end = term.semantic_search_right(end);

    SelectionRange { start, end, is_block: false }
}
/// Convert a line selection into a grid range covering complete lines.
fn range_lines<T>(term: &Term<T>, start: Point, end: Point) -> SelectionRange {
    // Snap both endpoints outward to the boundaries of their lines.
    SelectionRange {
        start: term.line_search_left(start),
        end: term.line_search_right(end),
        is_block: false,
    }
}
/// Convert a simple (character-wise) selection into a grid range.
///
/// Partially covered boundary cells are trimmed: an end anchored on a
/// cell's left side excludes that cell, as does a start anchored on a
/// cell's right side. Trimming wraps across line boundaries. Returns
/// `None` for empty selections.
fn range_simple(
    &self,
    mut start: Anchor,
    mut end: Anchor,
    columns: usize,
) -> Option<SelectionRange> {
    if self.is_empty() {
        return None;
    }

    // Remove last cell if selection ends to the left of a cell.
    if end.side == Side::Left && start.point != end.point {
        // Special case when selection ends to left of first cell.
        if end.point.column == 0 {
            // Wrap the end back to the last column of the previous line.
            end.point.column = Column(columns - 1);
            end.point.line -= 1;
        } else {
            end.point.column -= 1;
        }
    }

    // Remove first cell if selection starts at the right of a cell.
    if start.side == Side::Right && start.point != end.point {
        start.point.column += 1;

        // Wrap to next line when selection starts to the right of last column.
        if start.point.column == columns {
            start.point.column = Column(0);
            start.point.line += 1;
        }
    }

    Some(SelectionRange { start: start.point, end: end.point, is_block: false })
}
/// Convert a block (rectangular) selection into a grid range.
///
/// Columns are normalized so the rectangle always runs top-left to
/// bottom-right; the sides travel with their columns when swapped. Edge
/// columns that are only partially covered are trimmed like in simple
/// selections, but no line wrapping occurs — the `column.0 > 0` guard
/// keeps the end column from underflowing instead. Returns `None` for
/// empty selections.
fn range_block(&self, mut start: Anchor, mut end: Anchor) -> Option<SelectionRange> {
    if self.is_empty() {
        return None;
    }

    // Always go top-left -> bottom-right.
    if start.point.column > end.point.column {
        mem::swap(&mut start.side, &mut end.side);
        mem::swap(&mut start.point.column, &mut end.point.column);
    }

    // Remove last cell if selection ends to the left of a cell.
    if end.side == Side::Left && start.point != end.point && end.point.column.0 > 0 {
        end.point.column -= 1;
    }

    // Remove first cell if selection starts at the right of a cell.
    if start.side == Side::Right && start.point != end.point {
        start.point.column += 1;
    }

    Some(SelectionRange { start: start.point, end: end.point, is_block: true })
}
}
/// Tests for selection.
///
/// There are comments on all of the tests describing the selection. Pictograms
/// are used to avoid ambiguity. Grid cells are represented by a [ ]. Only
/// cells that are completely covered are counted in a selection. Ends are
/// represented by `B` and `E` for begin and end, respectively. A selected cell
/// looks like [XX], [BX] (at the start), [XB] (at the end), [XE] (at the end),
/// and [EX] (at the start), or [BE] for a single cell. Partially selected cells
/// look like [ B] and [E ].
#[cfg(test)]
mod tests {
    use super::*;
    use crate::index::{Column, Point, Side};
    use crate::term::test::TermSize;
    use crate::term::{Config, Term};

    /// Build a default terminal with `height` lines and `width` columns.
    fn term(height: usize, width: usize) -> Term<()> {
        let size = TermSize::new(width, height);
        Term::new(Config::default(), &size, ())
    }

    /// Test case of single cell selection.
    ///
    /// 1. [  ]
    /// 2. [B ]
    /// 3. [BE]
    #[test]
    fn single_cell_left_to_right() {
        let location = Point::new(Line(0), Column(0));
        let mut selection = Selection::new(SelectionType::Simple, location, Side::Left);
        selection.update(location, Side::Right);

        assert_eq!(selection.to_range(&term(1, 2)).unwrap(), SelectionRange {
            start: location,
            end: location,
            is_block: false
        });
    }

    /// Test case of single cell selection.
    ///
    /// 1. [  ]
    /// 2. [ B]
    /// 3. [EB]
    #[test]
    fn single_cell_right_to_left() {
        let location = Point::new(Line(0), Column(0));
        let mut selection = Selection::new(SelectionType::Simple, location, Side::Right);
        selection.update(location, Side::Left);

        assert_eq!(selection.to_range(&term(1, 2)).unwrap(), SelectionRange {
            start: location,
            end: location,
            is_block: false
        });
    }

    /// Test adjacent cell selection from left to right.
    ///
    /// 1. [  ][  ]
    /// 2. [ B][  ]
    /// 3. [ B][E ]
    #[test]
    fn between_adjacent_cells_left_to_right() {
        let mut selection =
            Selection::new(SelectionType::Simple, Point::new(Line(0), Column(0)), Side::Right);
        selection.update(Point::new(Line(0), Column(1)), Side::Left);

        // No full cell is covered, so the selection resolves to nothing.
        assert_eq!(selection.to_range(&term(1, 2)), None);
    }

    /// Test adjacent cell selection from right to left.
    ///
    /// 1. [  ][  ]
    /// 2. [  ][B ]
    /// 3. [ E][B ]
    #[test]
    fn between_adjacent_cells_right_to_left() {
        let mut selection =
            Selection::new(SelectionType::Simple, Point::new(Line(0), Column(1)), Side::Left);
        selection.update(Point::new(Line(0), Column(0)), Side::Right);

        // No full cell is covered, so the selection resolves to nothing.
        assert_eq!(selection.to_range(&term(1, 2)), None);
    }

    #[rustfmt::skip]
    /// Test selection across adjacent lines.
    ///
    /// 1.  [  ][  ][  ][  ][  ]
    ///     [  ][  ][  ][  ][  ]
    /// 2.  [  ][ B][  ][  ][  ]
    ///     [  ][  ][  ][  ][  ]
    /// 3.  [  ][ B][XX][XX][XX]
    ///     [XX][XE][  ][  ][  ]
    #[test]
    fn across_adjacent_lines_upward_final_cell_exclusive() {
        let mut selection =
            Selection::new(SelectionType::Simple, Point::new(Line(0), Column(1)), Side::Right);
        selection.update(Point::new(Line(1), Column(1)), Side::Right);

        assert_eq!(selection.to_range(&term(2, 5)).unwrap(), SelectionRange {
            start: Point::new(Line(0), Column(2)),
            end: Point::new(Line(1), Column(1)),
            is_block: false,
        });
    }

    #[rustfmt::skip]
    /// Test selection across adjacent lines.
    ///
    /// 1.  [  ][  ][  ][  ][  ]
    ///     [  ][  ][  ][  ][  ]
    /// 2.  [  ][  ][  ][  ][  ]
    ///     [  ][ B][  ][  ][  ]
    /// 3.  [  ][ E][XX][XX][XX]
    ///     [XX][XB][  ][  ][  ]
    /// 4.  [ E][XX][XX][XX][XX]
    ///     [XX][XB][  ][  ][  ]
    #[test]
    fn selection_bigger_then_smaller() {
        let mut selection =
            Selection::new(SelectionType::Simple, Point::new(Line(1), Column(1)), Side::Right);
        selection.update(Point::new(Line(0), Column(1)), Side::Right);
        selection.update(Point::new(Line(0), Column(0)), Side::Right);

        assert_eq!(selection.to_range(&term(2, 5)).unwrap(), SelectionRange {
            start: Point::new(Line(0), Column(1)),
            end: Point::new(Line(1), Column(1)),
            is_block: false,
        });
    }

    /// Line selection should expand to full lines and survive rotation.
    #[test]
    fn line_selection() {
        let size = (10, 5);
        let mut selection =
            Selection::new(SelectionType::Lines, Point::new(Line(9), Column(1)), Side::Left);
        selection.update(Point::new(Line(4), Column(1)), Side::Right);
        selection = selection.rotate(&size, &(Line(0)..Line(size.0 as i32)), 4).unwrap();

        assert_eq!(selection.to_range(&term(size.0, size.1)).unwrap(), SelectionRange {
            start: Point::new(Line(0), Column(0)),
            end: Point::new(Line(5), Column(4)),
            is_block: false,
        });
    }

    /// Semantic selection should expand to word boundaries and survive rotation.
    #[test]
    fn semantic_selection() {
        let size = (10, 5);
        let mut selection =
            Selection::new(SelectionType::Semantic, Point::new(Line(9), Column(3)), Side::Left);
        selection.update(Point::new(Line(4), Column(1)), Side::Right);
        selection = selection.rotate(&size, &(Line(0)..Line(size.0 as i32)), 4).unwrap();

        assert_eq!(selection.to_range(&term(size.0, size.1)).unwrap(), SelectionRange {
            start: Point::new(Line(0), Column(1)),
            end: Point::new(Line(5), Column(3)),
            is_block: false,
        });
    }

    /// Simple selection bounds after rotating the whole screen region.
    #[test]
    fn simple_selection() {
        let size = (10, 5);
        let mut selection =
            Selection::new(SelectionType::Simple, Point::new(Line(9), Column(3)), Side::Right);
        selection.update(Point::new(Line(4), Column(1)), Side::Right);
        selection = selection.rotate(&size, &(Line(0)..Line(size.0 as i32)), 4).unwrap();

        assert_eq!(selection.to_range(&term(size.0, size.1)).unwrap(), SelectionRange {
            start: Point::new(Line(0), Column(2)),
            end: Point::new(Line(5), Column(3)),
            is_block: false,
        });
    }

    /// Block selection bounds after rotating the whole screen region.
    #[test]
    fn block_selection() {
        let size = (10, 5);
        let mut selection =
            Selection::new(SelectionType::Block, Point::new(Line(9), Column(3)), Side::Right);
        selection.update(Point::new(Line(4), Column(1)), Side::Right);
        selection = selection.rotate(&size, &(Line(0)..Line(size.0 as i32)), 4).unwrap();

        assert_eq!(selection.to_range(&term(size.0, size.1)).unwrap(), SelectionRange {
            start: Point::new(Line(0), Column(2)),
            end: Point::new(Line(5), Column(3)),
            is_block: true
        });
    }

    /// Simple selections are empty until at least one full cell is covered.
    #[test]
    fn simple_is_empty() {
        let mut selection =
            Selection::new(SelectionType::Simple, Point::new(Line(1), Column(0)), Side::Right);
        assert!(selection.is_empty());
        selection.update(Point::new(Line(1), Column(1)), Side::Left);
        assert!(selection.is_empty());
        selection.update(Point::new(Line(0), Column(0)), Side::Right);
        assert!(!selection.is_empty());
    }

    /// Block selections are empty whenever no full column is covered,
    /// regardless of the lines involved.
    #[test]
    fn block_is_empty() {
        let mut selection =
            Selection::new(SelectionType::Block, Point::new(Line(1), Column(0)), Side::Right);
        assert!(selection.is_empty());
        selection.update(Point::new(Line(1), Column(1)), Side::Left);
        assert!(selection.is_empty());
        selection.update(Point::new(Line(1), Column(1)), Side::Right);
        assert!(!selection.is_empty());
        selection.update(Point::new(Line(0), Column(0)), Side::Right);
        assert!(selection.is_empty());
        selection.update(Point::new(Line(0), Column(1)), Side::Left);
        assert!(selection.is_empty());
        selection.update(Point::new(Line(0), Column(1)), Side::Right);
        assert!(!selection.is_empty());
    }

    /// Rotating up inside a sub-region clamps the selection to the region top.
    #[test]
    fn rotate_in_region_up() {
        let size = (10, 5);
        let mut selection =
            Selection::new(SelectionType::Simple, Point::new(Line(7), Column(3)), Side::Right);
        selection.update(Point::new(Line(4), Column(1)), Side::Right);
        selection = selection.rotate(&size, &(Line(1)..Line(size.0 as i32 - 1)), 4).unwrap();

        assert_eq!(selection.to_range(&term(size.0, size.1)).unwrap(), SelectionRange {
            start: Point::new(Line(1), Column(0)),
            end: Point::new(Line(3), Column(3)),
            is_block: false,
        });
    }

    /// Rotating down inside a sub-region clamps the selection to the region
    /// bottom, extending the end to the last column.
    #[test]
    fn rotate_in_region_down() {
        let size = (10, 5);
        let mut selection =
            Selection::new(SelectionType::Simple, Point::new(Line(4), Column(3)), Side::Right);
        selection.update(Point::new(Line(1), Column(1)), Side::Left);
        selection = selection.rotate(&size, &(Line(1)..Line(size.0 as i32 - 1)), -5).unwrap();

        assert_eq!(selection.to_range(&term(size.0, size.1)).unwrap(), SelectionRange {
            start: Point::new(Line(6), Column(1)),
            end: Point::new(Line(8), size.last_column()),
            is_block: false,
        });
    }

    /// Block selections keep their columns when clamped during rotation.
    #[test]
    fn rotate_in_region_up_block() {
        let size = (10, 5);
        let mut selection =
            Selection::new(SelectionType::Block, Point::new(Line(7), Column(3)), Side::Right);
        selection.update(Point::new(Line(4), Column(1)), Side::Right);
        selection = selection.rotate(&size, &(Line(1)..Line(size.0 as i32 - 1)), 4).unwrap();

        assert_eq!(selection.to_range(&term(size.0, size.1)).unwrap(), SelectionRange {
            start: Point::new(Line(1), Column(2)),
            end: Point::new(Line(3), Column(3)),
            is_block: true,
        });
    }

    /// `intersects_range` over all `RangeBounds` flavors for a selection
    /// spanning lines 3..=6.
    #[test]
    fn range_intersection() {
        let mut selection =
            Selection::new(SelectionType::Lines, Point::new(Line(3), Column(1)), Side::Left);
        selection.update(Point::new(Line(6), Column(1)), Side::Right);

        assert!(selection.intersects_range(..));
        assert!(selection.intersects_range(Line(2)..));
        assert!(selection.intersects_range(Line(2)..=Line(4)));
        assert!(selection.intersects_range(Line(2)..=Line(7)));
        assert!(selection.intersects_range(Line(4)..=Line(5)));
        assert!(selection.intersects_range(Line(5)..Line(8)));

        assert!(!selection.intersects_range(..=Line(2)));
        assert!(!selection.intersects_range(Line(7)..=Line(8)));
    }
}
| rust | {
"argument_definitions": [
{
"definitions": [
"struct Anchor {\n point: Point,\n side: Side,\n}"
],
"name": "start",
"type": "Anchor"
},
{
"definitions": [
"struct Anchor {\n point: Point,\n side: Side,\n}"
],
"name": "end",
"type": "Anchor"
}
],
"end_line": 383,
"name": "range_block",
"signature": "fn range_block(&self, mut start: Anchor, mut end: Anchor) -> Option<SelectionRange>",
"start_line": 361
} | {
"class_name": "impl Selection {\n pub fn new(ty: SelectionType, location: Point, side: Side) -> Selection {\n Self {\n region: Range { start: Anchor::new(location, side), end: Anchor::new(location, side) },\n ty,\n }\n }\n\n /// Update the end of the selection.\n pub fn update(&mut self, point: Point, side: Side) {\n self.region.end = Anchor::new(point, side);\n }\n\n pub fn rotate<D: Dimensions>(\n mut self,\n dimensions: &D,\n range: &Range<Line>,\n delta: i32,\n ) -> Option<Selection> {\n let bottommost_line = dimensions.bottommost_line();\n let range_bottom = range.end;\n let range_top = range.start;\n\n let (mut start, mut end) = (&mut self.region.start, &mut self.region.end);\n if start.point > end.point {\n mem::swap(&mut start, &mut end);\n }\n\n // Rotate start of selection.\n if (start.point.line >= range_top || range_top == 0) && start.point.line < range_bottom {\n start.point.line = min(start.point.line - delta, bottommost_line);\n\n // If end is within the same region, delete selection once start rotates out.\n if start.point.line >= range_bottom && end.point.line < range_bottom {\n return None;\n }\n\n // Clamp selection to start of region.\n if start.point.line < range_top && range_top != 0 {\n if self.ty != SelectionType::Block {\n start.point.column = Column(0);\n start.side = Side::Left;\n }\n start.point.line = range_top;\n }\n }\n\n // Rotate end of selection.\n if (end.point.line >= range_top || range_top == 0) && end.point.line < range_bottom {\n end.point.line = min(end.point.line - delta, bottommost_line);\n\n // Delete selection if end has overtaken the start.\n if end.point.line < start.point.line {\n return None;\n }\n\n // Clamp selection to end of region.\n if end.point.line >= range_bottom {\n if self.ty != SelectionType::Block {\n end.point.column = dimensions.last_column();\n end.side = Side::Right;\n }\n end.point.line = range_bottom - 1;\n }\n }\n\n Some(self)\n }\n\n pub fn is_empty(&self) -> bool {\n match self.ty {\n 
SelectionType::Simple => {\n let (mut start, mut end) = (self.region.start, self.region.end);\n if start.point > end.point {\n mem::swap(&mut start, &mut end);\n }\n\n // Simple selection is empty when the points are identical\n // or two adjacent cells have the sides right -> left.\n start == end\n || (start.side == Side::Right\n && end.side == Side::Left\n && (start.point.line == end.point.line)\n && start.point.column + 1 == end.point.column)\n },\n SelectionType::Block => {\n let (start, end) = (self.region.start, self.region.end);\n\n // Block selection is empty when the points' columns and sides are identical\n // or two cells with adjacent columns have the sides right -> left,\n // regardless of their lines\n (start.point.column == end.point.column && start.side == end.side)\n || (start.point.column + 1 == end.point.column\n && start.side == Side::Right\n && end.side == Side::Left)\n || (end.point.column + 1 == start.point.column\n && start.side == Side::Left\n && end.side == Side::Right)\n },\n SelectionType::Semantic | SelectionType::Lines => false,\n }\n }\n\n /// Check whether selection contains any point in a given range.\n pub fn intersects_range<R: RangeBounds<Line>>(&self, range: R) -> bool {\n let mut start = self.region.start.point.line;\n let mut end = self.region.end.point.line;\n\n if start > end {\n mem::swap(&mut start, &mut end);\n }\n\n let range_top = match range.start_bound() {\n Bound::Included(&range_start) => range_start,\n Bound::Excluded(&range_start) => range_start + 1,\n Bound::Unbounded => Line(i32::MIN),\n };\n\n let range_bottom = match range.end_bound() {\n Bound::Included(&range_end) => range_end,\n Bound::Excluded(&range_end) => range_end - 1,\n Bound::Unbounded => Line(i32::MAX),\n };\n\n range_bottom >= start && range_top <= end\n }\n\n /// Expand selection sides to include all cells.\n pub fn include_all(&mut self) {\n let (start, end) = (self.region.start.point, self.region.end.point);\n let (start_side, end_side) = match 
self.ty {\n SelectionType::Block\n if start.column > end.column\n || (start.column == end.column && start.line > end.line) =>\n {\n (Side::Right, Side::Left)\n },\n SelectionType::Block => (Side::Left, Side::Right),\n _ if start > end => (Side::Right, Side::Left),\n _ => (Side::Left, Side::Right),\n };\n\n self.region.start.side = start_side;\n self.region.end.side = end_side;\n }\n\n /// Convert selection to grid coordinates.\n pub fn to_range<T>(&self, term: &Term<T>) -> Option<SelectionRange> {\n let grid = term.grid();\n let columns = grid.columns();\n\n // Order start above the end.\n let mut start = self.region.start;\n let mut end = self.region.end;\n\n if start.point > end.point {\n mem::swap(&mut start, &mut end);\n }\n\n // Clamp selection to within grid boundaries.\n if end.point.line < term.topmost_line() {\n return None;\n }\n start.point = start.point.grid_clamp(term, Boundary::Grid);\n end.point = end.point.grid_clamp(term, Boundary::Grid);\n\n match self.ty {\n SelectionType::Simple => self.range_simple(start, end, columns),\n SelectionType::Block => self.range_block(start, end),\n SelectionType::Semantic => Some(Self::range_semantic(term, start.point, end.point)),\n SelectionType::Lines => Some(Self::range_lines(term, start.point, end.point)),\n }\n }\n\n fn range_semantic<T>(term: &Term<T>, mut start: Point, mut end: Point) -> SelectionRange {\n if start == end {\n if let Some(matching) = term.bracket_search(start) {\n if (matching.line == start.line && matching.column < start.column)\n || (matching.line < start.line)\n {\n start = matching;\n } else {\n end = matching;\n }\n\n return SelectionRange { start, end, is_block: false };\n }\n }\n\n let start = term.semantic_search_left(start);\n let end = term.semantic_search_right(end);\n\n SelectionRange { start, end, is_block: false }\n }\n\n fn range_lines<T>(term: &Term<T>, start: Point, end: Point) -> SelectionRange {\n let start = term.line_search_left(start);\n let end = 
term.line_search_right(end);\n\n SelectionRange { start, end, is_block: false }\n }\n\n fn range_simple(\n &self,\n mut start: Anchor,\n mut end: Anchor,\n columns: usize,\n ) -> Option<SelectionRange> {\n if self.is_empty() {\n return None;\n }\n\n // Remove last cell if selection ends to the left of a cell.\n if end.side == Side::Left && start.point != end.point {\n // Special case when selection ends to left of first cell.\n if end.point.column == 0 {\n end.point.column = Column(columns - 1);\n end.point.line -= 1;\n } else {\n end.point.column -= 1;\n }\n }\n\n // Remove first cell if selection starts at the right of a cell.\n if start.side == Side::Right && start.point != end.point {\n start.point.column += 1;\n\n // Wrap to next line when selection starts to the right of last column.\n if start.point.column == columns {\n start.point.column = Column(0);\n start.point.line += 1;\n }\n }\n\n Some(SelectionRange { start: start.point, end: end.point, is_block: false })\n }\n\n fn range_block(&self, mut start: Anchor, mut end: Anchor) -> Option<SelectionRange> {\n if self.is_empty() {\n return None;\n }\n\n // Always go top-left -> bottom-right.\n if start.point.column > end.point.column {\n mem::swap(&mut start.side, &mut end.side);\n mem::swap(&mut start.point.column, &mut end.point.column);\n }\n\n // Remove last cell if selection ends to the left of a cell.\n if end.side == Side::Left && start.point != end.point && end.point.column.0 > 0 {\n end.point.column -= 1;\n }\n\n // Remove first cell if selection starts at the right of a cell.\n if start.side == Side::Right && start.point != end.point {\n start.point.column += 1;\n }\n\n Some(SelectionRange { start: start.point, end: end.point, is_block: true })\n }\n}",
"class_signature": "impl Selection"
} |
motion | alacritty-master/alacritty_terminal/src/vi_mode.rs | pub fn motion(mut self, term: &mut Term<T>, motion: ViMotion) -> Self {
match motion {
ViMotion::Up => {
if self.point.line > term.topmost_line() {
self.point.line -= 1;
}
},
ViMotion::Down => {
if self.point.line + 1 < term.screen_lines() as i32 {
self.point.line += 1;
}
},
ViMotion::Left => {
self.point = term.expand_wide(self.point, Direction::Left);
let wrap_point = Point::new(self.point.line - 1, term.last_column());
if self.point.column == 0
&& self.point.line > term.topmost_line()
&& is_wrap(term, wrap_point)
{
self.point = wrap_point;
} else {
self.point.column = Column(self.point.column.saturating_sub(1));
}
},
ViMotion::Right => {
self.point = term.expand_wide(self.point, Direction::Right);
if is_wrap(term, self.point) {
self.point = Point::new(self.point.line + 1, Column(0));
} else {
self.point.column = min(self.point.column + 1, term.last_column());
}
},
ViMotion::First => {
self.point = term.expand_wide(self.point, Direction::Left);
while self.point.column == 0
&& self.point.line > term.topmost_line()
&& is_wrap(term, Point::new(self.point.line - 1, term.last_column()))
{
self.point.line -= 1;
}
self.point.column = Column(0);
},
ViMotion::Last => self.point = last(term, self.point),
ViMotion::FirstOccupied => self.point = first_occupied(term, self.point),
ViMotion::High => {
let line = Line(-(term.grid().display_offset() as i32));
let col = first_occupied_in_line(term, line).unwrap_or_default().column;
self.point = Point::new(line, col);
},
ViMotion::Middle => {
let display_offset = term.grid().display_offset() as i32;
let line = Line(-display_offset + term.screen_lines() as i32 / 2 - 1);
let col = first_occupied_in_line(term, line).unwrap_or_default().column;
self.point = Point::new(line, col);
},
ViMotion::Low => {
let display_offset = term.grid().display_offset() as i32;
let line = Line(-display_offset + term.screen_lines() as i32 - 1);
let col = first_occupied_in_line(term, line).unwrap_or_default().column;
self.point = Point::new(line, col);
},
ViMotion::SemanticLeft => {
self.point = semantic(term, self.point, Direction::Left, Side::Left);
},
ViMotion::SemanticRight => {
self.point = semantic(term, self.point, Direction::Right, Side::Left);
},
ViMotion::SemanticLeftEnd => {
self.point = semantic(term, self.point, Direction::Left, Side::Right);
},
ViMotion::SemanticRightEnd => {
self.point = semantic(term, self.point, Direction::Right, Side::Right);
},
ViMotion::WordLeft => {
self.point = word(term, self.point, Direction::Left, Side::Left);
},
ViMotion::WordRight => {
self.point = word(term, self.point, Direction::Right, Side::Left);
},
ViMotion::WordLeftEnd => {
self.point = word(term, self.point, Direction::Left, Side::Right);
},
ViMotion::WordRightEnd => {
self.point = word(term, self.point, Direction::Right, Side::Right);
},
ViMotion::Bracket => self.point = term.bracket_search(self.point).unwrap_or(self.point),
ViMotion::ParagraphUp => {
// Skip empty lines until we find the next paragraph,
// then skip over the paragraph until we reach the next empty line.
let topmost_line = term.topmost_line();
self.point.line = (*topmost_line..*self.point.line)
.rev()
.skip_while(|line| term.grid()[Line(*line)].is_clear())
.find(|line| term.grid()[Line(*line)].is_clear())
.map_or(topmost_line, Line);
self.point.column = Column(0);
},
ViMotion::ParagraphDown => {
// Skip empty lines until we find the next paragraph,
// then skip over the paragraph until we reach the next empty line.
let bottommost_line = term.bottommost_line();
self.point.line = (*self.point.line..*bottommost_line)
.skip_while(|line| term.grid()[Line(*line)].is_clear())
.find(|line| term.grid()[Line(*line)].is_clear())
.map_or(bottommost_line, Line);
self.point.column = Column(0);
},
}
term.scroll_to_point(self.point);
self
} | use std::cmp::min;
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
use crate::event::EventListener;
use crate::grid::{Dimensions, GridCell};
use crate::index::{Boundary, Column, Direction, Line, Point, Side};
use crate::term::cell::Flags;
use crate::term::Term;
/// Possible vi mode motion movements.
// With the `serde` feature, variants serialize as their lowercase names
// (e.g. `semanticleft`) due to `rename_all = "lowercase"`.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize), serde(rename_all = "lowercase"))]
pub enum ViMotion {
    /// Move up.
    Up,
    /// Move down.
    Down,
    /// Move left.
    Left,
    /// Move right.
    Right,
    /// First column, or beginning of the line when already at the first column.
    First,
    /// Last column, or beginning of the line when already at the last column.
    Last,
    /// First non-empty cell in this terminal row, or first non-empty cell
    /// of the line when already at the first cell of the row.
    FirstOccupied,
    /// Move to top of screen.
    High,
    /// Move to center of screen.
    Middle,
    /// Move to bottom of screen.
    Low,
    /// Move to start of semantically separated word.
    SemanticLeft,
    /// Move to start of next semantically separated word.
    SemanticRight,
    /// Move to end of previous semantically separated word.
    SemanticLeftEnd,
    /// Move to end of semantically separated word.
    SemanticRightEnd,
    /// Move to start of whitespace separated word.
    WordLeft,
    /// Move to start of next whitespace separated word.
    WordRight,
    /// Move to end of previous whitespace separated word.
    WordLeftEnd,
    /// Move to end of whitespace separated word.
    WordRightEnd,
    /// Move to opposing bracket.
    Bracket,
    /// Move above the current paragraph.
    ParagraphUp,
    /// Move below the current paragraph.
    ParagraphDown,
}
/// Cursor tracking vi mode position.
#[derive(Default, Copy, Clone, PartialEq, Eq)]
pub struct ViModeCursor {
    /// Current cursor location within the terminal grid.
    pub point: Point,
}
impl ViModeCursor {
    /// Create a vi mode cursor at the given grid position.
    pub fn new(point: Point) -> Self {
        Self { point }
    }

    /// Move vi mode cursor.
    ///
    /// Applies a single [`ViMotion`] to the cursor, clamping it to valid
    /// grid positions, then scrolls the terminal to the new point.
    #[must_use = "this returns the result of the operation, without modifying the original"]
    pub fn motion<T: EventListener>(mut self, term: &mut Term<T>, motion: ViMotion) -> Self {
        match motion {
            ViMotion::Up => {
                if self.point.line > term.topmost_line() {
                    self.point.line -= 1;
                }
            },
            ViMotion::Down => {
                if self.point.line + 1 < term.screen_lines() as i32 {
                    self.point.line += 1;
                }
            },
            ViMotion::Left => {
                self.point = term.expand_wide(self.point, Direction::Left);
                let wrap_point = Point::new(self.point.line - 1, term.last_column());
                // At column 0 of a soft-wrapped continuation line, jump to the
                // end of the previous line instead of stopping.
                if self.point.column == 0
                    && self.point.line > term.topmost_line()
                    && is_wrap(term, wrap_point)
                {
                    self.point = wrap_point;
                } else {
                    self.point.column = Column(self.point.column.saturating_sub(1));
                }
            },
            ViMotion::Right => {
                self.point = term.expand_wide(self.point, Direction::Right);
                // On a wrapped cell, continue onto the start of the next line.
                if is_wrap(term, self.point) {
                    self.point = Point::new(self.point.line + 1, Column(0));
                } else {
                    self.point.column = min(self.point.column + 1, term.last_column());
                }
            },
            ViMotion::First => {
                self.point = term.expand_wide(self.point, Direction::Left);
                // Walk up through soft-wrapped lines to the true start of the
                // logical line.
                while self.point.column == 0
                    && self.point.line > term.topmost_line()
                    && is_wrap(term, Point::new(self.point.line - 1, term.last_column()))
                {
                    self.point.line -= 1;
                }
                self.point.column = Column(0);
            },
            ViMotion::Last => self.point = last(term, self.point),
            ViMotion::FirstOccupied => self.point = first_occupied(term, self.point),
            ViMotion::High => {
                // Translate the top viewport row into grid coordinates.
                let line = Line(-(term.grid().display_offset() as i32));
                let col = first_occupied_in_line(term, line).unwrap_or_default().column;
                self.point = Point::new(line, col);
            },
            ViMotion::Middle => {
                let display_offset = term.grid().display_offset() as i32;
                let line = Line(-display_offset + term.screen_lines() as i32 / 2 - 1);
                let col = first_occupied_in_line(term, line).unwrap_or_default().column;
                self.point = Point::new(line, col);
            },
            ViMotion::Low => {
                let display_offset = term.grid().display_offset() as i32;
                let line = Line(-display_offset + term.screen_lines() as i32 - 1);
                let col = first_occupied_in_line(term, line).unwrap_or_default().column;
                self.point = Point::new(line, col);
            },
            ViMotion::SemanticLeft => {
                self.point = semantic(term, self.point, Direction::Left, Side::Left);
            },
            ViMotion::SemanticRight => {
                self.point = semantic(term, self.point, Direction::Right, Side::Left);
            },
            ViMotion::SemanticLeftEnd => {
                self.point = semantic(term, self.point, Direction::Left, Side::Right);
            },
            ViMotion::SemanticRightEnd => {
                self.point = semantic(term, self.point, Direction::Right, Side::Right);
            },
            ViMotion::WordLeft => {
                self.point = word(term, self.point, Direction::Left, Side::Left);
            },
            ViMotion::WordRight => {
                self.point = word(term, self.point, Direction::Right, Side::Left);
            },
            ViMotion::WordLeftEnd => {
                self.point = word(term, self.point, Direction::Left, Side::Right);
            },
            ViMotion::WordRightEnd => {
                self.point = word(term, self.point, Direction::Right, Side::Right);
            },
            // Stay in place when no matching bracket exists.
            ViMotion::Bracket => self.point = term.bracket_search(self.point).unwrap_or(self.point),
            ViMotion::ParagraphUp => {
                // Skip empty lines until we find the next paragraph,
                // then skip over the paragraph until we reach the next empty line.
                let topmost_line = term.topmost_line();
                self.point.line = (*topmost_line..*self.point.line)
                    .rev()
                    .skip_while(|line| term.grid()[Line(*line)].is_clear())
                    .find(|line| term.grid()[Line(*line)].is_clear())
                    .map_or(topmost_line, Line);
                self.point.column = Column(0);
            },
            ViMotion::ParagraphDown => {
                // Skip empty lines until we find the next paragraph,
                // then skip over the paragraph until we reach the next empty line.
                let bottommost_line = term.bottommost_line();
                self.point.line = (*self.point.line..*bottommost_line)
                    .skip_while(|line| term.grid()[Line(*line)].is_clear())
                    .find(|line| term.grid()[Line(*line)].is_clear())
                    .map_or(bottommost_line, Line);
                self.point.column = Column(0);
            },
        }

        // Scroll the viewport to the cursor's new position (delegated to Term).
        term.scroll_to_point(self.point);

        self
    }

    /// Get target cursor point for vim-like page movement.
    #[must_use = "this returns the result of the operation, without modifying the original"]
    pub fn scroll<T: EventListener>(mut self, term: &Term<T>, lines: i32) -> Self {
        // Clamp movement to within visible region.
        let line = (self.point.line - lines).grid_clamp(term, Boundary::Grid);

        // Find the first occupied cell after scrolling has been performed.
        let column = first_occupied_in_line(term, line).unwrap_or_default().column;

        // Move cursor.
        self.point = Point::new(line, column);

        self
    }
}
/// Find next end of line to move to.
fn last<T>(term: &Term<T>, mut point: Point) -> Point {
    // Expand across wide cells.
    point = term.expand_wide(point, Direction::Right);

    // Find last non-empty cell in the current line; falls back to the
    // default point when the line is fully empty.
    let occupied = last_occupied_in_line(term, point.line).unwrap_or_default();

    if point.column < occupied.column {
        // Jump to last occupied cell when not already at or beyond it.
        occupied
    } else if is_wrap(term, point) {
        // Jump to last occupied cell across linewraps.
        while is_wrap(term, point) {
            point.line += 1;
        }

        last_occupied_in_line(term, point.line).unwrap_or(point)
    } else {
        // Jump to last column when beyond the last occupied cell.
        Point::new(point.line, term.last_column())
    }
}
/// Find next non-empty cell to move to.
fn first_occupied<T>(term: &Term<T>, mut point: Point) -> Point {
    let last_column = term.last_column();

    // Expand left across wide chars, since we're searching lines left to right.
    point = term.expand_wide(point, Direction::Left);

    // Find first non-empty cell in current line.
    let occupied = first_occupied_in_line(term, point.line)
        .unwrap_or_else(|| Point::new(point.line, last_column));

    // Jump across wrapped lines if we're already at this line's first occupied cell.
    if point == occupied {
        let mut occupied = None;

        // Search for non-empty cell in previous lines, stopping at the first
        // line that is not a soft-wrap continuation.
        for line in (term.topmost_line().0..point.line.0).rev().map(Line::from) {
            if !is_wrap(term, Point::new(line, last_column)) {
                break;
            }

            occupied = first_occupied_in_line(term, line).or(occupied);
        }

        // Fallback to the next non-empty cell.
        let mut line = point.line;
        occupied.unwrap_or_else(|| loop {
            if let Some(occupied) = first_occupied_in_line(term, line) {
                break occupied;
            }

            let last_cell = Point::new(line, last_column);
            if !is_wrap(term, last_cell) {
                break last_cell;
            }

            line += 1;
        })
    } else {
        occupied
    }
}
/// Move by semantically separated word, like w/b/e/ge in vi.
///
/// `direction` selects the movement direction; `side` selects which word
/// boundary (start or end) the cursor lands on.
fn semantic<T: EventListener>(
    term: &Term<T>,
    mut point: Point,
    direction: Direction,
    side: Side,
) -> Point {
    // Expand semantically based on movement direction.
    let expand_semantic = |point: Point| {
        // Do not expand when currently on a semantic escape char.
        let cell = &term.grid()[point];
        if term.semantic_escape_chars().contains(cell.c)
            && !cell.flags.intersects(Flags::WIDE_CHAR_SPACER | Flags::LEADING_WIDE_CHAR_SPACER)
        {
            point
        } else if direction == Direction::Left {
            term.semantic_search_left(point)
        } else {
            term.semantic_search_right(point)
        }
    };

    // Move to word boundary.
    if direction != side && !is_boundary(term, point, direction) {
        point = expand_semantic(point);
    }

    // Make sure we jump above wide chars.
    point = term.expand_wide(point, direction);

    // Skip whitespace.
    let mut next_point = advance(term, point, direction);
    while !is_boundary(term, point, direction) && is_space(term, next_point) {
        point = next_point;
        next_point = advance(term, point, direction);
    }

    // Assure minimum movement of one cell.
    if !is_boundary(term, point, direction) {
        point = advance(term, point, direction);

        // Skip over wide cell spacers.
        if direction == Direction::Left {
            point = term.expand_wide(point, direction);
        }
    }

    // Move to word boundary.
    if direction == side && !is_boundary(term, point, direction) {
        point = expand_semantic(point);
    }

    point
}
/// Move by whitespace separated word, like W/B/E/gE in vi.
fn word<T: EventListener>(
    term: &Term<T>,
    mut point: Point,
    direction: Direction,
    side: Side,
) -> Point {
    // Make sure we jump above wide chars.
    point = term.expand_wide(point, direction);

    if direction == side {
        // Skip whitespace until right before a word.
        loop {
            let next = advance(term, point, direction);
            if is_boundary(term, point, direction) || !is_space(term, next) {
                break;
            }
            point = next;
        }

        // Skip non-whitespace until right inside word boundary.
        loop {
            let next = advance(term, point, direction);
            if is_boundary(term, point, direction) || is_space(term, next) {
                break;
            }
            point = next;
        }
    } else {
        // Skip non-whitespace until just beyond word.
        while !is_boundary(term, point, direction) && !is_space(term, point) {
            point = advance(term, point, direction);
        }

        // Skip whitespace until right inside word boundary.
        while !is_boundary(term, point, direction) && is_space(term, point) {
            point = advance(term, point, direction);
        }
    }

    point
}
/// Find first non-empty cell in line.
fn first_occupied_in_line<T>(term: &Term<T>, line: Line) -> Option<Point> {
    // Scan columns left to right and stop at the first non-whitespace cell.
    for col in 0..term.columns() {
        let point = Point::new(line, Column(col));
        if !is_space(term, point) {
            return Some(point);
        }
    }
    None
}
/// Find last non-empty cell in line.
fn last_occupied_in_line<T>(term: &Term<T>, line: Line) -> Option<Point> {
    // Scan columns right to left and stop at the first non-whitespace cell.
    for col in (0..term.columns()).rev() {
        let point = Point::new(line, Column(col));
        if !is_space(term, point) {
            return Some(point);
        }
    }
    None
}
/// Advance point based on direction.
fn advance<T>(term: &Term<T>, point: Point, direction: Direction) -> Point {
    // Step one cell left or right, clamped to the grid boundary.
    match direction {
        Direction::Left => point.sub(term, Boundary::Grid, 1),
        Direction::Right => point.add(term, Boundary::Grid, 1),
    }
}
/// Check if cell at point contains whitespace.
fn is_space<T>(term: &Term<T>, point: Point) -> bool {
    let cell = &term.grid()[point.line][point.column];
    // Wide char spacers hold a blank character but belong to a wide char,
    // so they never count as whitespace.
    let is_spacer =
        cell.flags().intersects(Flags::WIDE_CHAR_SPACER | Flags::LEADING_WIDE_CHAR_SPACER);
    !is_spacer && matches!(cell.c, ' ' | '\t')
}
/// Check if the cell at a point contains the WRAPLINE flag.
fn is_wrap<T>(term: &Term<T>, point: Point) -> bool {
    let flags = term.grid()[point].flags;
    flags.contains(Flags::WRAPLINE)
}
/// Check if point is at screen boundary.
fn is_boundary<T>(term: &Term<T>, point: Point, direction: Direction) -> bool {
    // Each direction only has one boundary: the first cell of the topmost
    // line when moving left, the last cell of the bottommost line when
    // moving right.
    match direction {
        Direction::Left => point.line <= term.topmost_line() && point.column == 0,
        Direction::Right => {
            point.line == term.bottommost_line() && point.column + 1 >= term.columns()
        },
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::event::VoidListener;
    use crate::index::{Column, Line};
    use crate::term::test::TermSize;
    use crate::term::{Config, Term};
    use crate::vte::ansi::Handler;

    /// Build a default 20x20 terminal used by all motion tests.
    fn term() -> Term<VoidListener> {
        let size = TermSize::new(20, 20);
        Term::new(Config::default(), &size, VoidListener)
    }

    // Single-cell movement in all four directions.
    #[test]
    fn motion_simple() {
        let mut term = term();
        let mut cursor = ViModeCursor::new(Point::new(Line(0), Column(0)));
        cursor = cursor.motion(&mut term, ViMotion::Right);
        assert_eq!(cursor.point, Point::new(Line(0), Column(1)));
        cursor = cursor.motion(&mut term, ViMotion::Left);
        assert_eq!(cursor.point, Point::new(Line(0), Column(0)));
        cursor = cursor.motion(&mut term, ViMotion::Down);
        assert_eq!(cursor.point, Point::new(Line(1), Column(0)));
        cursor = cursor.motion(&mut term, ViMotion::Up);
        assert_eq!(cursor.point, Point::new(Line(0), Column(0)));
    }

    // Left/Right motion jumps across both halves of a wide character.
    #[test]
    fn simple_wide() {
        let mut term = term();
        term.grid_mut()[Line(0)][Column(0)].c = 'a';
        term.grid_mut()[Line(0)][Column(1)].c = '汉';
        term.grid_mut()[Line(0)][Column(1)].flags.insert(Flags::WIDE_CHAR);
        term.grid_mut()[Line(0)][Column(2)].c = ' ';
        term.grid_mut()[Line(0)][Column(2)].flags.insert(Flags::WIDE_CHAR_SPACER);
        term.grid_mut()[Line(0)][Column(3)].c = 'a';
        let mut cursor = ViModeCursor::new(Point::new(Line(0), Column(1)));
        cursor = cursor.motion(&mut term, ViMotion::Right);
        assert_eq!(cursor.point, Point::new(Line(0), Column(3)));
        let mut cursor = ViModeCursor::new(Point::new(Line(0), Column(2)));
        cursor = cursor.motion(&mut term, ViMotion::Left);
        assert_eq!(cursor.point, Point::new(Line(0), Column(0)));
    }

    // First/Last jump to the first and last column of the line.
    #[test]
    fn motion_start_end() {
        let mut term = term();
        let mut cursor = ViModeCursor::new(Point::new(Line(0), Column(0)));
        cursor = cursor.motion(&mut term, ViMotion::Last);
        assert_eq!(cursor.point, Point::new(Line(0), Column(19)));
        cursor = cursor.motion(&mut term, ViMotion::First);
        assert_eq!(cursor.point, Point::new(Line(0), Column(0)));
    }

    // FirstOccupied jumps to the first non-empty cell of the row; when
    // already there, it continues across wrapped lines to the start of the
    // logical line.
    #[test]
    fn motion_first_occupied() {
        let mut term = term();
        term.grid_mut()[Line(0)][Column(0)].c = ' ';
        term.grid_mut()[Line(0)][Column(1)].c = 'x';
        term.grid_mut()[Line(0)][Column(2)].c = ' ';
        term.grid_mut()[Line(0)][Column(3)].c = 'y';
        term.grid_mut()[Line(0)][Column(19)].flags.insert(Flags::WRAPLINE);
        term.grid_mut()[Line(1)][Column(19)].flags.insert(Flags::WRAPLINE);
        term.grid_mut()[Line(2)][Column(0)].c = 'z';
        term.grid_mut()[Line(2)][Column(1)].c = ' ';
        let mut cursor = ViModeCursor::new(Point::new(Line(2), Column(1)));
        cursor = cursor.motion(&mut term, ViMotion::FirstOccupied);
        assert_eq!(cursor.point, Point::new(Line(2), Column(0)));
        cursor = cursor.motion(&mut term, ViMotion::FirstOccupied);
        assert_eq!(cursor.point, Point::new(Line(0), Column(1)));
    }

    // High/Middle/Low target the top, center and bottom viewport lines.
    #[test]
    fn motion_high_middle_low() {
        let mut term = term();
        let mut cursor = ViModeCursor::new(Point::new(Line(0), Column(0)));
        cursor = cursor.motion(&mut term, ViMotion::High);
        assert_eq!(cursor.point, Point::new(Line(0), Column(0)));
        cursor = cursor.motion(&mut term, ViMotion::Middle);
        assert_eq!(cursor.point, Point::new(Line(9), Column(0)));
        cursor = cursor.motion(&mut term, ViMotion::Low);
        assert_eq!(cursor.point, Point::new(Line(19), Column(0)));
    }

    // Bracket motion jumps between matching brackets in both directions.
    #[test]
    fn motion_bracket() {
        let mut term = term();
        term.grid_mut()[Line(0)][Column(0)].c = '(';
        term.grid_mut()[Line(0)][Column(1)].c = 'x';
        term.grid_mut()[Line(0)][Column(2)].c = ')';
        let mut cursor = ViModeCursor::new(Point::new(Line(0), Column(0)));
        cursor = cursor.motion(&mut term, ViMotion::Bracket);
        assert_eq!(cursor.point, Point::new(Line(0), Column(2)));
        cursor = cursor.motion(&mut term, ViMotion::Bracket);
        assert_eq!(cursor.point, Point::new(Line(0), Column(0)));
    }

    /// Shared fixture for the semantic motion tests.
    ///
    /// Line 0 content: `x xx  : x:x  : x` — words separated by spaces and
    /// the `:` semantic escape char.
    fn motion_semantic_term() -> Term<VoidListener> {
        let mut term = term();
        term.grid_mut()[Line(0)][Column(0)].c = 'x';
        term.grid_mut()[Line(0)][Column(1)].c = ' ';
        term.grid_mut()[Line(0)][Column(2)].c = 'x';
        term.grid_mut()[Line(0)][Column(3)].c = 'x';
        term.grid_mut()[Line(0)][Column(4)].c = ' ';
        term.grid_mut()[Line(0)][Column(5)].c = ' ';
        term.grid_mut()[Line(0)][Column(6)].c = ':';
        term.grid_mut()[Line(0)][Column(7)].c = ' ';
        term.grid_mut()[Line(0)][Column(8)].c = 'x';
        term.grid_mut()[Line(0)][Column(9)].c = ':';
        term.grid_mut()[Line(0)][Column(10)].c = 'x';
        term.grid_mut()[Line(0)][Column(11)].c = ' ';
        term.grid_mut()[Line(0)][Column(12)].c = ' ';
        term.grid_mut()[Line(0)][Column(13)].c = ':';
        term.grid_mut()[Line(0)][Column(14)].c = ' ';
        term.grid_mut()[Line(0)][Column(15)].c = 'x';
        term
    }

    // SemanticRightEnd stops on the last cell of each semantic word ('e').
    #[test]
    fn motion_semantic_right_end() {
        let mut term = motion_semantic_term();
        let mut cursor = ViModeCursor::new(Point::new(Line(0), Column(0)));
        cursor = cursor.motion(&mut term, ViMotion::SemanticRightEnd);
        assert_eq!(cursor.point, Point::new(Line(0), Column(3)));
        cursor = cursor.motion(&mut term, ViMotion::SemanticRightEnd);
        assert_eq!(cursor.point, Point::new(Line(0), Column(6)));
        cursor = cursor.motion(&mut term, ViMotion::SemanticRightEnd);
        assert_eq!(cursor.point, Point::new(Line(0), Column(8)));
        cursor = cursor.motion(&mut term, ViMotion::SemanticRightEnd);
        assert_eq!(cursor.point, Point::new(Line(0), Column(9)));
        cursor = cursor.motion(&mut term, ViMotion::SemanticRightEnd);
        assert_eq!(cursor.point, Point::new(Line(0), Column(10)));
        cursor = cursor.motion(&mut term, ViMotion::SemanticRightEnd);
        assert_eq!(cursor.point, Point::new(Line(0), Column(13)));
        cursor = cursor.motion(&mut term, ViMotion::SemanticRightEnd);
        assert_eq!(cursor.point, Point::new(Line(0), Column(15)));
    }

    // SemanticLeft stops on the first cell of each semantic word ('b').
    #[test]
    fn motion_semantic_left_start() {
        let mut term = motion_semantic_term();
        let mut cursor = ViModeCursor::new(Point::new(Line(0), Column(15)));
        cursor = cursor.motion(&mut term, ViMotion::SemanticLeft);
        assert_eq!(cursor.point, Point::new(Line(0), Column(13)));
        cursor = cursor.motion(&mut term, ViMotion::SemanticLeft);
        assert_eq!(cursor.point, Point::new(Line(0), Column(10)));
        cursor = cursor.motion(&mut term, ViMotion::SemanticLeft);
        assert_eq!(cursor.point, Point::new(Line(0), Column(9)));
        cursor = cursor.motion(&mut term, ViMotion::SemanticLeft);
        assert_eq!(cursor.point, Point::new(Line(0), Column(8)));
        cursor = cursor.motion(&mut term, ViMotion::SemanticLeft);
        assert_eq!(cursor.point, Point::new(Line(0), Column(6)));
        cursor = cursor.motion(&mut term, ViMotion::SemanticLeft);
        assert_eq!(cursor.point, Point::new(Line(0), Column(2)));
        cursor = cursor.motion(&mut term, ViMotion::SemanticLeft);
        assert_eq!(cursor.point, Point::new(Line(0), Column(0)));
    }

    // SemanticRight stops on the first cell of the next semantic word ('w').
    #[test]
    fn motion_semantic_right_start() {
        let mut term = motion_semantic_term();
        let mut cursor = ViModeCursor::new(Point::new(Line(0), Column(0)));
        cursor = cursor.motion(&mut term, ViMotion::SemanticRight);
        assert_eq!(cursor.point, Point::new(Line(0), Column(2)));
        cursor = cursor.motion(&mut term, ViMotion::SemanticRight);
        assert_eq!(cursor.point, Point::new(Line(0), Column(6)));
        cursor = cursor.motion(&mut term, ViMotion::SemanticRight);
        assert_eq!(cursor.point, Point::new(Line(0), Column(8)));
        cursor = cursor.motion(&mut term, ViMotion::SemanticRight);
        assert_eq!(cursor.point, Point::new(Line(0), Column(9)));
        cursor = cursor.motion(&mut term, ViMotion::SemanticRight);
        assert_eq!(cursor.point, Point::new(Line(0), Column(10)));
        cursor = cursor.motion(&mut term, ViMotion::SemanticRight);
        assert_eq!(cursor.point, Point::new(Line(0), Column(13)));
        cursor = cursor.motion(&mut term, ViMotion::SemanticRight);
        assert_eq!(cursor.point, Point::new(Line(0), Column(15)));
    }

    // SemanticLeftEnd stops on the last cell of the previous word ('ge').
    #[test]
    fn motion_semantic_left_end() {
        let mut term = motion_semantic_term();
        let mut cursor = ViModeCursor::new(Point::new(Line(0), Column(15)));
        cursor = cursor.motion(&mut term, ViMotion::SemanticLeftEnd);
        assert_eq!(cursor.point, Point::new(Line(0), Column(13)));
        cursor = cursor.motion(&mut term, ViMotion::SemanticLeftEnd);
        assert_eq!(cursor.point, Point::new(Line(0), Column(10)));
        cursor = cursor.motion(&mut term, ViMotion::SemanticLeftEnd);
        assert_eq!(cursor.point, Point::new(Line(0), Column(9)));
        cursor = cursor.motion(&mut term, ViMotion::SemanticLeftEnd);
        assert_eq!(cursor.point, Point::new(Line(0), Column(8)));
        cursor = cursor.motion(&mut term, ViMotion::SemanticLeftEnd);
        assert_eq!(cursor.point, Point::new(Line(0), Column(6)));
        cursor = cursor.motion(&mut term, ViMotion::SemanticLeftEnd);
        assert_eq!(cursor.point, Point::new(Line(0), Column(3)));
        cursor = cursor.motion(&mut term, ViMotion::SemanticLeftEnd);
        assert_eq!(cursor.point, Point::new(Line(0), Column(0)));
    }

    // Semantic motions move into scrollback and adjust the display offset.
    #[test]
    fn scroll_semantic() {
        let mut term = term();
        term.grid_mut().scroll_up(&(Line(0)..Line(20)), 5);
        let mut cursor = ViModeCursor::new(Point::new(Line(0), Column(0)));
        cursor = cursor.motion(&mut term, ViMotion::SemanticLeft);
        assert_eq!(cursor.point, Point::new(Line(-5), Column(0)));
        assert_eq!(term.grid().display_offset(), 5);
        cursor = cursor.motion(&mut term, ViMotion::SemanticRight);
        assert_eq!(cursor.point, Point::new(Line(19), Column(19)));
        assert_eq!(term.grid().display_offset(), 0);
        cursor = cursor.motion(&mut term, ViMotion::SemanticLeftEnd);
        assert_eq!(cursor.point, Point::new(Line(-5), Column(0)));
        assert_eq!(term.grid().display_offset(), 5);
        cursor = cursor.motion(&mut term, ViMotion::SemanticRightEnd);
        assert_eq!(cursor.point, Point::new(Line(19), Column(19)));
        assert_eq!(term.grid().display_offset(), 0);
    }

    // Semantic motions skip over wide chars and their spacer cells.
    #[test]
    fn semantic_wide() {
        let mut term = term();
        term.grid_mut()[Line(0)][Column(0)].c = 'a';
        term.grid_mut()[Line(0)][Column(1)].c = ' ';
        term.grid_mut()[Line(0)][Column(2)].c = '汉';
        term.grid_mut()[Line(0)][Column(2)].flags.insert(Flags::WIDE_CHAR);
        term.grid_mut()[Line(0)][Column(3)].c = ' ';
        term.grid_mut()[Line(0)][Column(3)].flags.insert(Flags::WIDE_CHAR_SPACER);
        term.grid_mut()[Line(0)][Column(4)].c = ' ';
        term.grid_mut()[Line(0)][Column(5)].c = 'a';
        let mut cursor = ViModeCursor::new(Point::new(Line(0), Column(2)));
        cursor = cursor.motion(&mut term, ViMotion::SemanticRight);
        assert_eq!(cursor.point, Point::new(Line(0), Column(5)));
        let mut cursor = ViModeCursor::new(Point::new(Line(0), Column(3)));
        cursor = cursor.motion(&mut term, ViMotion::SemanticLeft);
        assert_eq!(cursor.point, Point::new(Line(0), Column(0)));
    }

    // Whitespace-separated word motions treat `a;` as a single word.
    #[test]
    fn motion_word() {
        let mut term = term();
        term.grid_mut()[Line(0)][Column(0)].c = 'a';
        term.grid_mut()[Line(0)][Column(1)].c = ';';
        term.grid_mut()[Line(0)][Column(2)].c = ' ';
        term.grid_mut()[Line(0)][Column(3)].c = ' ';
        term.grid_mut()[Line(0)][Column(4)].c = 'a';
        term.grid_mut()[Line(0)][Column(5)].c = ';';
        let mut cursor = ViModeCursor::new(Point::new(Line(0), Column(0)));
        cursor = cursor.motion(&mut term, ViMotion::WordRightEnd);
        assert_eq!(cursor.point, Point::new(Line(0), Column(1)));
        cursor = cursor.motion(&mut term, ViMotion::WordRightEnd);
        assert_eq!(cursor.point, Point::new(Line(0), Column(5)));
        cursor = cursor.motion(&mut term, ViMotion::WordLeft);
        assert_eq!(cursor.point, Point::new(Line(0), Column(4)));
        cursor = cursor.motion(&mut term, ViMotion::WordLeft);
        assert_eq!(cursor.point, Point::new(Line(0), Column(0)));
        cursor = cursor.motion(&mut term, ViMotion::WordRight);
        assert_eq!(cursor.point, Point::new(Line(0), Column(4)));
        cursor = cursor.motion(&mut term, ViMotion::WordLeftEnd);
        assert_eq!(cursor.point, Point::new(Line(0), Column(1)));
    }

    // Word motions move into scrollback and adjust the display offset.
    #[test]
    fn scroll_word() {
        let mut term = term();
        term.grid_mut().scroll_up(&(Line(0)..Line(20)), 5);
        let mut cursor = ViModeCursor::new(Point::new(Line(0), Column(0)));
        cursor = cursor.motion(&mut term, ViMotion::WordLeft);
        assert_eq!(cursor.point, Point::new(Line(-5), Column(0)));
        assert_eq!(term.grid().display_offset(), 5);
        cursor = cursor.motion(&mut term, ViMotion::WordRight);
        assert_eq!(cursor.point, Point::new(Line(19), Column(19)));
        assert_eq!(term.grid().display_offset(), 0);
        cursor = cursor.motion(&mut term, ViMotion::WordLeftEnd);
        assert_eq!(cursor.point, Point::new(Line(-5), Column(0)));
        assert_eq!(term.grid().display_offset(), 5);
        cursor = cursor.motion(&mut term, ViMotion::WordRightEnd);
        assert_eq!(cursor.point, Point::new(Line(19), Column(19)));
        assert_eq!(term.grid().display_offset(), 0);
    }

    // Word motions skip over wide chars and their spacer cells.
    #[test]
    fn word_wide() {
        let mut term = term();
        term.grid_mut()[Line(0)][Column(0)].c = 'a';
        term.grid_mut()[Line(0)][Column(1)].c = ' ';
        term.grid_mut()[Line(0)][Column(2)].c = '汉';
        term.grid_mut()[Line(0)][Column(2)].flags.insert(Flags::WIDE_CHAR);
        term.grid_mut()[Line(0)][Column(3)].c = ' ';
        term.grid_mut()[Line(0)][Column(3)].flags.insert(Flags::WIDE_CHAR_SPACER);
        term.grid_mut()[Line(0)][Column(4)].c = ' ';
        term.grid_mut()[Line(0)][Column(5)].c = 'a';
        let mut cursor = ViModeCursor::new(Point::new(Line(0), Column(2)));
        cursor = cursor.motion(&mut term, ViMotion::WordRight);
        assert_eq!(cursor.point, Point::new(Line(0), Column(5)));
        let mut cursor = ViModeCursor::new(Point::new(Line(0), Column(3)));
        cursor = cursor.motion(&mut term, ViMotion::WordLeft);
        assert_eq!(cursor.point, Point::new(Line(0), Column(0)));
    }

    // Scrolling moves the cursor by lines, including into scrollback.
    #[test]
    fn scroll_simple() {
        let mut term = term();
        // Create 1 line of scrollback.
        for _ in 0..20 {
            term.newline();
        }
        let mut cursor = ViModeCursor::new(Point::new(Line(0), Column(0)));
        cursor = cursor.scroll(&term, -1);
        assert_eq!(cursor.point, Point::new(Line(1), Column(0)));
        cursor = cursor.scroll(&term, 1);
        assert_eq!(cursor.point, Point::new(Line(0), Column(0)));
        cursor = cursor.scroll(&term, 1);
        assert_eq!(cursor.point, Point::new(Line(-1), Column(0)));
    }

    // Scrolling up clamps at the top of the scrollback history.
    #[test]
    fn scroll_over_top() {
        let mut term = term();
        // Create 40 lines of scrollback.
        for _ in 0..59 {
            term.newline();
        }
        let mut cursor = ViModeCursor::new(Point::new(Line(19), Column(0)));
        cursor = cursor.scroll(&term, 20);
        assert_eq!(cursor.point, Point::new(Line(-1), Column(0)));
        cursor = cursor.scroll(&term, 20);
        assert_eq!(cursor.point, Point::new(Line(-21), Column(0)));
        cursor = cursor.scroll(&term, 20);
        assert_eq!(cursor.point, Point::new(Line(-40), Column(0)));
        cursor = cursor.scroll(&term, 20);
        assert_eq!(cursor.point, Point::new(Line(-40), Column(0)));
    }

    // Scrolling down clamps at the bottom of the screen.
    #[test]
    fn scroll_over_bottom() {
        let mut term = term();
        // Create 40 lines of scrollback.
        for _ in 0..59 {
            term.newline();
        }
        let mut cursor = ViModeCursor::new(Point::new(Line(-40), Column(0)));
        cursor = cursor.scroll(&term, -20);
        assert_eq!(cursor.point, Point::new(Line(-20), Column(0)));
        cursor = cursor.scroll(&term, -20);
        assert_eq!(cursor.point, Point::new(Line(0), Column(0)));
        cursor = cursor.scroll(&term, -20);
        assert_eq!(cursor.point, Point::new(Line(19), Column(0)));
        cursor = cursor.scroll(&term, -20);
        assert_eq!(cursor.point, Point::new(Line(19), Column(0)));
    }

    // A wide semantic escape char acts as its own word boundary.
    #[test]
    fn wide_semantic_char() {
        let mut term = term();
        term.set_semantic_escape_chars("-");
        term.grid_mut()[Line(0)][Column(0)].c = 'x';
        term.grid_mut()[Line(0)][Column(1)].c = 'x';
        term.grid_mut()[Line(0)][Column(2)].c = '-';
        term.grid_mut()[Line(0)][Column(2)].flags.insert(Flags::WIDE_CHAR);
        term.grid_mut()[Line(0)][Column(3)].c = ' ';
        term.grid_mut()[Line(0)][Column(3)].flags.insert(Flags::WIDE_CHAR_SPACER);
        term.grid_mut()[Line(0)][Column(4)].c = 'x';
        term.grid_mut()[Line(0)][Column(5)].c = 'x';
        // Test motion to the right.
        let mut cursor = ViModeCursor::new(Point::new(Line(0), Column(0)));
        cursor = cursor.motion(&mut term, ViMotion::SemanticRight);
        assert_eq!(cursor.point, Point::new(Line(0), Column(2)));
        let mut cursor = ViModeCursor::new(Point::new(Line(0), Column(2)));
        cursor = cursor.motion(&mut term, ViMotion::SemanticRight);
        assert_eq!(cursor.point, Point::new(Line(0), Column(4)));
        // Test motion to the left.
        let mut cursor = ViModeCursor::new(Point::new(Line(0), Column(5)));
        cursor = cursor.motion(&mut term, ViMotion::SemanticLeft);
        assert_eq!(cursor.point, Point::new(Line(0), Column(4)));
        let mut cursor = ViModeCursor::new(Point::new(Line(0), Column(4)));
        cursor = cursor.motion(&mut term, ViMotion::SemanticLeft);
        assert_eq!(cursor.point, Point::new(Line(0), Column(2)));
        let mut cursor = ViModeCursor::new(Point::new(Line(0), Column(2)));
        cursor = cursor.motion(&mut term, ViMotion::SemanticLeft);
        assert_eq!(cursor.point, Point::new(Line(0), Column(0)));
    }
}
| rust | {
"argument_definitions": [
{
"definitions": [
"pub struct Term<T> {\n /// Terminal focus controlling the cursor shape.\n pub is_focused: bool,\n\n /// Cursor for keyboard selection.\n pub vi_mode_cursor: ViModeCursor,\n\n pub selection: Option<Selection>,\n\n /// Currently active grid.\n ///\n /// Tracks the screen buffer currently in use. While the alternate screen buffer is active,\n /// this will be the alternate grid. Otherwise it is the primary screen buffer.\n grid: Grid<Cell>,\n\n /// Currently inactive grid.\n ///\n /// Opposite of the active grid. While the alternate screen buffer is active, this will be the\n /// primary grid. Otherwise it is the alternate screen buffer.\n inactive_grid: Grid<Cell>,\n\n /// Index into `charsets`, pointing to what ASCII is currently being mapped to.\n active_charset: CharsetIndex,\n\n /// Tabstops.\n tabs: TabStops,\n\n /// Mode flags.\n mode: TermMode,\n\n /// Scroll region.\n ///\n /// Range going from top to bottom of the terminal, indexed from the top of the viewport.\n scroll_region: Range<Line>,\n\n /// Modified terminal colors.\n colors: Colors,\n\n /// Current style of the cursor.\n cursor_style: Option<CursorStyle>,\n\n /// Proxy for sending events to the event loop.\n event_proxy: T,\n\n /// Current title of the window.\n title: Option<String>,\n\n /// Stack of saved window titles. When a title is popped from this stack, the `title` for the\n /// term is set.\n title_stack: Vec<Option<String>>,\n\n /// The stack for the keyboard modes.\n keyboard_mode_stack: Vec<KeyboardModes>,\n\n /// Currently inactive keyboard mode stack.\n inactive_keyboard_mode_stack: Vec<KeyboardModes>,\n\n /// Information about damaged cells.\n damage: TermDamageState,\n\n /// Config directly for the terminal.\n config: Config,\n}"
],
"name": "term",
"type": "&mut Term<T>"
},
{
"definitions": [
"pub enum ViMotion {\n /// Move up.\n Up,\n /// Move down.\n Down,\n /// Move left.\n Left,\n /// Move right.\n Right,\n /// First column, or beginning of the line when already at the first column.\n First,\n /// Last column, or beginning of the line when already at the last column.\n Last,\n /// First non-empty cell in this terminal row, or first non-empty cell\n /// of the line when already at the first cell of the row.\n FirstOccupied,\n /// Move to top of screen.\n High,\n /// Move to center of screen.\n Middle,\n /// Move to bottom of screen.\n Low,\n /// Move to start of semantically separated word.\n SemanticLeft,\n /// Move to start of next semantically separated word.\n SemanticRight,\n /// Move to end of previous semantically separated word.\n SemanticLeftEnd,\n /// Move to end of semantically separated word.\n SemanticRightEnd,\n /// Move to start of whitespace separated word.\n WordLeft,\n /// Move to start of next whitespace separated word.\n WordRight,\n /// Move to end of previous whitespace separated word.\n WordLeftEnd,\n /// Move to end of whitespace separated word.\n WordRightEnd,\n /// Move to opposing bracket.\n Bracket,\n /// Move above the current paragraph.\n ParagraphUp,\n /// Move below the current paragraph.\n ParagraphDown,\n}"
],
"name": "motion",
"type": "ViMotion"
}
],
"end_line": 186,
"name": "motion",
"signature": "pub fn motion(mut self, term: &mut Term<T>, motion: ViMotion) -> Self",
"start_line": 74
} | {
"class_name": "impl ViModeCursor {\n pub fn new(point: Point) -> Self {\n Self { point }\n }\n\n /// Move vi mode cursor.\n #[must_use = \"this returns the result of the operation, without modifying the original\"]\n pub fn motion<T: EventListener>(mut self, term: &mut Term<T>, motion: ViMotion) -> Self {\n match motion {\n ViMotion::Up => {\n if self.point.line > term.topmost_line() {\n self.point.line -= 1;\n }\n },\n ViMotion::Down => {\n if self.point.line + 1 < term.screen_lines() as i32 {\n self.point.line += 1;\n }\n },\n ViMotion::Left => {\n self.point = term.expand_wide(self.point, Direction::Left);\n let wrap_point = Point::new(self.point.line - 1, term.last_column());\n if self.point.column == 0\n && self.point.line > term.topmost_line()\n && is_wrap(term, wrap_point)\n {\n self.point = wrap_point;\n } else {\n self.point.column = Column(self.point.column.saturating_sub(1));\n }\n },\n ViMotion::Right => {\n self.point = term.expand_wide(self.point, Direction::Right);\n if is_wrap(term, self.point) {\n self.point = Point::new(self.point.line + 1, Column(0));\n } else {\n self.point.column = min(self.point.column + 1, term.last_column());\n }\n },\n ViMotion::First => {\n self.point = term.expand_wide(self.point, Direction::Left);\n while self.point.column == 0\n && self.point.line > term.topmost_line()\n && is_wrap(term, Point::new(self.point.line - 1, term.last_column()))\n {\n self.point.line -= 1;\n }\n self.point.column = Column(0);\n },\n ViMotion::Last => self.point = last(term, self.point),\n ViMotion::FirstOccupied => self.point = first_occupied(term, self.point),\n ViMotion::High => {\n let line = Line(-(term.grid().display_offset() as i32));\n let col = first_occupied_in_line(term, line).unwrap_or_default().column;\n self.point = Point::new(line, col);\n },\n ViMotion::Middle => {\n let display_offset = term.grid().display_offset() as i32;\n let line = Line(-display_offset + term.screen_lines() as i32 / 2 - 1);\n let col = 
first_occupied_in_line(term, line).unwrap_or_default().column;\n self.point = Point::new(line, col);\n },\n ViMotion::Low => {\n let display_offset = term.grid().display_offset() as i32;\n let line = Line(-display_offset + term.screen_lines() as i32 - 1);\n let col = first_occupied_in_line(term, line).unwrap_or_default().column;\n self.point = Point::new(line, col);\n },\n ViMotion::SemanticLeft => {\n self.point = semantic(term, self.point, Direction::Left, Side::Left);\n },\n ViMotion::SemanticRight => {\n self.point = semantic(term, self.point, Direction::Right, Side::Left);\n },\n ViMotion::SemanticLeftEnd => {\n self.point = semantic(term, self.point, Direction::Left, Side::Right);\n },\n ViMotion::SemanticRightEnd => {\n self.point = semantic(term, self.point, Direction::Right, Side::Right);\n },\n ViMotion::WordLeft => {\n self.point = word(term, self.point, Direction::Left, Side::Left);\n },\n ViMotion::WordRight => {\n self.point = word(term, self.point, Direction::Right, Side::Left);\n },\n ViMotion::WordLeftEnd => {\n self.point = word(term, self.point, Direction::Left, Side::Right);\n },\n ViMotion::WordRightEnd => {\n self.point = word(term, self.point, Direction::Right, Side::Right);\n },\n ViMotion::Bracket => self.point = term.bracket_search(self.point).unwrap_or(self.point),\n ViMotion::ParagraphUp => {\n // Skip empty lines until we find the next paragraph,\n // then skip over the paragraph until we reach the next empty line.\n let topmost_line = term.topmost_line();\n self.point.line = (*topmost_line..*self.point.line)\n .rev()\n .skip_while(|line| term.grid()[Line(*line)].is_clear())\n .find(|line| term.grid()[Line(*line)].is_clear())\n .map_or(topmost_line, Line);\n self.point.column = Column(0);\n },\n ViMotion::ParagraphDown => {\n // Skip empty lines until we find the next paragraph,\n // then skip over the paragraph until we reach the next empty line.\n let bottommost_line = term.bottommost_line();\n self.point.line = 
(*self.point.line..*bottommost_line)\n .skip_while(|line| term.grid()[Line(*line)].is_clear())\n .find(|line| term.grid()[Line(*line)].is_clear())\n .map_or(bottommost_line, Line);\n self.point.column = Column(0);\n },\n }\n\n term.scroll_to_point(self.point);\n\n self\n }\n\n /// Get target cursor point for vim-like page movement.\n #[must_use = \"this returns the result of the operation, without modifying the original\"]\n pub fn scroll<T: EventListener>(mut self, term: &Term<T>, lines: i32) -> Self {\n // Clamp movement to within visible region.\n let line = (self.point.line - lines).grid_clamp(term, Boundary::Grid);\n\n // Find the first occupied cell after scrolling has been performed.\n let column = first_occupied_in_line(term, line).unwrap_or_default().column;\n\n // Move cursor.\n self.point = Point::new(line, column);\n\n self\n }\n}",
"class_signature": "impl ViModeCursor"
} |
last | alacritty-master/alacritty_terminal/src/vi_mode.rs | fn last(term: &Term<T>, mut point: Point) -> Point {
// Expand across wide cells.
point = term.expand_wide(point, Direction::Right);
// Find last non-empty cell in the current line.
let occupied = last_occupied_in_line(term, point.line).unwrap_or_default();
if point.column < occupied.column {
// Jump to last occupied cell when not already at or beyond it.
occupied
} else if is_wrap(term, point) {
// Jump to last occupied cell across linewraps.
while is_wrap(term, point) {
point.line += 1;
}
last_occupied_in_line(term, point.line).unwrap_or(point)
} else {
// Jump to last column when beyond the last occupied cell.
Point::new(point.line, term.last_column())
}
} | use std::cmp::min;
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
use crate::event::EventListener;
use crate::grid::{Dimensions, GridCell};
use crate::index::{Boundary, Column, Direction, Line, Point, Side};
use crate::term::cell::Flags;
use crate::term::Term;
/// Possible vi mode motion movements.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize), serde(rename_all = "lowercase"))]
pub enum ViMotion {
/// Move up.
Up,
/// Move down.
Down,
/// Move left.
Left,
/// Move right.
Right,
/// First column, or beginning of the line when already at the first column.
First,
/// Last column, or beginning of the line when already at the last column.
Last,
/// First non-empty cell in this terminal row, or first non-empty cell
/// of the line when already at the first cell of the row.
FirstOccupied,
/// Move to top of screen.
High,
/// Move to center of screen.
Middle,
/// Move to bottom of screen.
Low,
/// Move to start of semantically separated word.
SemanticLeft,
/// Move to start of next semantically separated word.
SemanticRight,
/// Move to end of previous semantically separated word.
SemanticLeftEnd,
/// Move to end of semantically separated word.
SemanticRightEnd,
/// Move to start of whitespace separated word.
WordLeft,
/// Move to start of next whitespace separated word.
WordRight,
/// Move to end of previous whitespace separated word.
WordLeftEnd,
/// Move to end of whitespace separated word.
WordRightEnd,
/// Move to opposing bracket.
Bracket,
/// Move above the current paragraph.
ParagraphUp,
/// Move below the current paragraph.
ParagraphDown,
}
/// Cursor tracking vi mode position.
#[derive(Default, Copy, Clone, PartialEq, Eq)]
pub struct ViModeCursor {
pub point: Point,
}
impl ViModeCursor {
pub fn new(point: Point) -> Self {
Self { point }
}
/// Move vi mode cursor.
#[must_use = "this returns the result of the operation, without modifying the original"]
pub fn motion<T: EventListener>(mut self, term: &mut Term<T>, motion: ViMotion) -> Self {
match motion {
ViMotion::Up => {
if self.point.line > term.topmost_line() {
self.point.line -= 1;
}
},
ViMotion::Down => {
if self.point.line + 1 < term.screen_lines() as i32 {
self.point.line += 1;
}
},
ViMotion::Left => {
self.point = term.expand_wide(self.point, Direction::Left);
let wrap_point = Point::new(self.point.line - 1, term.last_column());
if self.point.column == 0
&& self.point.line > term.topmost_line()
&& is_wrap(term, wrap_point)
{
self.point = wrap_point;
} else {
self.point.column = Column(self.point.column.saturating_sub(1));
}
},
ViMotion::Right => {
self.point = term.expand_wide(self.point, Direction::Right);
if is_wrap(term, self.point) {
self.point = Point::new(self.point.line + 1, Column(0));
} else {
self.point.column = min(self.point.column + 1, term.last_column());
}
},
ViMotion::First => {
self.point = term.expand_wide(self.point, Direction::Left);
while self.point.column == 0
&& self.point.line > term.topmost_line()
&& is_wrap(term, Point::new(self.point.line - 1, term.last_column()))
{
self.point.line -= 1;
}
self.point.column = Column(0);
},
ViMotion::Last => self.point = last(term, self.point),
ViMotion::FirstOccupied => self.point = first_occupied(term, self.point),
ViMotion::High => {
let line = Line(-(term.grid().display_offset() as i32));
let col = first_occupied_in_line(term, line).unwrap_or_default().column;
self.point = Point::new(line, col);
},
ViMotion::Middle => {
let display_offset = term.grid().display_offset() as i32;
let line = Line(-display_offset + term.screen_lines() as i32 / 2 - 1);
let col = first_occupied_in_line(term, line).unwrap_or_default().column;
self.point = Point::new(line, col);
},
ViMotion::Low => {
let display_offset = term.grid().display_offset() as i32;
let line = Line(-display_offset + term.screen_lines() as i32 - 1);
let col = first_occupied_in_line(term, line).unwrap_or_default().column;
self.point = Point::new(line, col);
},
ViMotion::SemanticLeft => {
self.point = semantic(term, self.point, Direction::Left, Side::Left);
},
ViMotion::SemanticRight => {
self.point = semantic(term, self.point, Direction::Right, Side::Left);
},
ViMotion::SemanticLeftEnd => {
self.point = semantic(term, self.point, Direction::Left, Side::Right);
},
ViMotion::SemanticRightEnd => {
self.point = semantic(term, self.point, Direction::Right, Side::Right);
},
ViMotion::WordLeft => {
self.point = word(term, self.point, Direction::Left, Side::Left);
},
ViMotion::WordRight => {
self.point = word(term, self.point, Direction::Right, Side::Left);
},
ViMotion::WordLeftEnd => {
self.point = word(term, self.point, Direction::Left, Side::Right);
},
ViMotion::WordRightEnd => {
self.point = word(term, self.point, Direction::Right, Side::Right);
},
ViMotion::Bracket => self.point = term.bracket_search(self.point).unwrap_or(self.point),
ViMotion::ParagraphUp => {
// Skip empty lines until we find the next paragraph,
// then skip over the paragraph until we reach the next empty line.
let topmost_line = term.topmost_line();
self.point.line = (*topmost_line..*self.point.line)
.rev()
.skip_while(|line| term.grid()[Line(*line)].is_clear())
.find(|line| term.grid()[Line(*line)].is_clear())
.map_or(topmost_line, Line);
self.point.column = Column(0);
},
ViMotion::ParagraphDown => {
// Skip empty lines until we find the next paragraph,
// then skip over the paragraph until we reach the next empty line.
let bottommost_line = term.bottommost_line();
self.point.line = (*self.point.line..*bottommost_line)
.skip_while(|line| term.grid()[Line(*line)].is_clear())
.find(|line| term.grid()[Line(*line)].is_clear())
.map_or(bottommost_line, Line);
self.point.column = Column(0);
},
}
term.scroll_to_point(self.point);
self
}
/// Get target cursor point for vim-like page movement.
#[must_use = "this returns the result of the operation, without modifying the original"]
pub fn scroll<T: EventListener>(mut self, term: &Term<T>, lines: i32) -> Self {
// Clamp movement to within visible region.
let line = (self.point.line - lines).grid_clamp(term, Boundary::Grid);
// Find the first occupied cell after scrolling has been performed.
let column = first_occupied_in_line(term, line).unwrap_or_default().column;
// Move cursor.
self.point = Point::new(line, column);
self
}
}
/// Find next end of line to move to.
fn last<T>(term: &Term<T>, mut point: Point) -> Point {
// Expand across wide cells.
point = term.expand_wide(point, Direction::Right);
// Find last non-empty cell in the current line.
let occupied = last_occupied_in_line(term, point.line).unwrap_or_default();
if point.column < occupied.column {
// Jump to last occupied cell when not already at or beyond it.
occupied
} else if is_wrap(term, point) {
// Jump to last occupied cell across linewraps.
while is_wrap(term, point) {
point.line += 1;
}
last_occupied_in_line(term, point.line).unwrap_or(point)
} else {
// Jump to last column when beyond the last occupied cell.
Point::new(point.line, term.last_column())
}
}
/// Find next non-empty cell to move to.
fn first_occupied<T>(term: &Term<T>, mut point: Point) -> Point {
let last_column = term.last_column();
// Expand left across wide chars, since we're searching lines left to right.
point = term.expand_wide(point, Direction::Left);
// Find first non-empty cell in current line.
let occupied = first_occupied_in_line(term, point.line)
.unwrap_or_else(|| Point::new(point.line, last_column));
// Jump across wrapped lines if we're already at this line's first occupied cell.
if point == occupied {
let mut occupied = None;
// Search for non-empty cell in previous lines.
for line in (term.topmost_line().0..point.line.0).rev().map(Line::from) {
if !is_wrap(term, Point::new(line, last_column)) {
break;
}
occupied = first_occupied_in_line(term, line).or(occupied);
}
// Fallback to the next non-empty cell.
let mut line = point.line;
occupied.unwrap_or_else(|| loop {
if let Some(occupied) = first_occupied_in_line(term, line) {
break occupied;
}
let last_cell = Point::new(line, last_column);
if !is_wrap(term, last_cell) {
break last_cell;
}
line += 1;
})
} else {
occupied
}
}
/// Move by semantically separated word, like w/b/e/ge in vi.
fn semantic<T: EventListener>(
term: &Term<T>,
mut point: Point,
direction: Direction,
side: Side,
) -> Point {
// Expand semantically based on movement direction.
let expand_semantic = |point: Point| {
// Do not expand when currently on a semantic escape char.
let cell = &term.grid()[point];
if term.semantic_escape_chars().contains(cell.c)
&& !cell.flags.intersects(Flags::WIDE_CHAR_SPACER | Flags::LEADING_WIDE_CHAR_SPACER)
{
point
} else if direction == Direction::Left {
term.semantic_search_left(point)
} else {
term.semantic_search_right(point)
}
};
// Move to word boundary.
if direction != side && !is_boundary(term, point, direction) {
point = expand_semantic(point);
}
// Make sure we jump above wide chars.
point = term.expand_wide(point, direction);
// Skip whitespace.
let mut next_point = advance(term, point, direction);
while !is_boundary(term, point, direction) && is_space(term, next_point) {
point = next_point;
next_point = advance(term, point, direction);
}
// Assure minimum movement of one cell.
if !is_boundary(term, point, direction) {
point = advance(term, point, direction);
// Skip over wide cell spacers.
if direction == Direction::Left {
point = term.expand_wide(point, direction);
}
}
// Move to word boundary.
if direction == side && !is_boundary(term, point, direction) {
point = expand_semantic(point);
}
point
}
/// Move by whitespace separated word, like W/B/E/gE in vi.
fn word<T: EventListener>(
term: &Term<T>,
mut point: Point,
direction: Direction,
side: Side,
) -> Point {
// Make sure we jump above wide chars.
point = term.expand_wide(point, direction);
if direction == side {
// Skip whitespace until right before a word.
let mut next_point = advance(term, point, direction);
while !is_boundary(term, point, direction) && is_space(term, next_point) {
point = next_point;
next_point = advance(term, point, direction);
}
// Skip non-whitespace until right inside word boundary.
let mut next_point = advance(term, point, direction);
while !is_boundary(term, point, direction) && !is_space(term, next_point) {
point = next_point;
next_point = advance(term, point, direction);
}
}
if direction != side {
// Skip non-whitespace until just beyond word.
while !is_boundary(term, point, direction) && !is_space(term, point) {
point = advance(term, point, direction);
}
// Skip whitespace until right inside word boundary.
while !is_boundary(term, point, direction) && is_space(term, point) {
point = advance(term, point, direction);
}
}
point
}
/// Find first non-empty cell in line.
fn first_occupied_in_line<T>(term: &Term<T>, line: Line) -> Option<Point> {
(0..term.columns())
.map(|col| Point::new(line, Column(col)))
.find(|&point| !is_space(term, point))
}
/// Find last non-empty cell in line.
fn last_occupied_in_line<T>(term: &Term<T>, line: Line) -> Option<Point> {
(0..term.columns())
.map(|col| Point::new(line, Column(col)))
.rfind(|&point| !is_space(term, point))
}
/// Advance point based on direction.
fn advance<T>(term: &Term<T>, point: Point, direction: Direction) -> Point {
if direction == Direction::Left {
point.sub(term, Boundary::Grid, 1)
} else {
point.add(term, Boundary::Grid, 1)
}
}
/// Check if cell at point contains whitespace.
fn is_space<T>(term: &Term<T>, point: Point) -> bool {
let cell = &term.grid()[point.line][point.column];
!cell.flags().intersects(Flags::WIDE_CHAR_SPACER | Flags::LEADING_WIDE_CHAR_SPACER)
&& (cell.c == ' ' || cell.c == '\t')
}
/// Check if the cell at a point contains the WRAPLINE flag.
fn is_wrap<T>(term: &Term<T>, point: Point) -> bool {
term.grid()[point].flags.contains(Flags::WRAPLINE)
}
/// Check if point is at screen boundary.
fn is_boundary<T>(term: &Term<T>, point: Point, direction: Direction) -> bool {
(point.line <= term.topmost_line() && point.column == 0 && direction == Direction::Left)
|| (point.line == term.bottommost_line()
&& point.column + 1 >= term.columns()
&& direction == Direction::Right)
}
#[cfg(test)]
mod tests {
use super::*;
use crate::event::VoidListener;
use crate::index::{Column, Line};
use crate::term::test::TermSize;
use crate::term::{Config, Term};
use crate::vte::ansi::Handler;
fn term() -> Term<VoidListener> {
let size = TermSize::new(20, 20);
Term::new(Config::default(), &size, VoidListener)
}
#[test]
fn motion_simple() {
let mut term = term();
let mut cursor = ViModeCursor::new(Point::new(Line(0), Column(0)));
cursor = cursor.motion(&mut term, ViMotion::Right);
assert_eq!(cursor.point, Point::new(Line(0), Column(1)));
cursor = cursor.motion(&mut term, ViMotion::Left);
assert_eq!(cursor.point, Point::new(Line(0), Column(0)));
cursor = cursor.motion(&mut term, ViMotion::Down);
assert_eq!(cursor.point, Point::new(Line(1), Column(0)));
cursor = cursor.motion(&mut term, ViMotion::Up);
assert_eq!(cursor.point, Point::new(Line(0), Column(0)));
}
#[test]
fn simple_wide() {
let mut term = term();
term.grid_mut()[Line(0)][Column(0)].c = 'a';
term.grid_mut()[Line(0)][Column(1)].c = '汉';
term.grid_mut()[Line(0)][Column(1)].flags.insert(Flags::WIDE_CHAR);
term.grid_mut()[Line(0)][Column(2)].c = ' ';
term.grid_mut()[Line(0)][Column(2)].flags.insert(Flags::WIDE_CHAR_SPACER);
term.grid_mut()[Line(0)][Column(3)].c = 'a';
let mut cursor = ViModeCursor::new(Point::new(Line(0), Column(1)));
cursor = cursor.motion(&mut term, ViMotion::Right);
assert_eq!(cursor.point, Point::new(Line(0), Column(3)));
let mut cursor = ViModeCursor::new(Point::new(Line(0), Column(2)));
cursor = cursor.motion(&mut term, ViMotion::Left);
assert_eq!(cursor.point, Point::new(Line(0), Column(0)));
}
#[test]
fn motion_start_end() {
let mut term = term();
let mut cursor = ViModeCursor::new(Point::new(Line(0), Column(0)));
cursor = cursor.motion(&mut term, ViMotion::Last);
assert_eq!(cursor.point, Point::new(Line(0), Column(19)));
cursor = cursor.motion(&mut term, ViMotion::First);
assert_eq!(cursor.point, Point::new(Line(0), Column(0)));
}
#[test]
fn motion_first_occupied() {
let mut term = term();
term.grid_mut()[Line(0)][Column(0)].c = ' ';
term.grid_mut()[Line(0)][Column(1)].c = 'x';
term.grid_mut()[Line(0)][Column(2)].c = ' ';
term.grid_mut()[Line(0)][Column(3)].c = 'y';
term.grid_mut()[Line(0)][Column(19)].flags.insert(Flags::WRAPLINE);
term.grid_mut()[Line(1)][Column(19)].flags.insert(Flags::WRAPLINE);
term.grid_mut()[Line(2)][Column(0)].c = 'z';
term.grid_mut()[Line(2)][Column(1)].c = ' ';
let mut cursor = ViModeCursor::new(Point::new(Line(2), Column(1)));
cursor = cursor.motion(&mut term, ViMotion::FirstOccupied);
assert_eq!(cursor.point, Point::new(Line(2), Column(0)));
cursor = cursor.motion(&mut term, ViMotion::FirstOccupied);
assert_eq!(cursor.point, Point::new(Line(0), Column(1)));
}
#[test]
fn motion_high_middle_low() {
let mut term = term();
let mut cursor = ViModeCursor::new(Point::new(Line(0), Column(0)));
cursor = cursor.motion(&mut term, ViMotion::High);
assert_eq!(cursor.point, Point::new(Line(0), Column(0)));
cursor = cursor.motion(&mut term, ViMotion::Middle);
assert_eq!(cursor.point, Point::new(Line(9), Column(0)));
cursor = cursor.motion(&mut term, ViMotion::Low);
assert_eq!(cursor.point, Point::new(Line(19), Column(0)));
}
#[test]
fn motion_bracket() {
let mut term = term();
term.grid_mut()[Line(0)][Column(0)].c = '(';
term.grid_mut()[Line(0)][Column(1)].c = 'x';
term.grid_mut()[Line(0)][Column(2)].c = ')';
let mut cursor = ViModeCursor::new(Point::new(Line(0), Column(0)));
cursor = cursor.motion(&mut term, ViMotion::Bracket);
assert_eq!(cursor.point, Point::new(Line(0), Column(2)));
cursor = cursor.motion(&mut term, ViMotion::Bracket);
assert_eq!(cursor.point, Point::new(Line(0), Column(0)));
}
fn motion_semantic_term() -> Term<VoidListener> {
let mut term = term();
term.grid_mut()[Line(0)][Column(0)].c = 'x';
term.grid_mut()[Line(0)][Column(1)].c = ' ';
term.grid_mut()[Line(0)][Column(2)].c = 'x';
term.grid_mut()[Line(0)][Column(3)].c = 'x';
term.grid_mut()[Line(0)][Column(4)].c = ' ';
term.grid_mut()[Line(0)][Column(5)].c = ' ';
term.grid_mut()[Line(0)][Column(6)].c = ':';
term.grid_mut()[Line(0)][Column(7)].c = ' ';
term.grid_mut()[Line(0)][Column(8)].c = 'x';
term.grid_mut()[Line(0)][Column(9)].c = ':';
term.grid_mut()[Line(0)][Column(10)].c = 'x';
term.grid_mut()[Line(0)][Column(11)].c = ' ';
term.grid_mut()[Line(0)][Column(12)].c = ' ';
term.grid_mut()[Line(0)][Column(13)].c = ':';
term.grid_mut()[Line(0)][Column(14)].c = ' ';
term.grid_mut()[Line(0)][Column(15)].c = 'x';
term
}
#[test]
fn motion_semantic_right_end() {
let mut term = motion_semantic_term();
let mut cursor = ViModeCursor::new(Point::new(Line(0), Column(0)));
cursor = cursor.motion(&mut term, ViMotion::SemanticRightEnd);
assert_eq!(cursor.point, Point::new(Line(0), Column(3)));
cursor = cursor.motion(&mut term, ViMotion::SemanticRightEnd);
assert_eq!(cursor.point, Point::new(Line(0), Column(6)));
cursor = cursor.motion(&mut term, ViMotion::SemanticRightEnd);
assert_eq!(cursor.point, Point::new(Line(0), Column(8)));
cursor = cursor.motion(&mut term, ViMotion::SemanticRightEnd);
assert_eq!(cursor.point, Point::new(Line(0), Column(9)));
cursor = cursor.motion(&mut term, ViMotion::SemanticRightEnd);
assert_eq!(cursor.point, Point::new(Line(0), Column(10)));
cursor = cursor.motion(&mut term, ViMotion::SemanticRightEnd);
assert_eq!(cursor.point, Point::new(Line(0), Column(13)));
cursor = cursor.motion(&mut term, ViMotion::SemanticRightEnd);
assert_eq!(cursor.point, Point::new(Line(0), Column(15)));
}
#[test]
fn motion_semantic_left_start() {
let mut term = motion_semantic_term();
let mut cursor = ViModeCursor::new(Point::new(Line(0), Column(15)));
cursor = cursor.motion(&mut term, ViMotion::SemanticLeft);
assert_eq!(cursor.point, Point::new(Line(0), Column(13)));
cursor = cursor.motion(&mut term, ViMotion::SemanticLeft);
assert_eq!(cursor.point, Point::new(Line(0), Column(10)));
cursor = cursor.motion(&mut term, ViMotion::SemanticLeft);
assert_eq!(cursor.point, Point::new(Line(0), Column(9)));
cursor = cursor.motion(&mut term, ViMotion::SemanticLeft);
assert_eq!(cursor.point, Point::new(Line(0), Column(8)));
cursor = cursor.motion(&mut term, ViMotion::SemanticLeft);
assert_eq!(cursor.point, Point::new(Line(0), Column(6)));
cursor = cursor.motion(&mut term, ViMotion::SemanticLeft);
assert_eq!(cursor.point, Point::new(Line(0), Column(2)));
cursor = cursor.motion(&mut term, ViMotion::SemanticLeft);
assert_eq!(cursor.point, Point::new(Line(0), Column(0)));
}
#[test]
fn motion_semantic_right_start() {
let mut term = motion_semantic_term();
let mut cursor = ViModeCursor::new(Point::new(Line(0), Column(0)));
cursor = cursor.motion(&mut term, ViMotion::SemanticRight);
assert_eq!(cursor.point, Point::new(Line(0), Column(2)));
cursor = cursor.motion(&mut term, ViMotion::SemanticRight);
assert_eq!(cursor.point, Point::new(Line(0), Column(6)));
cursor = cursor.motion(&mut term, ViMotion::SemanticRight);
assert_eq!(cursor.point, Point::new(Line(0), Column(8)));
cursor = cursor.motion(&mut term, ViMotion::SemanticRight);
assert_eq!(cursor.point, Point::new(Line(0), Column(9)));
cursor = cursor.motion(&mut term, ViMotion::SemanticRight);
assert_eq!(cursor.point, Point::new(Line(0), Column(10)));
cursor = cursor.motion(&mut term, ViMotion::SemanticRight);
assert_eq!(cursor.point, Point::new(Line(0), Column(13)));
cursor = cursor.motion(&mut term, ViMotion::SemanticRight);
assert_eq!(cursor.point, Point::new(Line(0), Column(15)));
}
#[test]
fn motion_semantic_left_end() {
let mut term = motion_semantic_term();
let mut cursor = ViModeCursor::new(Point::new(Line(0), Column(15)));
cursor = cursor.motion(&mut term, ViMotion::SemanticLeftEnd);
assert_eq!(cursor.point, Point::new(Line(0), Column(13)));
cursor = cursor.motion(&mut term, ViMotion::SemanticLeftEnd);
assert_eq!(cursor.point, Point::new(Line(0), Column(10)));
cursor = cursor.motion(&mut term, ViMotion::SemanticLeftEnd);
assert_eq!(cursor.point, Point::new(Line(0), Column(9)));
cursor = cursor.motion(&mut term, ViMotion::SemanticLeftEnd);
assert_eq!(cursor.point, Point::new(Line(0), Column(8)));
cursor = cursor.motion(&mut term, ViMotion::SemanticLeftEnd);
assert_eq!(cursor.point, Point::new(Line(0), Column(6)));
cursor = cursor.motion(&mut term, ViMotion::SemanticLeftEnd);
assert_eq!(cursor.point, Point::new(Line(0), Column(3)));
cursor = cursor.motion(&mut term, ViMotion::SemanticLeftEnd);
assert_eq!(cursor.point, Point::new(Line(0), Column(0)));
}
#[test]
fn scroll_semantic() {
let mut term = term();
term.grid_mut().scroll_up(&(Line(0)..Line(20)), 5);
let mut cursor = ViModeCursor::new(Point::new(Line(0), Column(0)));
cursor = cursor.motion(&mut term, ViMotion::SemanticLeft);
assert_eq!(cursor.point, Point::new(Line(-5), Column(0)));
assert_eq!(term.grid().display_offset(), 5);
cursor = cursor.motion(&mut term, ViMotion::SemanticRight);
assert_eq!(cursor.point, Point::new(Line(19), Column(19)));
assert_eq!(term.grid().display_offset(), 0);
cursor = cursor.motion(&mut term, ViMotion::SemanticLeftEnd);
assert_eq!(cursor.point, Point::new(Line(-5), Column(0)));
assert_eq!(term.grid().display_offset(), 5);
cursor = cursor.motion(&mut term, ViMotion::SemanticRightEnd);
assert_eq!(cursor.point, Point::new(Line(19), Column(19)));
assert_eq!(term.grid().display_offset(), 0);
}
#[test]
fn semantic_wide() {
let mut term = term();
term.grid_mut()[Line(0)][Column(0)].c = 'a';
term.grid_mut()[Line(0)][Column(1)].c = ' ';
term.grid_mut()[Line(0)][Column(2)].c = '汉';
term.grid_mut()[Line(0)][Column(2)].flags.insert(Flags::WIDE_CHAR);
term.grid_mut()[Line(0)][Column(3)].c = ' ';
term.grid_mut()[Line(0)][Column(3)].flags.insert(Flags::WIDE_CHAR_SPACER);
term.grid_mut()[Line(0)][Column(4)].c = ' ';
term.grid_mut()[Line(0)][Column(5)].c = 'a';
let mut cursor = ViModeCursor::new(Point::new(Line(0), Column(2)));
cursor = cursor.motion(&mut term, ViMotion::SemanticRight);
assert_eq!(cursor.point, Point::new(Line(0), Column(5)));
let mut cursor = ViModeCursor::new(Point::new(Line(0), Column(3)));
cursor = cursor.motion(&mut term, ViMotion::SemanticLeft);
assert_eq!(cursor.point, Point::new(Line(0), Column(0)));
}
#[test]
fn motion_word() {
let mut term = term();
term.grid_mut()[Line(0)][Column(0)].c = 'a';
term.grid_mut()[Line(0)][Column(1)].c = ';';
term.grid_mut()[Line(0)][Column(2)].c = ' ';
term.grid_mut()[Line(0)][Column(3)].c = ' ';
term.grid_mut()[Line(0)][Column(4)].c = 'a';
term.grid_mut()[Line(0)][Column(5)].c = ';';
let mut cursor = ViModeCursor::new(Point::new(Line(0), Column(0)));
cursor = cursor.motion(&mut term, ViMotion::WordRightEnd);
assert_eq!(cursor.point, Point::new(Line(0), Column(1)));
cursor = cursor.motion(&mut term, ViMotion::WordRightEnd);
assert_eq!(cursor.point, Point::new(Line(0), Column(5)));
cursor = cursor.motion(&mut term, ViMotion::WordLeft);
assert_eq!(cursor.point, Point::new(Line(0), Column(4)));
cursor = cursor.motion(&mut term, ViMotion::WordLeft);
assert_eq!(cursor.point, Point::new(Line(0), Column(0)));
cursor = cursor.motion(&mut term, ViMotion::WordRight);
assert_eq!(cursor.point, Point::new(Line(0), Column(4)));
cursor = cursor.motion(&mut term, ViMotion::WordLeftEnd);
assert_eq!(cursor.point, Point::new(Line(0), Column(1)));
}
#[test]
fn scroll_word() {
let mut term = term();
term.grid_mut().scroll_up(&(Line(0)..Line(20)), 5);
let mut cursor = ViModeCursor::new(Point::new(Line(0), Column(0)));
cursor = cursor.motion(&mut term, ViMotion::WordLeft);
assert_eq!(cursor.point, Point::new(Line(-5), Column(0)));
assert_eq!(term.grid().display_offset(), 5);
cursor = cursor.motion(&mut term, ViMotion::WordRight);
assert_eq!(cursor.point, Point::new(Line(19), Column(19)));
assert_eq!(term.grid().display_offset(), 0);
cursor = cursor.motion(&mut term, ViMotion::WordLeftEnd);
assert_eq!(cursor.point, Point::new(Line(-5), Column(0)));
assert_eq!(term.grid().display_offset(), 5);
cursor = cursor.motion(&mut term, ViMotion::WordRightEnd);
assert_eq!(cursor.point, Point::new(Line(19), Column(19)));
assert_eq!(term.grid().display_offset(), 0);
}
#[test]
fn word_wide() {
let mut term = term();
term.grid_mut()[Line(0)][Column(0)].c = 'a';
term.grid_mut()[Line(0)][Column(1)].c = ' ';
term.grid_mut()[Line(0)][Column(2)].c = '汉';
term.grid_mut()[Line(0)][Column(2)].flags.insert(Flags::WIDE_CHAR);
term.grid_mut()[Line(0)][Column(3)].c = ' ';
term.grid_mut()[Line(0)][Column(3)].flags.insert(Flags::WIDE_CHAR_SPACER);
term.grid_mut()[Line(0)][Column(4)].c = ' ';
term.grid_mut()[Line(0)][Column(5)].c = 'a';
let mut cursor = ViModeCursor::new(Point::new(Line(0), Column(2)));
cursor = cursor.motion(&mut term, ViMotion::WordRight);
assert_eq!(cursor.point, Point::new(Line(0), Column(5)));
let mut cursor = ViModeCursor::new(Point::new(Line(0), Column(3)));
cursor = cursor.motion(&mut term, ViMotion::WordLeft);
assert_eq!(cursor.point, Point::new(Line(0), Column(0)));
}
#[test]
fn scroll_simple() {
let mut term = term();
// Create 1 line of scrollback.
for _ in 0..20 {
term.newline();
}
let mut cursor = ViModeCursor::new(Point::new(Line(0), Column(0)));
cursor = cursor.scroll(&term, -1);
assert_eq!(cursor.point, Point::new(Line(1), Column(0)));
cursor = cursor.scroll(&term, 1);
assert_eq!(cursor.point, Point::new(Line(0), Column(0)));
cursor = cursor.scroll(&term, 1);
assert_eq!(cursor.point, Point::new(Line(-1), Column(0)));
}
#[test]
fn scroll_over_top() {
let mut term = term();
// Create 40 lines of scrollback.
for _ in 0..59 {
term.newline();
}
let mut cursor = ViModeCursor::new(Point::new(Line(19), Column(0)));
cursor = cursor.scroll(&term, 20);
assert_eq!(cursor.point, Point::new(Line(-1), Column(0)));
cursor = cursor.scroll(&term, 20);
assert_eq!(cursor.point, Point::new(Line(-21), Column(0)));
cursor = cursor.scroll(&term, 20);
assert_eq!(cursor.point, Point::new(Line(-40), Column(0)));
cursor = cursor.scroll(&term, 20);
assert_eq!(cursor.point, Point::new(Line(-40), Column(0)));
}
#[test]
fn scroll_over_bottom() {
let mut term = term();
// Create 40 lines of scrollback.
for _ in 0..59 {
term.newline();
}
let mut cursor = ViModeCursor::new(Point::new(Line(-40), Column(0)));
cursor = cursor.scroll(&term, -20);
assert_eq!(cursor.point, Point::new(Line(-20), Column(0)));
cursor = cursor.scroll(&term, -20);
assert_eq!(cursor.point, Point::new(Line(0), Column(0)));
cursor = cursor.scroll(&term, -20);
assert_eq!(cursor.point, Point::new(Line(19), Column(0)));
cursor = cursor.scroll(&term, -20);
assert_eq!(cursor.point, Point::new(Line(19), Column(0)));
}
#[test]
fn wide_semantic_char() {
let mut term = term();
term.set_semantic_escape_chars("-");
term.grid_mut()[Line(0)][Column(0)].c = 'x';
term.grid_mut()[Line(0)][Column(1)].c = 'x';
term.grid_mut()[Line(0)][Column(2)].c = '-';
term.grid_mut()[Line(0)][Column(2)].flags.insert(Flags::WIDE_CHAR);
term.grid_mut()[Line(0)][Column(3)].c = ' ';
term.grid_mut()[Line(0)][Column(3)].flags.insert(Flags::WIDE_CHAR_SPACER);
term.grid_mut()[Line(0)][Column(4)].c = 'x';
term.grid_mut()[Line(0)][Column(5)].c = 'x';
// Test motion to the right.
let mut cursor = ViModeCursor::new(Point::new(Line(0), Column(0)));
cursor = cursor.motion(&mut term, ViMotion::SemanticRight);
assert_eq!(cursor.point, Point::new(Line(0), Column(2)));
let mut cursor = ViModeCursor::new(Point::new(Line(0), Column(2)));
cursor = cursor.motion(&mut term, ViMotion::SemanticRight);
assert_eq!(cursor.point, Point::new(Line(0), Column(4)));
// Test motion to the left.
let mut cursor = ViModeCursor::new(Point::new(Line(0), Column(5)));
cursor = cursor.motion(&mut term, ViMotion::SemanticLeft);
assert_eq!(cursor.point, Point::new(Line(0), Column(4)));
let mut cursor = ViModeCursor::new(Point::new(Line(0), Column(4)));
cursor = cursor.motion(&mut term, ViMotion::SemanticLeft);
assert_eq!(cursor.point, Point::new(Line(0), Column(2)));
let mut cursor = ViModeCursor::new(Point::new(Line(0), Column(2)));
cursor = cursor.motion(&mut term, ViMotion::SemanticLeft);
assert_eq!(cursor.point, Point::new(Line(0), Column(0)));
}
}
| rust | {
"argument_definitions": [
{
"definitions": [
"pub struct Term<T> {\n /// Terminal focus controlling the cursor shape.\n pub is_focused: bool,\n\n /// Cursor for keyboard selection.\n pub vi_mode_cursor: ViModeCursor,\n\n pub selection: Option<Selection>,\n\n /// Currently active grid.\n ///\n /// Tracks the screen buffer currently in use. While the alternate screen buffer is active,\n /// this will be the alternate grid. Otherwise it is the primary screen buffer.\n grid: Grid<Cell>,\n\n /// Currently inactive grid.\n ///\n /// Opposite of the active grid. While the alternate screen buffer is active, this will be the\n /// primary grid. Otherwise it is the alternate screen buffer.\n inactive_grid: Grid<Cell>,\n\n /// Index into `charsets`, pointing to what ASCII is currently being mapped to.\n active_charset: CharsetIndex,\n\n /// Tabstops.\n tabs: TabStops,\n\n /// Mode flags.\n mode: TermMode,\n\n /// Scroll region.\n ///\n /// Range going from top to bottom of the terminal, indexed from the top of the viewport.\n scroll_region: Range<Line>,\n\n /// Modified terminal colors.\n colors: Colors,\n\n /// Current style of the cursor.\n cursor_style: Option<CursorStyle>,\n\n /// Proxy for sending events to the event loop.\n event_proxy: T,\n\n /// Current title of the window.\n title: Option<String>,\n\n /// Stack of saved window titles. When a title is popped from this stack, the `title` for the\n /// term is set.\n title_stack: Vec<Option<String>>,\n\n /// The stack for the keyboard modes.\n keyboard_mode_stack: Vec<KeyboardModes>,\n\n /// Currently inactive keyboard mode stack.\n inactive_keyboard_mode_stack: Vec<KeyboardModes>,\n\n /// Information about damaged cells.\n damage: TermDamageState,\n\n /// Config directly for the terminal.\n config: Config,\n}"
],
"name": "term",
"type": "&Term<T>"
},
{
"definitions": [
"pub struct Point<L = Line, C = Column> {\n pub line: L,\n pub column: C,\n}"
],
"name": "point",
"type": "Point"
}
],
"end_line": 226,
"name": "last",
"signature": "fn last(term: &Term<T>, mut point: Point) -> Point",
"start_line": 205
} | {
"class_name": "",
"class_signature": ""
} |
first_occupied | alacritty-master/alacritty_terminal/src/vi_mode.rs | fn first_occupied(term: &Term<T>, mut point: Point) -> Point {
let last_column = term.last_column();
// Expand left across wide chars, since we're searching lines left to right.
point = term.expand_wide(point, Direction::Left);
// Find first non-empty cell in current line.
let occupied = first_occupied_in_line(term, point.line)
.unwrap_or_else(|| Point::new(point.line, last_column));
// Jump across wrapped lines if we're already at this line's first occupied cell.
if point == occupied {
let mut occupied = None;
// Search for non-empty cell in previous lines.
for line in (term.topmost_line().0..point.line.0).rev().map(Line::from) {
if !is_wrap(term, Point::new(line, last_column)) {
break;
}
occupied = first_occupied_in_line(term, line).or(occupied);
}
// Fallback to the next non-empty cell.
let mut line = point.line;
occupied.unwrap_or_else(|| loop {
if let Some(occupied) = first_occupied_in_line(term, line) {
break occupied;
}
let last_cell = Point::new(line, last_column);
if !is_wrap(term, last_cell) {
break last_cell;
}
line += 1;
})
} else {
occupied
}
} | use std::cmp::min;
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
use crate::event::EventListener;
use crate::grid::{Dimensions, GridCell};
use crate::index::{Boundary, Column, Direction, Line, Point, Side};
use crate::term::cell::Flags;
use crate::term::Term;
/// Possible vi mode motion movements.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize), serde(rename_all = "lowercase"))]
pub enum ViMotion {
/// Move up.
Up,
/// Move down.
Down,
/// Move left.
Left,
/// Move right.
Right,
/// First column, or beginning of the line when already at the first column.
First,
/// Last column, or beginning of the line when already at the last column.
Last,
/// First non-empty cell in this terminal row, or first non-empty cell
/// of the line when already at the first cell of the row.
FirstOccupied,
/// Move to top of screen.
High,
/// Move to center of screen.
Middle,
/// Move to bottom of screen.
Low,
/// Move to start of semantically separated word.
SemanticLeft,
/// Move to start of next semantically separated word.
SemanticRight,
/// Move to end of previous semantically separated word.
SemanticLeftEnd,
/// Move to end of semantically separated word.
SemanticRightEnd,
/// Move to start of whitespace separated word.
WordLeft,
/// Move to start of next whitespace separated word.
WordRight,
/// Move to end of previous whitespace separated word.
WordLeftEnd,
/// Move to end of whitespace separated word.
WordRightEnd,
/// Move to opposing bracket.
Bracket,
/// Move above the current paragraph.
ParagraphUp,
/// Move below the current paragraph.
ParagraphDown,
}
/// Cursor tracking vi mode position.
#[derive(Default, Copy, Clone, PartialEq, Eq)]
pub struct ViModeCursor {
pub point: Point,
}
impl ViModeCursor {
pub fn new(point: Point) -> Self {
Self { point }
}
/// Move vi mode cursor.
///
/// Applies `motion` to the current cursor position, then scrolls the terminal
/// so the resulting point is inside the visible area.
#[must_use = "this returns the result of the operation, without modifying the original"]
pub fn motion<T: EventListener>(mut self, term: &mut Term<T>, motion: ViMotion) -> Self {
    match motion {
        ViMotion::Up => {
            // Stop at the top of the scrollback.
            if self.point.line > term.topmost_line() {
                self.point.line -= 1;
            }
        },
        ViMotion::Down => {
            // Stop at the last visible line.
            if self.point.line + 1 < term.screen_lines() as i32 {
                self.point.line += 1;
            }
        },
        ViMotion::Left => {
            self.point = term.expand_wide(self.point, Direction::Left);
            let wrap_point = Point::new(self.point.line - 1, term.last_column());
            // At the start of a soft-wrapped line, jump to the end of the
            // previous line instead of stopping at column zero.
            if self.point.column == 0
                && self.point.line > term.topmost_line()
                && is_wrap(term, wrap_point)
            {
                self.point = wrap_point;
            } else {
                self.point.column = Column(self.point.column.saturating_sub(1));
            }
        },
        ViMotion::Right => {
            self.point = term.expand_wide(self.point, Direction::Right);
            // Follow a soft linewrap onto the next line's first column.
            if is_wrap(term, self.point) {
                self.point = Point::new(self.point.line + 1, Column(0));
            } else {
                self.point.column = min(self.point.column + 1, term.last_column());
            }
        },
        ViMotion::First => {
            self.point = term.expand_wide(self.point, Direction::Left);
            // Walk up to the first row of the soft-wrapped line.
            while self.point.column == 0
                && self.point.line > term.topmost_line()
                && is_wrap(term, Point::new(self.point.line - 1, term.last_column()))
            {
                self.point.line -= 1;
            }
            self.point.column = Column(0);
        },
        ViMotion::Last => self.point = last(term, self.point),
        ViMotion::FirstOccupied => self.point = first_occupied(term, self.point),
        ViMotion::High => {
            // Topmost visible line, accounting for the scrollback display offset.
            let line = Line(-(term.grid().display_offset() as i32));
            let col = first_occupied_in_line(term, line).unwrap_or_default().column;
            self.point = Point::new(line, col);
        },
        ViMotion::Middle => {
            // Middle visible line, accounting for the scrollback display offset.
            let display_offset = term.grid().display_offset() as i32;
            let line = Line(-display_offset + term.screen_lines() as i32 / 2 - 1);
            let col = first_occupied_in_line(term, line).unwrap_or_default().column;
            self.point = Point::new(line, col);
        },
        ViMotion::Low => {
            // Bottommost visible line, accounting for the scrollback display offset.
            let display_offset = term.grid().display_offset() as i32;
            let line = Line(-display_offset + term.screen_lines() as i32 - 1);
            let col = first_occupied_in_line(term, line).unwrap_or_default().column;
            self.point = Point::new(line, col);
        },
        ViMotion::SemanticLeft => {
            self.point = semantic(term, self.point, Direction::Left, Side::Left);
        },
        ViMotion::SemanticRight => {
            self.point = semantic(term, self.point, Direction::Right, Side::Left);
        },
        ViMotion::SemanticLeftEnd => {
            self.point = semantic(term, self.point, Direction::Left, Side::Right);
        },
        ViMotion::SemanticRightEnd => {
            self.point = semantic(term, self.point, Direction::Right, Side::Right);
        },
        ViMotion::WordLeft => {
            self.point = word(term, self.point, Direction::Left, Side::Left);
        },
        ViMotion::WordRight => {
            self.point = word(term, self.point, Direction::Right, Side::Left);
        },
        ViMotion::WordLeftEnd => {
            self.point = word(term, self.point, Direction::Left, Side::Right);
        },
        ViMotion::WordRightEnd => {
            self.point = word(term, self.point, Direction::Right, Side::Right);
        },
        // Stay in place when no matching bracket is found.
        ViMotion::Bracket => self.point = term.bracket_search(self.point).unwrap_or(self.point),
        ViMotion::ParagraphUp => {
            // Skip empty lines until we find the next paragraph,
            // then skip over the paragraph until we reach the next empty line.
            let topmost_line = term.topmost_line();
            self.point.line = (*topmost_line..*self.point.line)
                .rev()
                .skip_while(|line| term.grid()[Line(*line)].is_clear())
                .find(|line| term.grid()[Line(*line)].is_clear())
                .map_or(topmost_line, Line);
            self.point.column = Column(0);
        },
        ViMotion::ParagraphDown => {
            // Skip empty lines until we find the next paragraph,
            // then skip over the paragraph until we reach the next empty line.
            let bottommost_line = term.bottommost_line();
            self.point.line = (*self.point.line..*bottommost_line)
                .skip_while(|line| term.grid()[Line(*line)].is_clear())
                .find(|line| term.grid()[Line(*line)].is_clear())
                .map_or(bottommost_line, Line);
            self.point.column = Column(0);
        },
    }

    // Ensure the new cursor position is scrolled into view.
    term.scroll_to_point(self.point);

    self
}
/// Get target cursor point for vim-like page movement.
#[must_use = "this returns the result of the operation, without modifying the original"]
pub fn scroll<T: EventListener>(mut self, term: &Term<T>, lines: i32) -> Self {
    // Compute the destination line, clamped to the grid's boundaries.
    let line = (self.point.line - lines).grid_clamp(term, Boundary::Grid);

    // Land on the first occupied cell of the destination line, or its
    // start when the line is empty.
    let column = match first_occupied_in_line(term, line) {
        Some(occupied) => occupied.column,
        None => Point::default().column,
    };

    self.point = Point::new(line, column);

    self
}
}
/// Find next end of line to move to.
fn last<T>(term: &Term<T>, mut point: Point) -> Point {
    // Never operate on the spacer half of a wide character.
    point = term.expand_wide(point, Direction::Right);

    // Locate the final non-empty cell of the current row.
    let occupied = last_occupied_in_line(term, point.line).unwrap_or_default();

    if occupied.column > point.column {
        // Still before the row's content end; jump straight to it.
        return occupied;
    }

    if !is_wrap(term, point) {
        // At or past the content end with no linewrap; jump to the last column.
        return Point::new(point.line, term.last_column());
    }

    // Follow the soft-wrapped line down to its final row.
    while is_wrap(term, point) {
        point.line += 1;
    }
    last_occupied_in_line(term, point.line).unwrap_or(point)
}
/// Find next non-empty cell to move to.
///
/// Targets the first occupied cell of the current line; when the cursor is
/// already there, it searches backwards through the soft-wrapped rows of the
/// same logical line for an earlier occupied cell.
fn first_occupied<T>(term: &Term<T>, mut point: Point) -> Point {
    let last_column = term.last_column();

    // Expand left across wide chars, since we're searching lines left to right.
    point = term.expand_wide(point, Direction::Left);

    // Find first non-empty cell in current line; fall back to the last column
    // when the entire line is empty.
    let occupied = first_occupied_in_line(term, point.line)
        .unwrap_or_else(|| Point::new(point.line, last_column));

    // Jump across wrapped lines if we're already at this line's first occupied cell.
    if point == occupied {
        let mut occupied = None;

        // Search for non-empty cell in previous lines, but only while they
        // soft-wrap into the row below (i.e. the same logical line).
        for line in (term.topmost_line().0..point.line.0).rev().map(Line::from) {
            if !is_wrap(term, Point::new(line, last_column)) {
                break;
            }

            // Keep the earliest match; later iterations visit earlier rows.
            occupied = first_occupied_in_line(term, line).or(occupied);
        }

        // Fallback to the next non-empty cell, scanning downward through the
        // wrapped rows starting at the cursor's own line.
        let mut line = point.line;
        occupied.unwrap_or_else(|| loop {
            if let Some(occupied) = first_occupied_in_line(term, line) {
                break occupied;
            }

            let last_cell = Point::new(line, last_column);
            if !is_wrap(term, last_cell) {
                // End of the logical line; settle for its last cell.
                break last_cell;
            }

            line += 1;
        })
    } else {
        occupied
    }
}
/// Move by semantically separated word, like w/b/e/ge in vi.
///
/// `direction` selects the movement direction, while `side` selects which
/// edge of the word to land on; which boundary expansion happens before or
/// after the movement depends on their combination.
fn semantic<T: EventListener>(
    term: &Term<T>,
    mut point: Point,
    direction: Direction,
    side: Side,
) -> Point {
    // Expand semantically based on movement direction.
    let expand_semantic = |point: Point| {
        // Do not expand when currently on a semantic escape char, unless the
        // cell is merely the spacer half of a wide character.
        let cell = &term.grid()[point];
        if term.semantic_escape_chars().contains(cell.c)
            && !cell.flags.intersects(Flags::WIDE_CHAR_SPACER | Flags::LEADING_WIDE_CHAR_SPACER)
        {
            point
        } else if direction == Direction::Left {
            term.semantic_search_left(point)
        } else {
            term.semantic_search_right(point)
        }
    };

    // Move to word boundary before advancing (far-side landings).
    if direction != side && !is_boundary(term, point, direction) {
        point = expand_semantic(point);
    }

    // Make sure we jump above wide chars.
    point = term.expand_wide(point, direction);

    // Skip whitespace between words.
    let mut next_point = advance(term, point, direction);
    while !is_boundary(term, point, direction) && is_space(term, next_point) {
        point = next_point;
        next_point = advance(term, point, direction);
    }

    // Assure minimum movement of one cell.
    if !is_boundary(term, point, direction) {
        point = advance(term, point, direction);

        // Skip over wide cell spacers.
        if direction == Direction::Left {
            point = term.expand_wide(point, direction);
        }
    }

    // Move to word boundary after advancing (near-side landings).
    if direction == side && !is_boundary(term, point, direction) {
        point = expand_semantic(point);
    }

    point
}
/// Move by whitespace separated word, like W/B/E/gE in vi.
fn word<T: EventListener>(
    term: &Term<T>,
    mut point: Point,
    direction: Direction,
    side: Side,
) -> Point {
    // Never start on the spacer half of a wide character.
    point = term.expand_wide(point, direction);

    if direction == side {
        // Cross the whitespace gap, stopping right before a word.
        let mut peek = advance(term, point, direction);
        while !is_boundary(term, point, direction) && is_space(term, peek) {
            point = peek;
            peek = advance(term, point, direction);
        }

        // Cross the word body, stopping right inside its far boundary.
        let mut peek = advance(term, point, direction);
        while !is_boundary(term, point, direction) && !is_space(term, peek) {
            point = peek;
            peek = advance(term, point, direction);
        }
    } else {
        // Step past the current word body.
        while !is_boundary(term, point, direction) && !is_space(term, point) {
            point = advance(term, point, direction);
        }

        // Step through the whitespace gap onto the next word boundary.
        while !is_boundary(term, point, direction) && is_space(term, point) {
            point = advance(term, point, direction);
        }
    }

    point
}
/// Find first non-empty cell in line.
fn first_occupied_in_line<T>(term: &Term<T>, line: Line) -> Option<Point> {
    // Scan the row left to right, stopping at the first cell with content.
    for col in 0..term.columns() {
        let point = Point::new(line, Column(col));
        if !is_space(term, point) {
            return Some(point);
        }
    }
    None
}
/// Find last non-empty cell in line.
fn last_occupied_in_line<T>(term: &Term<T>, line: Line) -> Option<Point> {
    // Scan the row right to left, stopping at the first cell with content.
    for col in (0..term.columns()).rev() {
        let point = Point::new(line, Column(col));
        if !is_space(term, point) {
            return Some(point);
        }
    }
    None
}
/// Advance point based on direction.
fn advance<T>(term: &Term<T>, point: Point, direction: Direction) -> Point {
    // Step a single cell, crossing line boundaries within the grid.
    match direction {
        Direction::Left => point.sub(term, Boundary::Grid, 1),
        Direction::Right => point.add(term, Boundary::Grid, 1),
    }
}
/// Check if cell at point contains whitespace.
fn is_space<T>(term: &Term<T>, point: Point) -> bool {
    let cell = &term.grid()[point.line][point.column];

    // Spacer cells hold ' ' but stand in for half of a wide glyph,
    // so they never count as whitespace.
    let spacer =
        cell.flags().intersects(Flags::WIDE_CHAR_SPACER | Flags::LEADING_WIDE_CHAR_SPACER);

    !spacer && matches!(cell.c, ' ' | '\t')
}
/// Check if the cell at a point contains the WRAPLINE flag.
fn is_wrap<T>(term: &Term<T>, point: Point) -> bool {
    let flags = term.grid()[point].flags;
    flags.contains(Flags::WRAPLINE)
}
/// Check if point is at screen boundary.
fn is_boundary<T>(term: &Term<T>, point: Point, direction: Direction) -> bool {
    // First cell of the topmost (scrollback) line.
    let at_grid_start = point.line <= term.topmost_line() && point.column == 0;
    // Last cell of the bottommost line.
    let at_grid_end =
        point.line == term.bottommost_line() && point.column + 1 >= term.columns();

    (direction == Direction::Left && at_grid_start)
        || (direction == Direction::Right && at_grid_end)
}
#[cfg(test)]
mod tests {
    use super::*;

    use crate::event::VoidListener;
    use crate::index::{Column, Line};
    use crate::term::test::TermSize;
    use crate::term::{Config, Term};
    use crate::vte::ansi::Handler;

    /// Create a 20x20 terminal with default configuration for motion tests.
    fn term() -> Term<VoidListener> {
        let size = TermSize::new(20, 20);
        Term::new(Config::default(), &size, VoidListener)
    }

    #[test]
    fn motion_simple() {
        let mut term = term();

        let mut cursor = ViModeCursor::new(Point::new(Line(0), Column(0)));

        cursor = cursor.motion(&mut term, ViMotion::Right);
        assert_eq!(cursor.point, Point::new(Line(0), Column(1)));

        cursor = cursor.motion(&mut term, ViMotion::Left);
        assert_eq!(cursor.point, Point::new(Line(0), Column(0)));

        cursor = cursor.motion(&mut term, ViMotion::Down);
        assert_eq!(cursor.point, Point::new(Line(1), Column(0)));

        cursor = cursor.motion(&mut term, ViMotion::Up);
        assert_eq!(cursor.point, Point::new(Line(0), Column(0)));
    }

    #[test]
    fn simple_wide() {
        // Layout: "a汉<spacer>a" — Left/Right must skip over both wide cells.
        let mut term = term();
        term.grid_mut()[Line(0)][Column(0)].c = 'a';
        term.grid_mut()[Line(0)][Column(1)].c = '汉';
        term.grid_mut()[Line(0)][Column(1)].flags.insert(Flags::WIDE_CHAR);
        term.grid_mut()[Line(0)][Column(2)].c = ' ';
        term.grid_mut()[Line(0)][Column(2)].flags.insert(Flags::WIDE_CHAR_SPACER);
        term.grid_mut()[Line(0)][Column(3)].c = 'a';

        let mut cursor = ViModeCursor::new(Point::new(Line(0), Column(1)));
        cursor = cursor.motion(&mut term, ViMotion::Right);
        assert_eq!(cursor.point, Point::new(Line(0), Column(3)));

        let mut cursor = ViModeCursor::new(Point::new(Line(0), Column(2)));
        cursor = cursor.motion(&mut term, ViMotion::Left);
        assert_eq!(cursor.point, Point::new(Line(0), Column(0)));
    }

    #[test]
    fn motion_start_end() {
        let mut term = term();

        let mut cursor = ViModeCursor::new(Point::new(Line(0), Column(0)));

        cursor = cursor.motion(&mut term, ViMotion::Last);
        assert_eq!(cursor.point, Point::new(Line(0), Column(19)));

        cursor = cursor.motion(&mut term, ViMotion::First);
        assert_eq!(cursor.point, Point::new(Line(0), Column(0)));
    }

    #[test]
    fn motion_first_occupied() {
        // Lines 0..=2 form one soft-wrapped logical line; FirstOccupied should
        // first stop within the row, then jump across the wraps.
        let mut term = term();
        term.grid_mut()[Line(0)][Column(0)].c = ' ';
        term.grid_mut()[Line(0)][Column(1)].c = 'x';
        term.grid_mut()[Line(0)][Column(2)].c = ' ';
        term.grid_mut()[Line(0)][Column(3)].c = 'y';
        term.grid_mut()[Line(0)][Column(19)].flags.insert(Flags::WRAPLINE);
        term.grid_mut()[Line(1)][Column(19)].flags.insert(Flags::WRAPLINE);
        term.grid_mut()[Line(2)][Column(0)].c = 'z';
        term.grid_mut()[Line(2)][Column(1)].c = ' ';

        let mut cursor = ViModeCursor::new(Point::new(Line(2), Column(1)));

        cursor = cursor.motion(&mut term, ViMotion::FirstOccupied);
        assert_eq!(cursor.point, Point::new(Line(2), Column(0)));

        cursor = cursor.motion(&mut term, ViMotion::FirstOccupied);
        assert_eq!(cursor.point, Point::new(Line(0), Column(1)));
    }

    #[test]
    fn motion_high_middle_low() {
        let mut term = term();

        let mut cursor = ViModeCursor::new(Point::new(Line(0), Column(0)));

        cursor = cursor.motion(&mut term, ViMotion::High);
        assert_eq!(cursor.point, Point::new(Line(0), Column(0)));

        cursor = cursor.motion(&mut term, ViMotion::Middle);
        assert_eq!(cursor.point, Point::new(Line(9), Column(0)));

        cursor = cursor.motion(&mut term, ViMotion::Low);
        assert_eq!(cursor.point, Point::new(Line(19), Column(0)));
    }

    #[test]
    fn motion_bracket() {
        let mut term = term();
        term.grid_mut()[Line(0)][Column(0)].c = '(';
        term.grid_mut()[Line(0)][Column(1)].c = 'x';
        term.grid_mut()[Line(0)][Column(2)].c = ')';

        let mut cursor = ViModeCursor::new(Point::new(Line(0), Column(0)));

        cursor = cursor.motion(&mut term, ViMotion::Bracket);
        assert_eq!(cursor.point, Point::new(Line(0), Column(2)));

        cursor = cursor.motion(&mut term, ViMotion::Bracket);
        assert_eq!(cursor.point, Point::new(Line(0), Column(0)));
    }

    /// Shared fixture for semantic-motion tests: "x xx  : x:x  : x".
    fn motion_semantic_term() -> Term<VoidListener> {
        let mut term = term();

        term.grid_mut()[Line(0)][Column(0)].c = 'x';
        term.grid_mut()[Line(0)][Column(1)].c = ' ';
        term.grid_mut()[Line(0)][Column(2)].c = 'x';
        term.grid_mut()[Line(0)][Column(3)].c = 'x';
        term.grid_mut()[Line(0)][Column(4)].c = ' ';
        term.grid_mut()[Line(0)][Column(5)].c = ' ';
        term.grid_mut()[Line(0)][Column(6)].c = ':';
        term.grid_mut()[Line(0)][Column(7)].c = ' ';
        term.grid_mut()[Line(0)][Column(8)].c = 'x';
        term.grid_mut()[Line(0)][Column(9)].c = ':';
        term.grid_mut()[Line(0)][Column(10)].c = 'x';
        term.grid_mut()[Line(0)][Column(11)].c = ' ';
        term.grid_mut()[Line(0)][Column(12)].c = ' ';
        term.grid_mut()[Line(0)][Column(13)].c = ':';
        term.grid_mut()[Line(0)][Column(14)].c = ' ';
        term.grid_mut()[Line(0)][Column(15)].c = 'x';

        term
    }

    #[test]
    fn motion_semantic_right_end() {
        let mut term = motion_semantic_term();

        let mut cursor = ViModeCursor::new(Point::new(Line(0), Column(0)));

        cursor = cursor.motion(&mut term, ViMotion::SemanticRightEnd);
        assert_eq!(cursor.point, Point::new(Line(0), Column(3)));

        cursor = cursor.motion(&mut term, ViMotion::SemanticRightEnd);
        assert_eq!(cursor.point, Point::new(Line(0), Column(6)));

        cursor = cursor.motion(&mut term, ViMotion::SemanticRightEnd);
        assert_eq!(cursor.point, Point::new(Line(0), Column(8)));

        cursor = cursor.motion(&mut term, ViMotion::SemanticRightEnd);
        assert_eq!(cursor.point, Point::new(Line(0), Column(9)));

        cursor = cursor.motion(&mut term, ViMotion::SemanticRightEnd);
        assert_eq!(cursor.point, Point::new(Line(0), Column(10)));

        cursor = cursor.motion(&mut term, ViMotion::SemanticRightEnd);
        assert_eq!(cursor.point, Point::new(Line(0), Column(13)));

        cursor = cursor.motion(&mut term, ViMotion::SemanticRightEnd);
        assert_eq!(cursor.point, Point::new(Line(0), Column(15)));
    }

    #[test]
    fn motion_semantic_left_start() {
        let mut term = motion_semantic_term();

        let mut cursor = ViModeCursor::new(Point::new(Line(0), Column(15)));

        cursor = cursor.motion(&mut term, ViMotion::SemanticLeft);
        assert_eq!(cursor.point, Point::new(Line(0), Column(13)));

        cursor = cursor.motion(&mut term, ViMotion::SemanticLeft);
        assert_eq!(cursor.point, Point::new(Line(0), Column(10)));

        cursor = cursor.motion(&mut term, ViMotion::SemanticLeft);
        assert_eq!(cursor.point, Point::new(Line(0), Column(9)));

        cursor = cursor.motion(&mut term, ViMotion::SemanticLeft);
        assert_eq!(cursor.point, Point::new(Line(0), Column(8)));

        cursor = cursor.motion(&mut term, ViMotion::SemanticLeft);
        assert_eq!(cursor.point, Point::new(Line(0), Column(6)));

        cursor = cursor.motion(&mut term, ViMotion::SemanticLeft);
        assert_eq!(cursor.point, Point::new(Line(0), Column(2)));

        cursor = cursor.motion(&mut term, ViMotion::SemanticLeft);
        assert_eq!(cursor.point, Point::new(Line(0), Column(0)));
    }

    #[test]
    fn motion_semantic_right_start() {
        let mut term = motion_semantic_term();

        let mut cursor = ViModeCursor::new(Point::new(Line(0), Column(0)));

        cursor = cursor.motion(&mut term, ViMotion::SemanticRight);
        assert_eq!(cursor.point, Point::new(Line(0), Column(2)));

        cursor = cursor.motion(&mut term, ViMotion::SemanticRight);
        assert_eq!(cursor.point, Point::new(Line(0), Column(6)));

        cursor = cursor.motion(&mut term, ViMotion::SemanticRight);
        assert_eq!(cursor.point, Point::new(Line(0), Column(8)));

        cursor = cursor.motion(&mut term, ViMotion::SemanticRight);
        assert_eq!(cursor.point, Point::new(Line(0), Column(9)));

        cursor = cursor.motion(&mut term, ViMotion::SemanticRight);
        assert_eq!(cursor.point, Point::new(Line(0), Column(10)));

        cursor = cursor.motion(&mut term, ViMotion::SemanticRight);
        assert_eq!(cursor.point, Point::new(Line(0), Column(13)));

        cursor = cursor.motion(&mut term, ViMotion::SemanticRight);
        assert_eq!(cursor.point, Point::new(Line(0), Column(15)));
    }

    #[test]
    fn motion_semantic_left_end() {
        let mut term = motion_semantic_term();

        let mut cursor = ViModeCursor::new(Point::new(Line(0), Column(15)));

        cursor = cursor.motion(&mut term, ViMotion::SemanticLeftEnd);
        assert_eq!(cursor.point, Point::new(Line(0), Column(13)));

        cursor = cursor.motion(&mut term, ViMotion::SemanticLeftEnd);
        assert_eq!(cursor.point, Point::new(Line(0), Column(10)));

        cursor = cursor.motion(&mut term, ViMotion::SemanticLeftEnd);
        assert_eq!(cursor.point, Point::new(Line(0), Column(9)));

        cursor = cursor.motion(&mut term, ViMotion::SemanticLeftEnd);
        assert_eq!(cursor.point, Point::new(Line(0), Column(8)));

        cursor = cursor.motion(&mut term, ViMotion::SemanticLeftEnd);
        assert_eq!(cursor.point, Point::new(Line(0), Column(6)));

        cursor = cursor.motion(&mut term, ViMotion::SemanticLeftEnd);
        assert_eq!(cursor.point, Point::new(Line(0), Column(3)));

        cursor = cursor.motion(&mut term, ViMotion::SemanticLeftEnd);
        assert_eq!(cursor.point, Point::new(Line(0), Column(0)));
    }

    #[test]
    fn scroll_semantic() {
        // Semantic motions across an empty grid should travel between the
        // scrollback top and the viewport bottom, updating the display offset.
        let mut term = term();
        term.grid_mut().scroll_up(&(Line(0)..Line(20)), 5);

        let mut cursor = ViModeCursor::new(Point::new(Line(0), Column(0)));

        cursor = cursor.motion(&mut term, ViMotion::SemanticLeft);
        assert_eq!(cursor.point, Point::new(Line(-5), Column(0)));
        assert_eq!(term.grid().display_offset(), 5);

        cursor = cursor.motion(&mut term, ViMotion::SemanticRight);
        assert_eq!(cursor.point, Point::new(Line(19), Column(19)));
        assert_eq!(term.grid().display_offset(), 0);

        cursor = cursor.motion(&mut term, ViMotion::SemanticLeftEnd);
        assert_eq!(cursor.point, Point::new(Line(-5), Column(0)));
        assert_eq!(term.grid().display_offset(), 5);

        cursor = cursor.motion(&mut term, ViMotion::SemanticRightEnd);
        assert_eq!(cursor.point, Point::new(Line(19), Column(19)));
        assert_eq!(term.grid().display_offset(), 0);
    }

    #[test]
    fn semantic_wide() {
        // Layout: "a 汉<spacer> a" — semantic motions must treat the wide
        // character and its spacer as one cell.
        let mut term = term();
        term.grid_mut()[Line(0)][Column(0)].c = 'a';
        term.grid_mut()[Line(0)][Column(1)].c = ' ';
        term.grid_mut()[Line(0)][Column(2)].c = '汉';
        term.grid_mut()[Line(0)][Column(2)].flags.insert(Flags::WIDE_CHAR);
        term.grid_mut()[Line(0)][Column(3)].c = ' ';
        term.grid_mut()[Line(0)][Column(3)].flags.insert(Flags::WIDE_CHAR_SPACER);
        term.grid_mut()[Line(0)][Column(4)].c = ' ';
        term.grid_mut()[Line(0)][Column(5)].c = 'a';

        let mut cursor = ViModeCursor::new(Point::new(Line(0), Column(2)));
        cursor = cursor.motion(&mut term, ViMotion::SemanticRight);
        assert_eq!(cursor.point, Point::new(Line(0), Column(5)));

        let mut cursor = ViModeCursor::new(Point::new(Line(0), Column(3)));
        cursor = cursor.motion(&mut term, ViMotion::SemanticLeft);
        assert_eq!(cursor.point, Point::new(Line(0), Column(0)));
    }

    #[test]
    fn motion_word() {
        let mut term = term();
        term.grid_mut()[Line(0)][Column(0)].c = 'a';
        term.grid_mut()[Line(0)][Column(1)].c = ';';
        term.grid_mut()[Line(0)][Column(2)].c = ' ';
        term.grid_mut()[Line(0)][Column(3)].c = ' ';
        term.grid_mut()[Line(0)][Column(4)].c = 'a';
        term.grid_mut()[Line(0)][Column(5)].c = ';';

        let mut cursor = ViModeCursor::new(Point::new(Line(0), Column(0)));

        cursor = cursor.motion(&mut term, ViMotion::WordRightEnd);
        assert_eq!(cursor.point, Point::new(Line(0), Column(1)));

        cursor = cursor.motion(&mut term, ViMotion::WordRightEnd);
        assert_eq!(cursor.point, Point::new(Line(0), Column(5)));

        cursor = cursor.motion(&mut term, ViMotion::WordLeft);
        assert_eq!(cursor.point, Point::new(Line(0), Column(4)));

        cursor = cursor.motion(&mut term, ViMotion::WordLeft);
        assert_eq!(cursor.point, Point::new(Line(0), Column(0)));

        cursor = cursor.motion(&mut term, ViMotion::WordRight);
        assert_eq!(cursor.point, Point::new(Line(0), Column(4)));

        cursor = cursor.motion(&mut term, ViMotion::WordLeftEnd);
        assert_eq!(cursor.point, Point::new(Line(0), Column(1)));
    }

    #[test]
    fn scroll_word() {
        // Word motions across an empty grid should travel between the
        // scrollback top and the viewport bottom, updating the display offset.
        let mut term = term();
        term.grid_mut().scroll_up(&(Line(0)..Line(20)), 5);

        let mut cursor = ViModeCursor::new(Point::new(Line(0), Column(0)));

        cursor = cursor.motion(&mut term, ViMotion::WordLeft);
        assert_eq!(cursor.point, Point::new(Line(-5), Column(0)));
        assert_eq!(term.grid().display_offset(), 5);

        cursor = cursor.motion(&mut term, ViMotion::WordRight);
        assert_eq!(cursor.point, Point::new(Line(19), Column(19)));
        assert_eq!(term.grid().display_offset(), 0);

        cursor = cursor.motion(&mut term, ViMotion::WordLeftEnd);
        assert_eq!(cursor.point, Point::new(Line(-5), Column(0)));
        assert_eq!(term.grid().display_offset(), 5);

        cursor = cursor.motion(&mut term, ViMotion::WordRightEnd);
        assert_eq!(cursor.point, Point::new(Line(19), Column(19)));
        assert_eq!(term.grid().display_offset(), 0);
    }

    #[test]
    fn word_wide() {
        // Layout: "a 汉<spacer> a" — word motions must treat the wide
        // character and its spacer as one cell.
        let mut term = term();
        term.grid_mut()[Line(0)][Column(0)].c = 'a';
        term.grid_mut()[Line(0)][Column(1)].c = ' ';
        term.grid_mut()[Line(0)][Column(2)].c = '汉';
        term.grid_mut()[Line(0)][Column(2)].flags.insert(Flags::WIDE_CHAR);
        term.grid_mut()[Line(0)][Column(3)].c = ' ';
        term.grid_mut()[Line(0)][Column(3)].flags.insert(Flags::WIDE_CHAR_SPACER);
        term.grid_mut()[Line(0)][Column(4)].c = ' ';
        term.grid_mut()[Line(0)][Column(5)].c = 'a';

        let mut cursor = ViModeCursor::new(Point::new(Line(0), Column(2)));
        cursor = cursor.motion(&mut term, ViMotion::WordRight);
        assert_eq!(cursor.point, Point::new(Line(0), Column(5)));

        let mut cursor = ViModeCursor::new(Point::new(Line(0), Column(3)));
        cursor = cursor.motion(&mut term, ViMotion::WordLeft);
        assert_eq!(cursor.point, Point::new(Line(0), Column(0)));
    }

    #[test]
    fn scroll_simple() {
        let mut term = term();

        // Create 1 line of scrollback.
        for _ in 0..20 {
            term.newline();
        }

        let mut cursor = ViModeCursor::new(Point::new(Line(0), Column(0)));

        cursor = cursor.scroll(&term, -1);
        assert_eq!(cursor.point, Point::new(Line(1), Column(0)));

        cursor = cursor.scroll(&term, 1);
        assert_eq!(cursor.point, Point::new(Line(0), Column(0)));

        cursor = cursor.scroll(&term, 1);
        assert_eq!(cursor.point, Point::new(Line(-1), Column(0)));
    }

    #[test]
    fn scroll_over_top() {
        let mut term = term();

        // Create 40 lines of scrollback.
        for _ in 0..59 {
            term.newline();
        }

        let mut cursor = ViModeCursor::new(Point::new(Line(19), Column(0)));

        cursor = cursor.scroll(&term, 20);
        assert_eq!(cursor.point, Point::new(Line(-1), Column(0)));

        cursor = cursor.scroll(&term, 20);
        assert_eq!(cursor.point, Point::new(Line(-21), Column(0)));

        cursor = cursor.scroll(&term, 20);
        assert_eq!(cursor.point, Point::new(Line(-40), Column(0)));

        // Clamped at the top of the scrollback.
        cursor = cursor.scroll(&term, 20);
        assert_eq!(cursor.point, Point::new(Line(-40), Column(0)));
    }

    #[test]
    fn scroll_over_bottom() {
        let mut term = term();

        // Create 40 lines of scrollback.
        for _ in 0..59 {
            term.newline();
        }

        let mut cursor = ViModeCursor::new(Point::new(Line(-40), Column(0)));

        cursor = cursor.scroll(&term, -20);
        assert_eq!(cursor.point, Point::new(Line(-20), Column(0)));

        cursor = cursor.scroll(&term, -20);
        assert_eq!(cursor.point, Point::new(Line(0), Column(0)));

        cursor = cursor.scroll(&term, -20);
        assert_eq!(cursor.point, Point::new(Line(19), Column(0)));

        // Clamped at the bottom of the viewport.
        cursor = cursor.scroll(&term, -20);
        assert_eq!(cursor.point, Point::new(Line(19), Column(0)));
    }

    #[test]
    fn wide_semantic_char() {
        // A wide semantic escape char ('-') must act as its own word.
        let mut term = term();
        term.set_semantic_escape_chars("-");

        term.grid_mut()[Line(0)][Column(0)].c = 'x';
        term.grid_mut()[Line(0)][Column(1)].c = 'x';
        term.grid_mut()[Line(0)][Column(2)].c = '-';
        term.grid_mut()[Line(0)][Column(2)].flags.insert(Flags::WIDE_CHAR);
        term.grid_mut()[Line(0)][Column(3)].c = ' ';
        term.grid_mut()[Line(0)][Column(3)].flags.insert(Flags::WIDE_CHAR_SPACER);
        term.grid_mut()[Line(0)][Column(4)].c = 'x';
        term.grid_mut()[Line(0)][Column(5)].c = 'x';

        // Test motion to the right.
        let mut cursor = ViModeCursor::new(Point::new(Line(0), Column(0)));
        cursor = cursor.motion(&mut term, ViMotion::SemanticRight);
        assert_eq!(cursor.point, Point::new(Line(0), Column(2)));
        let mut cursor = ViModeCursor::new(Point::new(Line(0), Column(2)));
        cursor = cursor.motion(&mut term, ViMotion::SemanticRight);
        assert_eq!(cursor.point, Point::new(Line(0), Column(4)));

        // Test motion to the left.
        let mut cursor = ViModeCursor::new(Point::new(Line(0), Column(5)));
        cursor = cursor.motion(&mut term, ViMotion::SemanticLeft);
        assert_eq!(cursor.point, Point::new(Line(0), Column(4)));
        let mut cursor = ViModeCursor::new(Point::new(Line(0), Column(4)));
        cursor = cursor.motion(&mut term, ViMotion::SemanticLeft);
        assert_eq!(cursor.point, Point::new(Line(0), Column(2)));
        let mut cursor = ViModeCursor::new(Point::new(Line(0), Column(2)));
        cursor = cursor.motion(&mut term, ViMotion::SemanticLeft);
        assert_eq!(cursor.point, Point::new(Line(0), Column(0)));
    }
}
| rust | {
"argument_definitions": [
{
"definitions": [
"pub struct Term<T> {\n /// Terminal focus controlling the cursor shape.\n pub is_focused: bool,\n\n /// Cursor for keyboard selection.\n pub vi_mode_cursor: ViModeCursor,\n\n pub selection: Option<Selection>,\n\n /// Currently active grid.\n ///\n /// Tracks the screen buffer currently in use. While the alternate screen buffer is active,\n /// this will be the alternate grid. Otherwise it is the primary screen buffer.\n grid: Grid<Cell>,\n\n /// Currently inactive grid.\n ///\n /// Opposite of the active grid. While the alternate screen buffer is active, this will be the\n /// primary grid. Otherwise it is the alternate screen buffer.\n inactive_grid: Grid<Cell>,\n\n /// Index into `charsets`, pointing to what ASCII is currently being mapped to.\n active_charset: CharsetIndex,\n\n /// Tabstops.\n tabs: TabStops,\n\n /// Mode flags.\n mode: TermMode,\n\n /// Scroll region.\n ///\n /// Range going from top to bottom of the terminal, indexed from the top of the viewport.\n scroll_region: Range<Line>,\n\n /// Modified terminal colors.\n colors: Colors,\n\n /// Current style of the cursor.\n cursor_style: Option<CursorStyle>,\n\n /// Proxy for sending events to the event loop.\n event_proxy: T,\n\n /// Current title of the window.\n title: Option<String>,\n\n /// Stack of saved window titles. When a title is popped from this stack, the `title` for the\n /// term is set.\n title_stack: Vec<Option<String>>,\n\n /// The stack for the keyboard modes.\n keyboard_mode_stack: Vec<KeyboardModes>,\n\n /// Currently inactive keyboard mode stack.\n inactive_keyboard_mode_stack: Vec<KeyboardModes>,\n\n /// Information about damaged cells.\n damage: TermDamageState,\n\n /// Config directly for the terminal.\n config: Config,\n}"
],
"name": "term",
"type": "&Term<T>"
},
{
"definitions": [
"pub struct Point<L = Line, C = Column> {\n pub line: L,\n pub column: C,\n}"
],
"name": "point",
"type": "Point"
}
],
"end_line": 269,
"name": "first_occupied",
"signature": "fn first_occupied(term: &Term<T>, mut point: Point) -> Point",
"start_line": 229
} | {
"class_name": "",
"class_signature": ""
} |
semantic | alacritty-master/alacritty_terminal/src/vi_mode.rs | fn semantic(
term: &Term<T>,
mut point: Point,
direction: Direction,
side: Side,
) -> Point {
// Expand semantically based on movement direction.
let expand_semantic = |point: Point| {
// Do not expand when currently on a semantic escape char.
let cell = &term.grid()[point];
if term.semantic_escape_chars().contains(cell.c)
&& !cell.flags.intersects(Flags::WIDE_CHAR_SPACER | Flags::LEADING_WIDE_CHAR_SPACER)
{
point
} else if direction == Direction::Left {
term.semantic_search_left(point)
} else {
term.semantic_search_right(point)
}
};
// Move to word boundary.
if direction != side && !is_boundary(term, point, direction) {
point = expand_semantic(point);
}
// Make sure we jump above wide chars.
point = term.expand_wide(point, direction);
// Skip whitespace.
let mut next_point = advance(term, point, direction);
while !is_boundary(term, point, direction) && is_space(term, next_point) {
point = next_point;
next_point = advance(term, point, direction);
}
// Assure minimum movement of one cell.
if !is_boundary(term, point, direction) {
point = advance(term, point, direction);
// Skip over wide cell spacers.
if direction == Direction::Left {
point = term.expand_wide(point, direction);
}
}
// Move to word boundary.
if direction == side && !is_boundary(term, point, direction) {
point = expand_semantic(point);
}
point
} | use std::cmp::min;
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
use crate::event::EventListener;
use crate::grid::{Dimensions, GridCell};
use crate::index::{Boundary, Column, Direction, Line, Point, Side};
use crate::term::cell::Flags;
use crate::term::Term;
/// Possible vi mode motion movements.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize), serde(rename_all = "lowercase"))]
pub enum ViMotion {
/// Move up.
Up,
/// Move down.
Down,
/// Move left.
Left,
/// Move right.
Right,
/// First column, or beginning of the line when already at the first column.
First,
/// Last column, or beginning of the line when already at the last column.
Last,
/// First non-empty cell in this terminal row, or first non-empty cell
/// of the line when already at the first cell of the row.
FirstOccupied,
/// Move to top of screen.
High,
/// Move to center of screen.
Middle,
/// Move to bottom of screen.
Low,
/// Move to start of semantically separated word.
SemanticLeft,
/// Move to start of next semantically separated word.
SemanticRight,
/// Move to end of previous semantically separated word.
SemanticLeftEnd,
/// Move to end of semantically separated word.
SemanticRightEnd,
/// Move to start of whitespace separated word.
WordLeft,
/// Move to start of next whitespace separated word.
WordRight,
/// Move to end of previous whitespace separated word.
WordLeftEnd,
/// Move to end of whitespace separated word.
WordRightEnd,
/// Move to opposing bracket.
Bracket,
/// Move above the current paragraph.
ParagraphUp,
/// Move below the current paragraph.
ParagraphDown,
}
/// Cursor tracking vi mode position.
#[derive(Default, Copy, Clone, PartialEq, Eq)]
pub struct ViModeCursor {
pub point: Point,
}
impl ViModeCursor {
pub fn new(point: Point) -> Self {
Self { point }
}
    /// Move vi mode cursor.
    ///
    /// Applies `motion` to the cursor inside `term`, scrolls the terminal so
    /// the resulting point is visible, and returns the updated cursor.
    #[must_use = "this returns the result of the operation, without modifying the original"]
    pub fn motion<T: EventListener>(mut self, term: &mut Term<T>, motion: ViMotion) -> Self {
        match motion {
            ViMotion::Up => {
                // Clamp upward movement to the top of the scrollback buffer.
                if self.point.line > term.topmost_line() {
                    self.point.line -= 1;
                }
            },
            ViMotion::Down => {
                // Clamp downward movement to the bottom of the screen.
                if self.point.line + 1 < term.screen_lines() as i32 {
                    self.point.line += 1;
                }
            },
            ViMotion::Left => {
                self.point = term.expand_wide(self.point, Direction::Left);
                let wrap_point = Point::new(self.point.line - 1, term.last_column());
                // Follow a line wrap onto the end of the previous line,
                // otherwise move one column left (saturating at column 0).
                if self.point.column == 0
                    && self.point.line > term.topmost_line()
                    && is_wrap(term, wrap_point)
                {
                    self.point = wrap_point;
                } else {
                    self.point.column = Column(self.point.column.saturating_sub(1));
                }
            },
            ViMotion::Right => {
                self.point = term.expand_wide(self.point, Direction::Right);
                // Follow a line wrap onto the start of the next line,
                // otherwise move one column right (clamped to the last column).
                if is_wrap(term, self.point) {
                    self.point = Point::new(self.point.line + 1, Column(0));
                } else {
                    self.point.column = min(self.point.column + 1, term.last_column());
                }
            },
            ViMotion::First => {
                self.point = term.expand_wide(self.point, Direction::Left);
                // Walk up to the first line of the wrapped-line chain.
                while self.point.column == 0
                    && self.point.line > term.topmost_line()
                    && is_wrap(term, Point::new(self.point.line - 1, term.last_column()))
                {
                    self.point.line -= 1;
                }
                self.point.column = Column(0);
            },
            ViMotion::Last => self.point = last(term, self.point),
            ViMotion::FirstOccupied => self.point = first_occupied(term, self.point),
            ViMotion::High => {
                // Topmost visible line, accounting for the display offset.
                let line = Line(-(term.grid().display_offset() as i32));
                let col = first_occupied_in_line(term, line).unwrap_or_default().column;
                self.point = Point::new(line, col);
            },
            ViMotion::Middle => {
                let display_offset = term.grid().display_offset() as i32;
                // Middle line of the visible area.
                let line = Line(-display_offset + term.screen_lines() as i32 / 2 - 1);
                let col = first_occupied_in_line(term, line).unwrap_or_default().column;
                self.point = Point::new(line, col);
            },
            ViMotion::Low => {
                let display_offset = term.grid().display_offset() as i32;
                // Bottommost visible line.
                let line = Line(-display_offset + term.screen_lines() as i32 - 1);
                let col = first_occupied_in_line(term, line).unwrap_or_default().column;
                self.point = Point::new(line, col);
            },
            ViMotion::SemanticLeft => {
                self.point = semantic(term, self.point, Direction::Left, Side::Left);
            },
            ViMotion::SemanticRight => {
                self.point = semantic(term, self.point, Direction::Right, Side::Left);
            },
            ViMotion::SemanticLeftEnd => {
                self.point = semantic(term, self.point, Direction::Left, Side::Right);
            },
            ViMotion::SemanticRightEnd => {
                self.point = semantic(term, self.point, Direction::Right, Side::Right);
            },
            ViMotion::WordLeft => {
                self.point = word(term, self.point, Direction::Left, Side::Left);
            },
            ViMotion::WordRight => {
                self.point = word(term, self.point, Direction::Right, Side::Left);
            },
            ViMotion::WordLeftEnd => {
                self.point = word(term, self.point, Direction::Left, Side::Right);
            },
            ViMotion::WordRightEnd => {
                self.point = word(term, self.point, Direction::Right, Side::Right);
            },
            ViMotion::Bracket => self.point = term.bracket_search(self.point).unwrap_or(self.point),
            ViMotion::ParagraphUp => {
                // Skip empty lines until we find the next paragraph,
                // then skip over the paragraph until we reach the next empty line.
                let topmost_line = term.topmost_line();
                self.point.line = (*topmost_line..*self.point.line)
                    .rev()
                    .skip_while(|line| term.grid()[Line(*line)].is_clear())
                    .find(|line| term.grid()[Line(*line)].is_clear())
                    .map_or(topmost_line, Line);
                self.point.column = Column(0);
            },
            ViMotion::ParagraphDown => {
                // Skip empty lines until we find the next paragraph,
                // then skip over the paragraph until we reach the next empty line.
                let bottommost_line = term.bottommost_line();
                self.point.line = (*self.point.line..*bottommost_line)
                    .skip_while(|line| term.grid()[Line(*line)].is_clear())
                    .find(|line| term.grid()[Line(*line)].is_clear())
                    .map_or(bottommost_line, Line);
                self.point.column = Column(0);
            },
        }

        // Ensure the new cursor position is on screen.
        term.scroll_to_point(self.point);

        self
    }
/// Get target cursor point for vim-like page movement.
#[must_use = "this returns the result of the operation, without modifying the original"]
pub fn scroll<T: EventListener>(mut self, term: &Term<T>, lines: i32) -> Self {
// Clamp movement to within visible region.
let line = (self.point.line - lines).grid_clamp(term, Boundary::Grid);
// Find the first occupied cell after scrolling has been performed.
let column = first_occupied_in_line(term, line).unwrap_or_default().column;
// Move cursor.
self.point = Point::new(line, column);
self
}
}
/// Find next end of line to move to.
fn last<T>(term: &Term<T>, mut point: Point) -> Point {
    // Make sure we start at the rightmost cell of any wide character.
    point = term.expand_wide(point, Direction::Right);

    // Locate the last non-empty cell of the current line.
    let occupied = last_occupied_in_line(term, point.line).unwrap_or_default();

    if point.column < occupied.column {
        // Not yet at the last occupied cell: jump straight to it.
        return occupied;
    }

    if !is_wrap(term, point) {
        // At or beyond the last occupied cell with no wrap: go to the last column.
        return Point::new(point.line, term.last_column());
    }

    // Follow the wrapped-line chain downwards, then take its last occupied cell.
    while is_wrap(term, point) {
        point.line += 1;
    }
    last_occupied_in_line(term, point.line).unwrap_or(point)
}
/// Find next non-empty cell to move to.
///
/// Jumps to the first occupied cell of the current line; when the cursor is
/// already on that cell, continues the search backwards across the chain of
/// wrapped lines leading into this one.
fn first_occupied<T>(term: &Term<T>, mut point: Point) -> Point {
    let last_column = term.last_column();

    // Expand left across wide chars, since we're searching lines left to right.
    point = term.expand_wide(point, Direction::Left);

    // Find first non-empty cell in current line.
    let occupied = first_occupied_in_line(term, point.line)
        .unwrap_or_else(|| Point::new(point.line, last_column));

    // Jump across wrapped lines if we're already at this line's first occupied cell.
    if point == occupied {
        let mut occupied = None;

        // Search for non-empty cell in previous lines; stop as soon as a line
        // does not wrap into the one below it.
        for line in (term.topmost_line().0..point.line.0).rev().map(Line::from) {
            if !is_wrap(term, Point::new(line, last_column)) {
                break;
            }

            occupied = first_occupied_in_line(term, line).or(occupied);
        }

        // Fallback to the next non-empty cell, scanning downwards through the
        // wrapped-line chain starting at the original line.
        let mut line = point.line;
        occupied.unwrap_or_else(|| loop {
            if let Some(occupied) = first_occupied_in_line(term, line) {
                break occupied;
            }

            let last_cell = Point::new(line, last_column);
            if !is_wrap(term, last_cell) {
                break last_cell;
            }

            line += 1;
        })
    } else {
        occupied
    }
}
/// Move by semantically separated word, like w/b/e/ge in vi.
///
/// `direction` is the direction of travel, while `side` selects which edge of
/// the word the motion lands on; when `direction == side` the expansion to the
/// word boundary happens after moving, otherwise before.
fn semantic<T: EventListener>(
    term: &Term<T>,
    mut point: Point,
    direction: Direction,
    side: Side,
) -> Point {
    // Expand semantically based on movement direction.
    let expand_semantic = |point: Point| {
        // Do not expand when currently on a semantic escape char.
        let cell = &term.grid()[point];
        if term.semantic_escape_chars().contains(cell.c)
            && !cell.flags.intersects(Flags::WIDE_CHAR_SPACER | Flags::LEADING_WIDE_CHAR_SPACER)
        {
            point
        } else if direction == Direction::Left {
            term.semantic_search_left(point)
        } else {
            term.semantic_search_right(point)
        }
    };

    // Move to word boundary.
    if direction != side && !is_boundary(term, point, direction) {
        point = expand_semantic(point);
    }

    // Make sure we jump above wide chars.
    point = term.expand_wide(point, direction);

    // Skip whitespace until the next cell would be inside a word.
    let mut next_point = advance(term, point, direction);
    while !is_boundary(term, point, direction) && is_space(term, next_point) {
        point = next_point;
        next_point = advance(term, point, direction);
    }

    // Assure minimum movement of one cell.
    if !is_boundary(term, point, direction) {
        point = advance(term, point, direction);

        // Skip over wide cell spacers.
        if direction == Direction::Left {
            point = term.expand_wide(point, direction);
        }
    }

    // Move to word boundary.
    if direction == side && !is_boundary(term, point, direction) {
        point = expand_semantic(point);
    }

    point
}
/// Move by whitespace separated word, like W/B/E/gE in vi.
fn word<T: EventListener>(
    term: &Term<T>,
    mut point: Point,
    direction: Direction,
    side: Side,
) -> Point {
    // Make sure we jump above wide chars.
    point = term.expand_wide(point, direction);

    if direction == side {
        // Moving towards the word edge in the travel direction: first cross
        // the whitespace gap, then run through the word while the next cell
        // still belongs to it.
        let mut next = advance(term, point, direction);
        while !is_boundary(term, point, direction) && is_space(term, next) {
            point = next;
            next = advance(term, point, direction);
        }

        let mut next = advance(term, point, direction);
        while !is_boundary(term, point, direction) && !is_space(term, next) {
            point = next;
            next = advance(term, point, direction);
        }
    } else {
        // Moving towards the opposite word edge: step out of the current word,
        // then skip the whitespace gap up to the next word's boundary.
        while !is_boundary(term, point, direction) && !is_space(term, point) {
            point = advance(term, point, direction);
        }

        while !is_boundary(term, point, direction) && is_space(term, point) {
            point = advance(term, point, direction);
        }
    }

    point
}
/// Find first non-empty cell in line.
fn first_occupied_in_line<T>(term: &Term<T>, line: Line) -> Option<Point> {
    // Scan the line left to right, returning the first non-whitespace cell.
    for col in 0..term.columns() {
        let point = Point::new(line, Column(col));
        if !is_space(term, point) {
            return Some(point);
        }
    }
    None
}
/// Find last non-empty cell in line.
fn last_occupied_in_line<T>(term: &Term<T>, line: Line) -> Option<Point> {
    // Scan the line right to left, returning the first non-whitespace cell.
    (0..term.columns())
        .rev()
        .map(|col| Point::new(line, Column(col)))
        .find(|&point| !is_space(term, point))
}
/// Advance point based on direction.
fn advance<T>(term: &Term<T>, point: Point, direction: Direction) -> Point {
    // Step exactly one cell, wrapping across lines within the grid boundary.
    match direction {
        Direction::Left => point.sub(term, Boundary::Grid, 1),
        Direction::Right => point.add(term, Boundary::Grid, 1),
    }
}
/// Check if cell at point contains whitespace.
fn is_space<T>(term: &Term<T>, point: Point) -> bool {
    let cell = &term.grid()[point.line][point.column];

    // Wide-char spacers hold a blank character but represent part of a wide
    // glyph, so they never count as whitespace.
    let is_spacer =
        cell.flags().intersects(Flags::WIDE_CHAR_SPACER | Flags::LEADING_WIDE_CHAR_SPACER);

    !is_spacer && matches!(cell.c, ' ' | '\t')
}
/// Check if the cell at a point contains the WRAPLINE flag.
fn is_wrap<T>(term: &Term<T>, point: Point) -> bool {
    let flags = term.grid()[point].flags;
    flags.contains(Flags::WRAPLINE)
}
/// Check if point is at screen boundary.
fn is_boundary<T>(term: &Term<T>, point: Point, direction: Direction) -> bool {
    match direction {
        // Top-left corner of the scrollback is the leftward boundary.
        Direction::Left => point.line <= term.topmost_line() && point.column == 0,
        // Bottom-right corner of the screen is the rightward boundary.
        Direction::Right => {
            point.line == term.bottommost_line() && point.column + 1 >= term.columns()
        },
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    use crate::event::VoidListener;
    use crate::index::{Column, Line};
    use crate::term::test::TermSize;
    use crate::term::{Config, Term};
    use crate::vte::ansi::Handler;

    // Construct an empty 20x20 terminal used as the playground for motions.
    fn term() -> Term<VoidListener> {
        let size = TermSize::new(20, 20);
        Term::new(Config::default(), &size, VoidListener)
    }

    #[test]
    fn motion_simple() {
        let mut term = term();

        let mut cursor = ViModeCursor::new(Point::new(Line(0), Column(0)));

        cursor = cursor.motion(&mut term, ViMotion::Right);
        assert_eq!(cursor.point, Point::new(Line(0), Column(1)));

        cursor = cursor.motion(&mut term, ViMotion::Left);
        assert_eq!(cursor.point, Point::new(Line(0), Column(0)));

        cursor = cursor.motion(&mut term, ViMotion::Down);
        assert_eq!(cursor.point, Point::new(Line(1), Column(0)));

        cursor = cursor.motion(&mut term, ViMotion::Up);
        assert_eq!(cursor.point, Point::new(Line(0), Column(0)));
    }

    #[test]
    fn simple_wide() {
        // Layout: "a汉 a" with the spacer cell for the wide char at column 2.
        let mut term = term();
        term.grid_mut()[Line(0)][Column(0)].c = 'a';
        term.grid_mut()[Line(0)][Column(1)].c = '汉';
        term.grid_mut()[Line(0)][Column(1)].flags.insert(Flags::WIDE_CHAR);
        term.grid_mut()[Line(0)][Column(2)].c = ' ';
        term.grid_mut()[Line(0)][Column(2)].flags.insert(Flags::WIDE_CHAR_SPACER);
        term.grid_mut()[Line(0)][Column(3)].c = 'a';

        let mut cursor = ViModeCursor::new(Point::new(Line(0), Column(1)));
        cursor = cursor.motion(&mut term, ViMotion::Right);
        assert_eq!(cursor.point, Point::new(Line(0), Column(3)));

        let mut cursor = ViModeCursor::new(Point::new(Line(0), Column(2)));
        cursor = cursor.motion(&mut term, ViMotion::Left);
        assert_eq!(cursor.point, Point::new(Line(0), Column(0)));
    }

    #[test]
    fn motion_start_end() {
        let mut term = term();

        let mut cursor = ViModeCursor::new(Point::new(Line(0), Column(0)));

        cursor = cursor.motion(&mut term, ViMotion::Last);
        assert_eq!(cursor.point, Point::new(Line(0), Column(19)));

        cursor = cursor.motion(&mut term, ViMotion::First);
        assert_eq!(cursor.point, Point::new(Line(0), Column(0)));
    }

    #[test]
    fn motion_first_occupied() {
        let mut term = term();
        term.grid_mut()[Line(0)][Column(0)].c = ' ';
        term.grid_mut()[Line(0)][Column(1)].c = 'x';
        term.grid_mut()[Line(0)][Column(2)].c = ' ';
        term.grid_mut()[Line(0)][Column(3)].c = 'y';
        term.grid_mut()[Line(0)][Column(19)].flags.insert(Flags::WRAPLINE);
        term.grid_mut()[Line(1)][Column(19)].flags.insert(Flags::WRAPLINE);
        term.grid_mut()[Line(2)][Column(0)].c = 'z';
        term.grid_mut()[Line(2)][Column(1)].c = ' ';

        let mut cursor = ViModeCursor::new(Point::new(Line(2), Column(1)));

        cursor = cursor.motion(&mut term, ViMotion::FirstOccupied);
        assert_eq!(cursor.point, Point::new(Line(2), Column(0)));

        // Already at the first occupied cell, so jump across the wraps.
        cursor = cursor.motion(&mut term, ViMotion::FirstOccupied);
        assert_eq!(cursor.point, Point::new(Line(0), Column(1)));
    }

    #[test]
    fn motion_high_middle_low() {
        let mut term = term();

        let mut cursor = ViModeCursor::new(Point::new(Line(0), Column(0)));

        cursor = cursor.motion(&mut term, ViMotion::High);
        assert_eq!(cursor.point, Point::new(Line(0), Column(0)));

        cursor = cursor.motion(&mut term, ViMotion::Middle);
        assert_eq!(cursor.point, Point::new(Line(9), Column(0)));

        cursor = cursor.motion(&mut term, ViMotion::Low);
        assert_eq!(cursor.point, Point::new(Line(19), Column(0)));
    }

    #[test]
    fn motion_bracket() {
        let mut term = term();
        term.grid_mut()[Line(0)][Column(0)].c = '(';
        term.grid_mut()[Line(0)][Column(1)].c = 'x';
        term.grid_mut()[Line(0)][Column(2)].c = ')';

        let mut cursor = ViModeCursor::new(Point::new(Line(0), Column(0)));

        cursor = cursor.motion(&mut term, ViMotion::Bracket);
        assert_eq!(cursor.point, Point::new(Line(0), Column(2)));

        cursor = cursor.motion(&mut term, ViMotion::Bracket);
        assert_eq!(cursor.point, Point::new(Line(0), Column(0)));
    }

    // Layout used by the semantic motion tests: "x xx  : x:x  : x".
    fn motion_semantic_term() -> Term<VoidListener> {
        let mut term = term();

        term.grid_mut()[Line(0)][Column(0)].c = 'x';
        term.grid_mut()[Line(0)][Column(1)].c = ' ';
        term.grid_mut()[Line(0)][Column(2)].c = 'x';
        term.grid_mut()[Line(0)][Column(3)].c = 'x';
        term.grid_mut()[Line(0)][Column(4)].c = ' ';
        term.grid_mut()[Line(0)][Column(5)].c = ' ';
        term.grid_mut()[Line(0)][Column(6)].c = ':';
        term.grid_mut()[Line(0)][Column(7)].c = ' ';
        term.grid_mut()[Line(0)][Column(8)].c = 'x';
        term.grid_mut()[Line(0)][Column(9)].c = ':';
        term.grid_mut()[Line(0)][Column(10)].c = 'x';
        term.grid_mut()[Line(0)][Column(11)].c = ' ';
        term.grid_mut()[Line(0)][Column(12)].c = ' ';
        term.grid_mut()[Line(0)][Column(13)].c = ':';
        term.grid_mut()[Line(0)][Column(14)].c = ' ';
        term.grid_mut()[Line(0)][Column(15)].c = 'x';

        term
    }

    #[test]
    fn motion_semantic_right_end() {
        let mut term = motion_semantic_term();

        let mut cursor = ViModeCursor::new(Point::new(Line(0), Column(0)));

        cursor = cursor.motion(&mut term, ViMotion::SemanticRightEnd);
        assert_eq!(cursor.point, Point::new(Line(0), Column(3)));

        cursor = cursor.motion(&mut term, ViMotion::SemanticRightEnd);
        assert_eq!(cursor.point, Point::new(Line(0), Column(6)));

        cursor = cursor.motion(&mut term, ViMotion::SemanticRightEnd);
        assert_eq!(cursor.point, Point::new(Line(0), Column(8)));

        cursor = cursor.motion(&mut term, ViMotion::SemanticRightEnd);
        assert_eq!(cursor.point, Point::new(Line(0), Column(9)));

        cursor = cursor.motion(&mut term, ViMotion::SemanticRightEnd);
        assert_eq!(cursor.point, Point::new(Line(0), Column(10)));

        cursor = cursor.motion(&mut term, ViMotion::SemanticRightEnd);
        assert_eq!(cursor.point, Point::new(Line(0), Column(13)));

        cursor = cursor.motion(&mut term, ViMotion::SemanticRightEnd);
        assert_eq!(cursor.point, Point::new(Line(0), Column(15)));
    }

    #[test]
    fn motion_semantic_left_start() {
        let mut term = motion_semantic_term();

        let mut cursor = ViModeCursor::new(Point::new(Line(0), Column(15)));

        cursor = cursor.motion(&mut term, ViMotion::SemanticLeft);
        assert_eq!(cursor.point, Point::new(Line(0), Column(13)));

        cursor = cursor.motion(&mut term, ViMotion::SemanticLeft);
        assert_eq!(cursor.point, Point::new(Line(0), Column(10)));

        cursor = cursor.motion(&mut term, ViMotion::SemanticLeft);
        assert_eq!(cursor.point, Point::new(Line(0), Column(9)));

        cursor = cursor.motion(&mut term, ViMotion::SemanticLeft);
        assert_eq!(cursor.point, Point::new(Line(0), Column(8)));

        cursor = cursor.motion(&mut term, ViMotion::SemanticLeft);
        assert_eq!(cursor.point, Point::new(Line(0), Column(6)));

        cursor = cursor.motion(&mut term, ViMotion::SemanticLeft);
        assert_eq!(cursor.point, Point::new(Line(0), Column(2)));

        cursor = cursor.motion(&mut term, ViMotion::SemanticLeft);
        assert_eq!(cursor.point, Point::new(Line(0), Column(0)));
    }

    #[test]
    fn motion_semantic_right_start() {
        let mut term = motion_semantic_term();

        let mut cursor = ViModeCursor::new(Point::new(Line(0), Column(0)));

        cursor = cursor.motion(&mut term, ViMotion::SemanticRight);
        assert_eq!(cursor.point, Point::new(Line(0), Column(2)));

        cursor = cursor.motion(&mut term, ViMotion::SemanticRight);
        assert_eq!(cursor.point, Point::new(Line(0), Column(6)));

        cursor = cursor.motion(&mut term, ViMotion::SemanticRight);
        assert_eq!(cursor.point, Point::new(Line(0), Column(8)));

        cursor = cursor.motion(&mut term, ViMotion::SemanticRight);
        assert_eq!(cursor.point, Point::new(Line(0), Column(9)));

        cursor = cursor.motion(&mut term, ViMotion::SemanticRight);
        assert_eq!(cursor.point, Point::new(Line(0), Column(10)));

        cursor = cursor.motion(&mut term, ViMotion::SemanticRight);
        assert_eq!(cursor.point, Point::new(Line(0), Column(13)));

        cursor = cursor.motion(&mut term, ViMotion::SemanticRight);
        assert_eq!(cursor.point, Point::new(Line(0), Column(15)));
    }

    #[test]
    fn motion_semantic_left_end() {
        let mut term = motion_semantic_term();

        let mut cursor = ViModeCursor::new(Point::new(Line(0), Column(15)));

        cursor = cursor.motion(&mut term, ViMotion::SemanticLeftEnd);
        assert_eq!(cursor.point, Point::new(Line(0), Column(13)));

        cursor = cursor.motion(&mut term, ViMotion::SemanticLeftEnd);
        assert_eq!(cursor.point, Point::new(Line(0), Column(10)));

        cursor = cursor.motion(&mut term, ViMotion::SemanticLeftEnd);
        assert_eq!(cursor.point, Point::new(Line(0), Column(9)));

        cursor = cursor.motion(&mut term, ViMotion::SemanticLeftEnd);
        assert_eq!(cursor.point, Point::new(Line(0), Column(8)));

        cursor = cursor.motion(&mut term, ViMotion::SemanticLeftEnd);
        assert_eq!(cursor.point, Point::new(Line(0), Column(6)));

        cursor = cursor.motion(&mut term, ViMotion::SemanticLeftEnd);
        assert_eq!(cursor.point, Point::new(Line(0), Column(3)));

        cursor = cursor.motion(&mut term, ViMotion::SemanticLeftEnd);
        assert_eq!(cursor.point, Point::new(Line(0), Column(0)));
    }

    #[test]
    fn scroll_semantic() {
        // Semantic motions should scroll the display to keep the cursor visible.
        let mut term = term();
        term.grid_mut().scroll_up(&(Line(0)..Line(20)), 5);

        let mut cursor = ViModeCursor::new(Point::new(Line(0), Column(0)));

        cursor = cursor.motion(&mut term, ViMotion::SemanticLeft);
        assert_eq!(cursor.point, Point::new(Line(-5), Column(0)));
        assert_eq!(term.grid().display_offset(), 5);

        cursor = cursor.motion(&mut term, ViMotion::SemanticRight);
        assert_eq!(cursor.point, Point::new(Line(19), Column(19)));
        assert_eq!(term.grid().display_offset(), 0);

        cursor = cursor.motion(&mut term, ViMotion::SemanticLeftEnd);
        assert_eq!(cursor.point, Point::new(Line(-5), Column(0)));
        assert_eq!(term.grid().display_offset(), 5);

        cursor = cursor.motion(&mut term, ViMotion::SemanticRightEnd);
        assert_eq!(cursor.point, Point::new(Line(19), Column(19)));
        assert_eq!(term.grid().display_offset(), 0);
    }

    #[test]
    fn semantic_wide() {
        let mut term = term();
        term.grid_mut()[Line(0)][Column(0)].c = 'a';
        term.grid_mut()[Line(0)][Column(1)].c = ' ';
        term.grid_mut()[Line(0)][Column(2)].c = '汉';
        term.grid_mut()[Line(0)][Column(2)].flags.insert(Flags::WIDE_CHAR);
        term.grid_mut()[Line(0)][Column(3)].c = ' ';
        term.grid_mut()[Line(0)][Column(3)].flags.insert(Flags::WIDE_CHAR_SPACER);
        term.grid_mut()[Line(0)][Column(4)].c = ' ';
        term.grid_mut()[Line(0)][Column(5)].c = 'a';

        let mut cursor = ViModeCursor::new(Point::new(Line(0), Column(2)));
        cursor = cursor.motion(&mut term, ViMotion::SemanticRight);
        assert_eq!(cursor.point, Point::new(Line(0), Column(5)));

        let mut cursor = ViModeCursor::new(Point::new(Line(0), Column(3)));
        cursor = cursor.motion(&mut term, ViMotion::SemanticLeft);
        assert_eq!(cursor.point, Point::new(Line(0), Column(0)));
    }

    #[test]
    fn motion_word() {
        let mut term = term();
        term.grid_mut()[Line(0)][Column(0)].c = 'a';
        term.grid_mut()[Line(0)][Column(1)].c = ';';
        term.grid_mut()[Line(0)][Column(2)].c = ' ';
        term.grid_mut()[Line(0)][Column(3)].c = ' ';
        term.grid_mut()[Line(0)][Column(4)].c = 'a';
        term.grid_mut()[Line(0)][Column(5)].c = ';';

        let mut cursor = ViModeCursor::new(Point::new(Line(0), Column(0)));

        cursor = cursor.motion(&mut term, ViMotion::WordRightEnd);
        assert_eq!(cursor.point, Point::new(Line(0), Column(1)));

        cursor = cursor.motion(&mut term, ViMotion::WordRightEnd);
        assert_eq!(cursor.point, Point::new(Line(0), Column(5)));

        cursor = cursor.motion(&mut term, ViMotion::WordLeft);
        assert_eq!(cursor.point, Point::new(Line(0), Column(4)));

        cursor = cursor.motion(&mut term, ViMotion::WordLeft);
        assert_eq!(cursor.point, Point::new(Line(0), Column(0)));

        cursor = cursor.motion(&mut term, ViMotion::WordRight);
        assert_eq!(cursor.point, Point::new(Line(0), Column(4)));

        cursor = cursor.motion(&mut term, ViMotion::WordLeftEnd);
        assert_eq!(cursor.point, Point::new(Line(0), Column(1)));
    }

    #[test]
    fn scroll_word() {
        // Word motions should scroll the display to keep the cursor visible.
        let mut term = term();
        term.grid_mut().scroll_up(&(Line(0)..Line(20)), 5);

        let mut cursor = ViModeCursor::new(Point::new(Line(0), Column(0)));

        cursor = cursor.motion(&mut term, ViMotion::WordLeft);
        assert_eq!(cursor.point, Point::new(Line(-5), Column(0)));
        assert_eq!(term.grid().display_offset(), 5);

        cursor = cursor.motion(&mut term, ViMotion::WordRight);
        assert_eq!(cursor.point, Point::new(Line(19), Column(19)));
        assert_eq!(term.grid().display_offset(), 0);

        cursor = cursor.motion(&mut term, ViMotion::WordLeftEnd);
        assert_eq!(cursor.point, Point::new(Line(-5), Column(0)));
        assert_eq!(term.grid().display_offset(), 5);

        cursor = cursor.motion(&mut term, ViMotion::WordRightEnd);
        assert_eq!(cursor.point, Point::new(Line(19), Column(19)));
        assert_eq!(term.grid().display_offset(), 0);
    }

    #[test]
    fn word_wide() {
        let mut term = term();
        term.grid_mut()[Line(0)][Column(0)].c = 'a';
        term.grid_mut()[Line(0)][Column(1)].c = ' ';
        term.grid_mut()[Line(0)][Column(2)].c = '汉';
        term.grid_mut()[Line(0)][Column(2)].flags.insert(Flags::WIDE_CHAR);
        term.grid_mut()[Line(0)][Column(3)].c = ' ';
        term.grid_mut()[Line(0)][Column(3)].flags.insert(Flags::WIDE_CHAR_SPACER);
        term.grid_mut()[Line(0)][Column(4)].c = ' ';
        term.grid_mut()[Line(0)][Column(5)].c = 'a';

        let mut cursor = ViModeCursor::new(Point::new(Line(0), Column(2)));
        cursor = cursor.motion(&mut term, ViMotion::WordRight);
        assert_eq!(cursor.point, Point::new(Line(0), Column(5)));

        let mut cursor = ViModeCursor::new(Point::new(Line(0), Column(3)));
        cursor = cursor.motion(&mut term, ViMotion::WordLeft);
        assert_eq!(cursor.point, Point::new(Line(0), Column(0)));
    }

    #[test]
    fn scroll_simple() {
        let mut term = term();

        // Create 1 line of scrollback.
        for _ in 0..20 {
            term.newline();
        }

        let mut cursor = ViModeCursor::new(Point::new(Line(0), Column(0)));

        cursor = cursor.scroll(&term, -1);
        assert_eq!(cursor.point, Point::new(Line(1), Column(0)));

        cursor = cursor.scroll(&term, 1);
        assert_eq!(cursor.point, Point::new(Line(0), Column(0)));

        cursor = cursor.scroll(&term, 1);
        assert_eq!(cursor.point, Point::new(Line(-1), Column(0)));
    }

    #[test]
    fn scroll_over_top() {
        let mut term = term();

        // Create 40 lines of scrollback.
        for _ in 0..59 {
            term.newline();
        }

        let mut cursor = ViModeCursor::new(Point::new(Line(19), Column(0)));

        cursor = cursor.scroll(&term, 20);
        assert_eq!(cursor.point, Point::new(Line(-1), Column(0)));

        cursor = cursor.scroll(&term, 20);
        assert_eq!(cursor.point, Point::new(Line(-21), Column(0)));

        cursor = cursor.scroll(&term, 20);
        assert_eq!(cursor.point, Point::new(Line(-40), Column(0)));

        // Scrolling past the top of the scrollback clamps to the topmost line.
        cursor = cursor.scroll(&term, 20);
        assert_eq!(cursor.point, Point::new(Line(-40), Column(0)));
    }

    #[test]
    fn scroll_over_bottom() {
        let mut term = term();

        // Create 40 lines of scrollback.
        for _ in 0..59 {
            term.newline();
        }

        let mut cursor = ViModeCursor::new(Point::new(Line(-40), Column(0)));

        cursor = cursor.scroll(&term, -20);
        assert_eq!(cursor.point, Point::new(Line(-20), Column(0)));

        cursor = cursor.scroll(&term, -20);
        assert_eq!(cursor.point, Point::new(Line(0), Column(0)));

        cursor = cursor.scroll(&term, -20);
        assert_eq!(cursor.point, Point::new(Line(19), Column(0)));

        // Scrolling past the bottom of the screen clamps to the last line.
        cursor = cursor.scroll(&term, -20);
        assert_eq!(cursor.point, Point::new(Line(19), Column(0)));
    }

    #[test]
    fn wide_semantic_char() {
        let mut term = term();
        term.set_semantic_escape_chars("-");

        term.grid_mut()[Line(0)][Column(0)].c = 'x';
        term.grid_mut()[Line(0)][Column(1)].c = 'x';
        term.grid_mut()[Line(0)][Column(2)].c = '-';
        term.grid_mut()[Line(0)][Column(2)].flags.insert(Flags::WIDE_CHAR);
        term.grid_mut()[Line(0)][Column(3)].c = ' ';
        term.grid_mut()[Line(0)][Column(3)].flags.insert(Flags::WIDE_CHAR_SPACER);
        term.grid_mut()[Line(0)][Column(4)].c = 'x';
        term.grid_mut()[Line(0)][Column(5)].c = 'x';

        // Test motion to the right.
        let mut cursor = ViModeCursor::new(Point::new(Line(0), Column(0)));
        cursor = cursor.motion(&mut term, ViMotion::SemanticRight);
        assert_eq!(cursor.point, Point::new(Line(0), Column(2)));

        let mut cursor = ViModeCursor::new(Point::new(Line(0), Column(2)));
        cursor = cursor.motion(&mut term, ViMotion::SemanticRight);
        assert_eq!(cursor.point, Point::new(Line(0), Column(4)));

        // Test motion to the left.
        let mut cursor = ViModeCursor::new(Point::new(Line(0), Column(5)));
        cursor = cursor.motion(&mut term, ViMotion::SemanticLeft);
        assert_eq!(cursor.point, Point::new(Line(0), Column(4)));

        let mut cursor = ViModeCursor::new(Point::new(Line(0), Column(4)));
        cursor = cursor.motion(&mut term, ViMotion::SemanticLeft);
        assert_eq!(cursor.point, Point::new(Line(0), Column(2)));

        let mut cursor = ViModeCursor::new(Point::new(Line(0), Column(2)));
        cursor = cursor.motion(&mut term, ViMotion::SemanticLeft);
        assert_eq!(cursor.point, Point::new(Line(0), Column(0)));
    }
}
| rust | {
"argument_definitions": [
{
"definitions": [
"pub struct Term<T> {\n /// Terminal focus controlling the cursor shape.\n pub is_focused: bool,\n\n /// Cursor for keyboard selection.\n pub vi_mode_cursor: ViModeCursor,\n\n pub selection: Option<Selection>,\n\n /// Currently active grid.\n ///\n /// Tracks the screen buffer currently in use. While the alternate screen buffer is active,\n /// this will be the alternate grid. Otherwise it is the primary screen buffer.\n grid: Grid<Cell>,\n\n /// Currently inactive grid.\n ///\n /// Opposite of the active grid. While the alternate screen buffer is active, this will be the\n /// primary grid. Otherwise it is the alternate screen buffer.\n inactive_grid: Grid<Cell>,\n\n /// Index into `charsets`, pointing to what ASCII is currently being mapped to.\n active_charset: CharsetIndex,\n\n /// Tabstops.\n tabs: TabStops,\n\n /// Mode flags.\n mode: TermMode,\n\n /// Scroll region.\n ///\n /// Range going from top to bottom of the terminal, indexed from the top of the viewport.\n scroll_region: Range<Line>,\n\n /// Modified terminal colors.\n colors: Colors,\n\n /// Current style of the cursor.\n cursor_style: Option<CursorStyle>,\n\n /// Proxy for sending events to the event loop.\n event_proxy: T,\n\n /// Current title of the window.\n title: Option<String>,\n\n /// Stack of saved window titles. When a title is popped from this stack, the `title` for the\n /// term is set.\n title_stack: Vec<Option<String>>,\n\n /// The stack for the keyboard modes.\n keyboard_mode_stack: Vec<KeyboardModes>,\n\n /// Currently inactive keyboard mode stack.\n inactive_keyboard_mode_stack: Vec<KeyboardModes>,\n\n /// Information about damaged cells.\n damage: TermDamageState,\n\n /// Config directly for the terminal.\n config: Config,\n}"
],
"name": "term",
"type": "&Term<T>"
},
{
"definitions": [
"pub struct Point<L = Line, C = Column> {\n pub line: L,\n pub column: C,\n}"
],
"name": "point",
"type": "Point"
},
{
"definitions": [
"pub enum Direction {\n Left,\n Right,\n}"
],
"name": "direction",
"type": "Direction"
},
{
"definitions": [
"pub enum Direction {\n Left,\n Right,\n}"
],
"name": "side",
"type": "Side"
}
],
"end_line": 324,
"name": "semantic",
"signature": "fn semantic(\n term: &Term<T>,\n mut point: Point,\n direction: Direction,\n side: Side,\n) -> Point",
"start_line": 272
} | {
"class_name": "",
"class_signature": ""
} |
word | alacritty-master/alacritty_terminal/src/vi_mode.rs | fn word(
term: &Term<T>,
mut point: Point,
direction: Direction,
side: Side,
) -> Point {
// Make sure we jump above wide chars.
point = term.expand_wide(point, direction);
if direction == side {
// Skip whitespace until right before a word.
let mut next_point = advance(term, point, direction);
while !is_boundary(term, point, direction) && is_space(term, next_point) {
point = next_point;
next_point = advance(term, point, direction);
}
// Skip non-whitespace until right inside word boundary.
let mut next_point = advance(term, point, direction);
while !is_boundary(term, point, direction) && !is_space(term, next_point) {
point = next_point;
next_point = advance(term, point, direction);
}
}
if direction != side {
// Skip non-whitespace until just beyond word.
while !is_boundary(term, point, direction) && !is_space(term, point) {
point = advance(term, point, direction);
}
// Skip whitespace until right inside word boundary.
while !is_boundary(term, point, direction) && is_space(term, point) {
point = advance(term, point, direction);
}
}
point
} | use std::cmp::min;
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
use crate::event::EventListener;
use crate::grid::{Dimensions, GridCell};
use crate::index::{Boundary, Column, Direction, Line, Point, Side};
use crate::term::cell::Flags;
use crate::term::Term;
/// Possible vi mode motion movements.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize), serde(rename_all = "lowercase"))]
pub enum ViMotion {
/// Move up.
Up,
/// Move down.
Down,
/// Move left.
Left,
/// Move right.
Right,
/// First column, or beginning of the line when already at the first column.
First,
/// Last column, or beginning of the line when already at the last column.
Last,
/// First non-empty cell in this terminal row, or first non-empty cell
/// of the line when already at the first cell of the row.
FirstOccupied,
/// Move to top of screen.
High,
/// Move to center of screen.
Middle,
/// Move to bottom of screen.
Low,
/// Move to start of semantically separated word.
SemanticLeft,
/// Move to start of next semantically separated word.
SemanticRight,
/// Move to end of previous semantically separated word.
SemanticLeftEnd,
/// Move to end of semantically separated word.
SemanticRightEnd,
/// Move to start of whitespace separated word.
WordLeft,
/// Move to start of next whitespace separated word.
WordRight,
/// Move to end of previous whitespace separated word.
WordLeftEnd,
/// Move to end of whitespace separated word.
WordRightEnd,
/// Move to opposing bracket.
Bracket,
/// Move above the current paragraph.
ParagraphUp,
/// Move below the current paragraph.
ParagraphDown,
}
/// Cursor tracking vi mode position.
#[derive(Default, Copy, Clone, PartialEq, Eq)]
pub struct ViModeCursor {
pub point: Point,
}
impl ViModeCursor {
pub fn new(point: Point) -> Self {
Self { point }
}
/// Move vi mode cursor.
///
/// Applies the requested [`ViMotion`] to `self.point`, then scrolls the
/// terminal so the resulting point is visible. Consumes and returns the
/// cursor, so callers must use the returned value.
#[must_use = "this returns the result of the operation, without modifying the original"]
pub fn motion<T: EventListener>(mut self, term: &mut Term<T>, motion: ViMotion) -> Self {
    match motion {
        // Move one line up, stopping at the top of the scrollback.
        ViMotion::Up => {
            if self.point.line > term.topmost_line() {
                self.point.line -= 1;
            }
        },
        // Move one line down, stopping at the bottom of the viewport.
        ViMotion::Down => {
            if self.point.line + 1 < term.screen_lines() as i32 {
                self.point.line += 1;
            }
        },
        // Move one cell left, following wrapped lines onto the previous row.
        ViMotion::Left => {
            self.point = term.expand_wide(self.point, Direction::Left);
            let wrap_point = Point::new(self.point.line - 1, term.last_column());
            if self.point.column == 0
                && self.point.line > term.topmost_line()
                && is_wrap(term, wrap_point)
            {
                // At column 0 of a wrapped continuation: jump to the end of the previous row.
                self.point = wrap_point;
            } else {
                self.point.column = Column(self.point.column.saturating_sub(1));
            }
        },
        // Move one cell right, following wrapped lines onto the next row.
        ViMotion::Right => {
            self.point = term.expand_wide(self.point, Direction::Right);
            if is_wrap(term, self.point) {
                self.point = Point::new(self.point.line + 1, Column(0));
            } else {
                self.point.column = min(self.point.column + 1, term.last_column());
            }
        },
        // Jump to the first column of the (unwrapped) line.
        ViMotion::First => {
            self.point = term.expand_wide(self.point, Direction::Left);
            // Walk up through wrapped rows to the start of the logical line.
            while self.point.column == 0
                && self.point.line > term.topmost_line()
                && is_wrap(term, Point::new(self.point.line - 1, term.last_column()))
            {
                self.point.line -= 1;
            }
            self.point.column = Column(0);
        },
        // Jump to the end of the (unwrapped) line.
        ViMotion::Last => self.point = last(term, self.point),
        // Jump to the first non-empty cell (searching across wraps).
        ViMotion::FirstOccupied => self.point = first_occupied(term, self.point),
        // Top of the visible viewport (vi `H`).
        ViMotion::High => {
            let line = Line(-(term.grid().display_offset() as i32));
            let col = first_occupied_in_line(term, line).unwrap_or_default().column;
            self.point = Point::new(line, col);
        },
        // Middle of the visible viewport (vi `M`).
        ViMotion::Middle => {
            let display_offset = term.grid().display_offset() as i32;
            let line = Line(-display_offset + term.screen_lines() as i32 / 2 - 1);
            let col = first_occupied_in_line(term, line).unwrap_or_default().column;
            self.point = Point::new(line, col);
        },
        // Bottom of the visible viewport (vi `L`).
        ViMotion::Low => {
            let display_offset = term.grid().display_offset() as i32;
            let line = Line(-display_offset + term.screen_lines() as i32 - 1);
            let col = first_occupied_in_line(term, line).unwrap_or_default().column;
            self.point = Point::new(line, col);
        },
        // Semantic (escape-char delimited) word motions; see `semantic`.
        ViMotion::SemanticLeft => {
            self.point = semantic(term, self.point, Direction::Left, Side::Left);
        },
        ViMotion::SemanticRight => {
            self.point = semantic(term, self.point, Direction::Right, Side::Left);
        },
        ViMotion::SemanticLeftEnd => {
            self.point = semantic(term, self.point, Direction::Left, Side::Right);
        },
        ViMotion::SemanticRightEnd => {
            self.point = semantic(term, self.point, Direction::Right, Side::Right);
        },
        // Whitespace-delimited word motions; see `word`.
        ViMotion::WordLeft => {
            self.point = word(term, self.point, Direction::Left, Side::Left);
        },
        ViMotion::WordRight => {
            self.point = word(term, self.point, Direction::Right, Side::Left);
        },
        ViMotion::WordLeftEnd => {
            self.point = word(term, self.point, Direction::Left, Side::Right);
        },
        ViMotion::WordRightEnd => {
            self.point = word(term, self.point, Direction::Right, Side::Right);
        },
        // Jump to the matching bracket, staying put when there is none.
        ViMotion::Bracket => self.point = term.bracket_search(self.point).unwrap_or(self.point),
        ViMotion::ParagraphUp => {
            // Skip empty lines until we find the next paragraph,
            // then skip over the paragraph until we reach the next empty line.
            let topmost_line = term.topmost_line();
            self.point.line = (*topmost_line..*self.point.line)
                .rev()
                .skip_while(|line| term.grid()[Line(*line)].is_clear())
                .find(|line| term.grid()[Line(*line)].is_clear())
                .map_or(topmost_line, Line);
            self.point.column = Column(0);
        },
        ViMotion::ParagraphDown => {
            // Skip empty lines until we find the next paragraph,
            // then skip over the paragraph until we reach the next empty line.
            let bottommost_line = term.bottommost_line();
            self.point.line = (*self.point.line..*bottommost_line)
                .skip_while(|line| term.grid()[Line(*line)].is_clear())
                .find(|line| term.grid()[Line(*line)].is_clear())
                .map_or(bottommost_line, Line);
            self.point.column = Column(0);
        },
    }
    // Keep the cursor visible after every motion.
    term.scroll_to_point(self.point);
    self
}
/// Get target cursor point for vim-like page movement.
#[must_use = "this returns the result of the operation, without modifying the original"]
pub fn scroll<T: EventListener>(mut self, term: &Term<T>, lines: i32) -> Self {
// Clamp movement to within visible region.
let line = (self.point.line - lines).grid_clamp(term, Boundary::Grid);
// Find the first occupied cell after scrolling has been performed.
let column = first_occupied_in_line(term, line).unwrap_or_default().column;
// Move cursor.
self.point = Point::new(line, column);
self
}
}
/// Find next end of line to move to.
///
/// Repeated invocations progress: last occupied cell of the current row,
/// then the last occupied cell of the wrapped logical line, then the final
/// column of the row.
fn last<T>(term: &Term<T>, mut point: Point) -> Point {
    // Expand across wide cells.
    point = term.expand_wide(point, Direction::Right);
    // Find last non-empty cell in the current line.
    let occupied = last_occupied_in_line(term, point.line).unwrap_or_default();
    if point.column < occupied.column {
        // Jump to last occupied cell when not already at or beyond it.
        occupied
    } else if is_wrap(term, point) {
        // Jump to last occupied cell across linewraps.
        while is_wrap(term, point) {
            point.line += 1;
        }
        // Fall back to the wrap row itself if the final row is empty.
        last_occupied_in_line(term, point.line).unwrap_or(point)
    } else {
        // Jump to last column when beyond the last occupied cell.
        Point::new(point.line, term.last_column())
    }
}
/// Find next non-empty cell to move to.
///
/// Returns the first occupied cell of the current line, or — when already
/// there — the first occupied cell of the wrapped logical line, searching
/// earlier wrapped rows first and later rows as a fallback.
fn first_occupied<T>(term: &Term<T>, mut point: Point) -> Point {
    let last_column = term.last_column();
    // Expand left across wide chars, since we're searching lines left to right.
    point = term.expand_wide(point, Direction::Left);
    // Find first non-empty cell in current line.
    let occupied = first_occupied_in_line(term, point.line)
        .unwrap_or_else(|| Point::new(point.line, last_column));
    // Jump across wrapped lines if we're already at this line's first occupied cell.
    if point == occupied {
        let mut occupied = None;
        // Search for non-empty cell in previous lines.
        for line in (term.topmost_line().0..point.line.0).rev().map(Line::from) {
            if !is_wrap(term, Point::new(line, last_column)) {
                // Previous row doesn't wrap into ours: logical line starts here.
                break;
            }
            // Keep the earliest occupied cell found so far.
            occupied = first_occupied_in_line(term, line).or(occupied);
        }
        // Fallback to the next non-empty cell.
        let mut line = point.line;
        occupied.unwrap_or_else(|| loop {
            if let Some(occupied) = first_occupied_in_line(term, line) {
                break occupied;
            }
            let last_cell = Point::new(line, last_column);
            if !is_wrap(term, last_cell) {
                // Logical line ends empty: settle for its last cell.
                break last_cell;
            }
            line += 1;
        })
    } else {
        occupied
    }
}
/// Move by semantically separated word, like w/b/e/ge in vi.
///
/// `direction` selects the search direction; `side` selects which edge of
/// the word the motion should land on. The steps below are order-dependent:
/// expand, skip whitespace, guarantee minimum movement, then expand again.
fn semantic<T: EventListener>(
    term: &Term<T>,
    mut point: Point,
    direction: Direction,
    side: Side,
) -> Point {
    // Expand semantically based on movement direction.
    let expand_semantic = |point: Point| {
        // Do not expand when currently on a semantic escape char.
        let cell = &term.grid()[point];
        if term.semantic_escape_chars().contains(cell.c)
            && !cell.flags.intersects(Flags::WIDE_CHAR_SPACER | Flags::LEADING_WIDE_CHAR_SPACER)
        {
            point
        } else if direction == Direction::Left {
            term.semantic_search_left(point)
        } else {
            term.semantic_search_right(point)
        }
    };
    // Move to word boundary.
    if direction != side && !is_boundary(term, point, direction) {
        point = expand_semantic(point);
    }
    // Make sure we jump above wide chars.
    point = term.expand_wide(point, direction);
    // Skip whitespace.
    let mut next_point = advance(term, point, direction);
    while !is_boundary(term, point, direction) && is_space(term, next_point) {
        point = next_point;
        next_point = advance(term, point, direction);
    }
    // Assure minimum movement of one cell.
    if !is_boundary(term, point, direction) {
        point = advance(term, point, direction);
        // Skip over wide cell spacers.
        if direction == Direction::Left {
            point = term.expand_wide(point, direction);
        }
    }
    // Move to word boundary.
    if direction == side && !is_boundary(term, point, direction) {
        point = expand_semantic(point);
    }
    point
}
/// Move by whitespace separated word, like W/B/E/gE in vi.
///
/// `direction` selects the search direction; `side` selects which edge of
/// the word the motion lands on. When `direction == side` the motion stops
/// just inside the far edge of the next word; otherwise it stops at the
/// near edge of the next word in `direction`.
fn word<T: EventListener>(
    term: &Term<T>,
    mut point: Point,
    direction: Direction,
    side: Side,
) -> Point {
    // Make sure we jump above wide chars.
    point = term.expand_wide(point, direction);
    if direction == side {
        // Skip whitespace until right before a word.
        let mut next_point = advance(term, point, direction);
        while !is_boundary(term, point, direction) && is_space(term, next_point) {
            point = next_point;
            next_point = advance(term, point, direction);
        }
        // Skip non-whitespace until right inside word boundary.
        let mut next_point = advance(term, point, direction);
        while !is_boundary(term, point, direction) && !is_space(term, next_point) {
            point = next_point;
            next_point = advance(term, point, direction);
        }
    }
    if direction != side {
        // Skip non-whitespace until just beyond word.
        while !is_boundary(term, point, direction) && !is_space(term, point) {
            point = advance(term, point, direction);
        }
        // Skip whitespace until right inside word boundary.
        while !is_boundary(term, point, direction) && is_space(term, point) {
            point = advance(term, point, direction);
        }
    }
    point
}
/// Find first non-empty cell in line.
///
/// Returns `None` when every cell in the line is whitespace.
fn first_occupied_in_line<T>(term: &Term<T>, line: Line) -> Option<Point> {
    // Scan left to right and stop at the first non-whitespace cell.
    (0..term.columns()).find_map(|col| {
        let point = Point::new(line, Column(col));
        if is_space(term, point) {
            None
        } else {
            Some(point)
        }
    })
}
/// Find last non-empty cell in line.
///
/// Returns `None` when every cell in the line is whitespace.
fn last_occupied_in_line<T>(term: &Term<T>, line: Line) -> Option<Point> {
    // Scan right to left and stop at the first non-whitespace cell.
    (0..term.columns()).rev().find_map(|col| {
        let point = Point::new(line, Column(col));
        if is_space(term, point) {
            None
        } else {
            Some(point)
        }
    })
}
/// Advance point based on direction.
///
/// Moves one cell left or right, wrapping across lines within the grid.
fn advance<T>(term: &Term<T>, point: Point, direction: Direction) -> Point {
    match direction {
        Direction::Left => point.sub(term, Boundary::Grid, 1),
        Direction::Right => point.add(term, Boundary::Grid, 1),
    }
}
/// Check if cell at point contains whitespace.
///
/// Wide-char spacer cells are never treated as whitespace, since they
/// belong to the wide character occupying the previous column.
fn is_space<T>(term: &Term<T>, point: Point) -> bool {
    let cell = &term.grid()[point.line][point.column];
    let is_spacer =
        cell.flags().intersects(Flags::WIDE_CHAR_SPACER | Flags::LEADING_WIDE_CHAR_SPACER);
    let is_blank_char = matches!(cell.c, ' ' | '\t');
    !is_spacer && is_blank_char
}
/// Check if the cell at a point contains the WRAPLINE flag.
fn is_wrap<T>(term: &Term<T>, point: Point) -> bool {
    let flags = term.grid()[point].flags;
    flags.contains(Flags::WRAPLINE)
}
/// Check if point is at screen boundary.
///
/// A point is a boundary when no further movement in `direction` is
/// possible: the top-left corner moving left, or the last cell of the
/// bottommost line moving right.
fn is_boundary<T>(term: &Term<T>, point: Point, direction: Direction) -> bool {
    match direction {
        Direction::Left => point.line <= term.topmost_line() && point.column == 0,
        Direction::Right => {
            point.line == term.bottommost_line() && point.column + 1 >= term.columns()
        },
    }
}
#[cfg(test)]
mod tests {
    //! Unit tests for vi mode cursor motions on a small fixture terminal.
    use super::*;
    use crate::event::VoidListener;
    use crate::index::{Column, Line};
    use crate::term::test::TermSize;
    use crate::term::{Config, Term};
    use crate::vte::ansi::Handler;

    /// Build a default 20x20 terminal used by all tests below.
    fn term() -> Term<VoidListener> {
        let size = TermSize::new(20, 20);
        Term::new(Config::default(), &size, VoidListener)
    }

    // Single-cell movement in all four directions.
    #[test]
    fn motion_simple() {
        let mut term = term();
        let mut cursor = ViModeCursor::new(Point::new(Line(0), Column(0)));
        cursor = cursor.motion(&mut term, ViMotion::Right);
        assert_eq!(cursor.point, Point::new(Line(0), Column(1)));
        cursor = cursor.motion(&mut term, ViMotion::Left);
        assert_eq!(cursor.point, Point::new(Line(0), Column(0)));
        cursor = cursor.motion(&mut term, ViMotion::Down);
        assert_eq!(cursor.point, Point::new(Line(1), Column(0)));
        cursor = cursor.motion(&mut term, ViMotion::Up);
        assert_eq!(cursor.point, Point::new(Line(0), Column(0)));
    }

    // Left/Right motions skip over wide chars and their spacer cells.
    #[test]
    fn simple_wide() {
        let mut term = term();
        term.grid_mut()[Line(0)][Column(0)].c = 'a';
        term.grid_mut()[Line(0)][Column(1)].c = '汉';
        term.grid_mut()[Line(0)][Column(1)].flags.insert(Flags::WIDE_CHAR);
        term.grid_mut()[Line(0)][Column(2)].c = ' ';
        term.grid_mut()[Line(0)][Column(2)].flags.insert(Flags::WIDE_CHAR_SPACER);
        term.grid_mut()[Line(0)][Column(3)].c = 'a';
        let mut cursor = ViModeCursor::new(Point::new(Line(0), Column(1)));
        cursor = cursor.motion(&mut term, ViMotion::Right);
        assert_eq!(cursor.point, Point::new(Line(0), Column(3)));
        let mut cursor = ViModeCursor::new(Point::new(Line(0), Column(2)));
        cursor = cursor.motion(&mut term, ViMotion::Left);
        assert_eq!(cursor.point, Point::new(Line(0), Column(0)));
    }

    // Last jumps to the final column; First returns to column 0.
    #[test]
    fn motion_start_end() {
        let mut term = term();
        let mut cursor = ViModeCursor::new(Point::new(Line(0), Column(0)));
        cursor = cursor.motion(&mut term, ViMotion::Last);
        assert_eq!(cursor.point, Point::new(Line(0), Column(19)));
        cursor = cursor.motion(&mut term, ViMotion::First);
        assert_eq!(cursor.point, Point::new(Line(0), Column(0)));
    }

    // FirstOccupied finds occupied cells, following wraps backwards.
    #[test]
    fn motion_first_occupied() {
        let mut term = term();
        term.grid_mut()[Line(0)][Column(0)].c = ' ';
        term.grid_mut()[Line(0)][Column(1)].c = 'x';
        term.grid_mut()[Line(0)][Column(2)].c = ' ';
        term.grid_mut()[Line(0)][Column(3)].c = 'y';
        term.grid_mut()[Line(0)][Column(19)].flags.insert(Flags::WRAPLINE);
        term.grid_mut()[Line(1)][Column(19)].flags.insert(Flags::WRAPLINE);
        term.grid_mut()[Line(2)][Column(0)].c = 'z';
        term.grid_mut()[Line(2)][Column(1)].c = ' ';
        let mut cursor = ViModeCursor::new(Point::new(Line(2), Column(1)));
        cursor = cursor.motion(&mut term, ViMotion::FirstOccupied);
        assert_eq!(cursor.point, Point::new(Line(2), Column(0)));
        // Second invocation crosses the wrapped rows back to Line(0).
        cursor = cursor.motion(&mut term, ViMotion::FirstOccupied);
        assert_eq!(cursor.point, Point::new(Line(0), Column(1)));
    }

    // Viewport-relative H/M/L motions.
    #[test]
    fn motion_high_middle_low() {
        let mut term = term();
        let mut cursor = ViModeCursor::new(Point::new(Line(0), Column(0)));
        cursor = cursor.motion(&mut term, ViMotion::High);
        assert_eq!(cursor.point, Point::new(Line(0), Column(0)));
        cursor = cursor.motion(&mut term, ViMotion::Middle);
        assert_eq!(cursor.point, Point::new(Line(9), Column(0)));
        cursor = cursor.motion(&mut term, ViMotion::Low);
        assert_eq!(cursor.point, Point::new(Line(19), Column(0)));
    }

    // Bracket jumps between matching brackets in both directions.
    #[test]
    fn motion_bracket() {
        let mut term = term();
        term.grid_mut()[Line(0)][Column(0)].c = '(';
        term.grid_mut()[Line(0)][Column(1)].c = 'x';
        term.grid_mut()[Line(0)][Column(2)].c = ')';
        let mut cursor = ViModeCursor::new(Point::new(Line(0), Column(0)));
        cursor = cursor.motion(&mut term, ViMotion::Bracket);
        assert_eq!(cursor.point, Point::new(Line(0), Column(2)));
        cursor = cursor.motion(&mut term, ViMotion::Bracket);
        assert_eq!(cursor.point, Point::new(Line(0), Column(0)));
    }

    /// Fixture line mixing words, spaces and ':' semantic escape chars:
    /// "x xx  : x:x  : x".
    fn motion_semantic_term() -> Term<VoidListener> {
        let mut term = term();
        term.grid_mut()[Line(0)][Column(0)].c = 'x';
        term.grid_mut()[Line(0)][Column(1)].c = ' ';
        term.grid_mut()[Line(0)][Column(2)].c = 'x';
        term.grid_mut()[Line(0)][Column(3)].c = 'x';
        term.grid_mut()[Line(0)][Column(4)].c = ' ';
        term.grid_mut()[Line(0)][Column(5)].c = ' ';
        term.grid_mut()[Line(0)][Column(6)].c = ':';
        term.grid_mut()[Line(0)][Column(7)].c = ' ';
        term.grid_mut()[Line(0)][Column(8)].c = 'x';
        term.grid_mut()[Line(0)][Column(9)].c = ':';
        term.grid_mut()[Line(0)][Column(10)].c = 'x';
        term.grid_mut()[Line(0)][Column(11)].c = ' ';
        term.grid_mut()[Line(0)][Column(12)].c = ' ';
        term.grid_mut()[Line(0)][Column(13)].c = ':';
        term.grid_mut()[Line(0)][Column(14)].c = ' ';
        term.grid_mut()[Line(0)][Column(15)].c = 'x';
        term
    }

    // Semantic `e`-style motion visits the end of each semantic word.
    #[test]
    fn motion_semantic_right_end() {
        let mut term = motion_semantic_term();
        let mut cursor = ViModeCursor::new(Point::new(Line(0), Column(0)));
        cursor = cursor.motion(&mut term, ViMotion::SemanticRightEnd);
        assert_eq!(cursor.point, Point::new(Line(0), Column(3)));
        cursor = cursor.motion(&mut term, ViMotion::SemanticRightEnd);
        assert_eq!(cursor.point, Point::new(Line(0), Column(6)));
        cursor = cursor.motion(&mut term, ViMotion::SemanticRightEnd);
        assert_eq!(cursor.point, Point::new(Line(0), Column(8)));
        cursor = cursor.motion(&mut term, ViMotion::SemanticRightEnd);
        assert_eq!(cursor.point, Point::new(Line(0), Column(9)));
        cursor = cursor.motion(&mut term, ViMotion::SemanticRightEnd);
        assert_eq!(cursor.point, Point::new(Line(0), Column(10)));
        cursor = cursor.motion(&mut term, ViMotion::SemanticRightEnd);
        assert_eq!(cursor.point, Point::new(Line(0), Column(13)));
        cursor = cursor.motion(&mut term, ViMotion::SemanticRightEnd);
        assert_eq!(cursor.point, Point::new(Line(0), Column(15)));
    }

    // Semantic `b`-style motion visits the start of each semantic word.
    #[test]
    fn motion_semantic_left_start() {
        let mut term = motion_semantic_term();
        let mut cursor = ViModeCursor::new(Point::new(Line(0), Column(15)));
        cursor = cursor.motion(&mut term, ViMotion::SemanticLeft);
        assert_eq!(cursor.point, Point::new(Line(0), Column(13)));
        cursor = cursor.motion(&mut term, ViMotion::SemanticLeft);
        assert_eq!(cursor.point, Point::new(Line(0), Column(10)));
        cursor = cursor.motion(&mut term, ViMotion::SemanticLeft);
        assert_eq!(cursor.point, Point::new(Line(0), Column(9)));
        cursor = cursor.motion(&mut term, ViMotion::SemanticLeft);
        assert_eq!(cursor.point, Point::new(Line(0), Column(8)));
        cursor = cursor.motion(&mut term, ViMotion::SemanticLeft);
        assert_eq!(cursor.point, Point::new(Line(0), Column(6)));
        cursor = cursor.motion(&mut term, ViMotion::SemanticLeft);
        assert_eq!(cursor.point, Point::new(Line(0), Column(2)));
        cursor = cursor.motion(&mut term, ViMotion::SemanticLeft);
        assert_eq!(cursor.point, Point::new(Line(0), Column(0)));
    }

    // Semantic `w`-style motion visits the start of each following word.
    #[test]
    fn motion_semantic_right_start() {
        let mut term = motion_semantic_term();
        let mut cursor = ViModeCursor::new(Point::new(Line(0), Column(0)));
        cursor = cursor.motion(&mut term, ViMotion::SemanticRight);
        assert_eq!(cursor.point, Point::new(Line(0), Column(2)));
        cursor = cursor.motion(&mut term, ViMotion::SemanticRight);
        assert_eq!(cursor.point, Point::new(Line(0), Column(6)));
        cursor = cursor.motion(&mut term, ViMotion::SemanticRight);
        assert_eq!(cursor.point, Point::new(Line(0), Column(8)));
        cursor = cursor.motion(&mut term, ViMotion::SemanticRight);
        assert_eq!(cursor.point, Point::new(Line(0), Column(9)));
        cursor = cursor.motion(&mut term, ViMotion::SemanticRight);
        assert_eq!(cursor.point, Point::new(Line(0), Column(10)));
        cursor = cursor.motion(&mut term, ViMotion::SemanticRight);
        assert_eq!(cursor.point, Point::new(Line(0), Column(13)));
        cursor = cursor.motion(&mut term, ViMotion::SemanticRight);
        assert_eq!(cursor.point, Point::new(Line(0), Column(15)));
    }

    // Semantic `ge`-style motion visits the end of each preceding word.
    #[test]
    fn motion_semantic_left_end() {
        let mut term = motion_semantic_term();
        let mut cursor = ViModeCursor::new(Point::new(Line(0), Column(15)));
        cursor = cursor.motion(&mut term, ViMotion::SemanticLeftEnd);
        assert_eq!(cursor.point, Point::new(Line(0), Column(13)));
        cursor = cursor.motion(&mut term, ViMotion::SemanticLeftEnd);
        assert_eq!(cursor.point, Point::new(Line(0), Column(10)));
        cursor = cursor.motion(&mut term, ViMotion::SemanticLeftEnd);
        assert_eq!(cursor.point, Point::new(Line(0), Column(9)));
        cursor = cursor.motion(&mut term, ViMotion::SemanticLeftEnd);
        assert_eq!(cursor.point, Point::new(Line(0), Column(8)));
        cursor = cursor.motion(&mut term, ViMotion::SemanticLeftEnd);
        assert_eq!(cursor.point, Point::new(Line(0), Column(6)));
        cursor = cursor.motion(&mut term, ViMotion::SemanticLeftEnd);
        assert_eq!(cursor.point, Point::new(Line(0), Column(3)));
        cursor = cursor.motion(&mut term, ViMotion::SemanticLeftEnd);
        assert_eq!(cursor.point, Point::new(Line(0), Column(0)));
    }

    // Semantic motions scroll the display to keep the cursor visible.
    #[test]
    fn scroll_semantic() {
        let mut term = term();
        term.grid_mut().scroll_up(&(Line(0)..Line(20)), 5);
        let mut cursor = ViModeCursor::new(Point::new(Line(0), Column(0)));
        cursor = cursor.motion(&mut term, ViMotion::SemanticLeft);
        assert_eq!(cursor.point, Point::new(Line(-5), Column(0)));
        assert_eq!(term.grid().display_offset(), 5);
        cursor = cursor.motion(&mut term, ViMotion::SemanticRight);
        assert_eq!(cursor.point, Point::new(Line(19), Column(19)));
        assert_eq!(term.grid().display_offset(), 0);
        cursor = cursor.motion(&mut term, ViMotion::SemanticLeftEnd);
        assert_eq!(cursor.point, Point::new(Line(-5), Column(0)));
        assert_eq!(term.grid().display_offset(), 5);
        cursor = cursor.motion(&mut term, ViMotion::SemanticRightEnd);
        assert_eq!(cursor.point, Point::new(Line(19), Column(19)));
        assert_eq!(term.grid().display_offset(), 0);
    }

    // Semantic motions treat a wide char plus spacer as one cell.
    #[test]
    fn semantic_wide() {
        let mut term = term();
        term.grid_mut()[Line(0)][Column(0)].c = 'a';
        term.grid_mut()[Line(0)][Column(1)].c = ' ';
        term.grid_mut()[Line(0)][Column(2)].c = '汉';
        term.grid_mut()[Line(0)][Column(2)].flags.insert(Flags::WIDE_CHAR);
        term.grid_mut()[Line(0)][Column(3)].c = ' ';
        term.grid_mut()[Line(0)][Column(3)].flags.insert(Flags::WIDE_CHAR_SPACER);
        term.grid_mut()[Line(0)][Column(4)].c = ' ';
        term.grid_mut()[Line(0)][Column(5)].c = 'a';
        let mut cursor = ViModeCursor::new(Point::new(Line(0), Column(2)));
        cursor = cursor.motion(&mut term, ViMotion::SemanticRight);
        assert_eq!(cursor.point, Point::new(Line(0), Column(5)));
        let mut cursor = ViModeCursor::new(Point::new(Line(0), Column(3)));
        cursor = cursor.motion(&mut term, ViMotion::SemanticLeft);
        assert_eq!(cursor.point, Point::new(Line(0), Column(0)));
    }

    // Whitespace-delimited word motions (W/B/E/gE).
    #[test]
    fn motion_word() {
        let mut term = term();
        term.grid_mut()[Line(0)][Column(0)].c = 'a';
        term.grid_mut()[Line(0)][Column(1)].c = ';';
        term.grid_mut()[Line(0)][Column(2)].c = ' ';
        term.grid_mut()[Line(0)][Column(3)].c = ' ';
        term.grid_mut()[Line(0)][Column(4)].c = 'a';
        term.grid_mut()[Line(0)][Column(5)].c = ';';
        let mut cursor = ViModeCursor::new(Point::new(Line(0), Column(0)));
        cursor = cursor.motion(&mut term, ViMotion::WordRightEnd);
        assert_eq!(cursor.point, Point::new(Line(0), Column(1)));
        cursor = cursor.motion(&mut term, ViMotion::WordRightEnd);
        assert_eq!(cursor.point, Point::new(Line(0), Column(5)));
        cursor = cursor.motion(&mut term, ViMotion::WordLeft);
        assert_eq!(cursor.point, Point::new(Line(0), Column(4)));
        cursor = cursor.motion(&mut term, ViMotion::WordLeft);
        assert_eq!(cursor.point, Point::new(Line(0), Column(0)));
        cursor = cursor.motion(&mut term, ViMotion::WordRight);
        assert_eq!(cursor.point, Point::new(Line(0), Column(4)));
        cursor = cursor.motion(&mut term, ViMotion::WordLeftEnd);
        assert_eq!(cursor.point, Point::new(Line(0), Column(1)));
    }

    // Word motions scroll the display to keep the cursor visible.
    #[test]
    fn scroll_word() {
        let mut term = term();
        term.grid_mut().scroll_up(&(Line(0)..Line(20)), 5);
        let mut cursor = ViModeCursor::new(Point::new(Line(0), Column(0)));
        cursor = cursor.motion(&mut term, ViMotion::WordLeft);
        assert_eq!(cursor.point, Point::new(Line(-5), Column(0)));
        assert_eq!(term.grid().display_offset(), 5);
        cursor = cursor.motion(&mut term, ViMotion::WordRight);
        assert_eq!(cursor.point, Point::new(Line(19), Column(19)));
        assert_eq!(term.grid().display_offset(), 0);
        cursor = cursor.motion(&mut term, ViMotion::WordLeftEnd);
        assert_eq!(cursor.point, Point::new(Line(-5), Column(0)));
        assert_eq!(term.grid().display_offset(), 5);
        cursor = cursor.motion(&mut term, ViMotion::WordRightEnd);
        assert_eq!(cursor.point, Point::new(Line(19), Column(19)));
        assert_eq!(term.grid().display_offset(), 0);
    }

    // Word motions treat a wide char plus spacer as one cell.
    #[test]
    fn word_wide() {
        let mut term = term();
        term.grid_mut()[Line(0)][Column(0)].c = 'a';
        term.grid_mut()[Line(0)][Column(1)].c = ' ';
        term.grid_mut()[Line(0)][Column(2)].c = '汉';
        term.grid_mut()[Line(0)][Column(2)].flags.insert(Flags::WIDE_CHAR);
        term.grid_mut()[Line(0)][Column(3)].c = ' ';
        term.grid_mut()[Line(0)][Column(3)].flags.insert(Flags::WIDE_CHAR_SPACER);
        term.grid_mut()[Line(0)][Column(4)].c = ' ';
        term.grid_mut()[Line(0)][Column(5)].c = 'a';
        let mut cursor = ViModeCursor::new(Point::new(Line(0), Column(2)));
        cursor = cursor.motion(&mut term, ViMotion::WordRight);
        assert_eq!(cursor.point, Point::new(Line(0), Column(5)));
        let mut cursor = ViModeCursor::new(Point::new(Line(0), Column(3)));
        cursor = cursor.motion(&mut term, ViMotion::WordLeft);
        assert_eq!(cursor.point, Point::new(Line(0), Column(0)));
    }

    // Page scrolling moves into and out of the scrollback.
    #[test]
    fn scroll_simple() {
        let mut term = term();
        // Create 1 line of scrollback.
        for _ in 0..20 {
            term.newline();
        }
        let mut cursor = ViModeCursor::new(Point::new(Line(0), Column(0)));
        cursor = cursor.scroll(&term, -1);
        assert_eq!(cursor.point, Point::new(Line(1), Column(0)));
        cursor = cursor.scroll(&term, 1);
        assert_eq!(cursor.point, Point::new(Line(0), Column(0)));
        cursor = cursor.scroll(&term, 1);
        assert_eq!(cursor.point, Point::new(Line(-1), Column(0)));
    }

    // Scrolling up is clamped at the top of the scrollback.
    #[test]
    fn scroll_over_top() {
        let mut term = term();
        // Create 40 lines of scrollback.
        for _ in 0..59 {
            term.newline();
        }
        let mut cursor = ViModeCursor::new(Point::new(Line(19), Column(0)));
        cursor = cursor.scroll(&term, 20);
        assert_eq!(cursor.point, Point::new(Line(-1), Column(0)));
        cursor = cursor.scroll(&term, 20);
        assert_eq!(cursor.point, Point::new(Line(-21), Column(0)));
        cursor = cursor.scroll(&term, 20);
        assert_eq!(cursor.point, Point::new(Line(-40), Column(0)));
        cursor = cursor.scroll(&term, 20);
        assert_eq!(cursor.point, Point::new(Line(-40), Column(0)));
    }

    // Scrolling down is clamped at the bottom of the viewport.
    #[test]
    fn scroll_over_bottom() {
        let mut term = term();
        // Create 40 lines of scrollback.
        for _ in 0..59 {
            term.newline();
        }
        let mut cursor = ViModeCursor::new(Point::new(Line(-40), Column(0)));
        cursor = cursor.scroll(&term, -20);
        assert_eq!(cursor.point, Point::new(Line(-20), Column(0)));
        cursor = cursor.scroll(&term, -20);
        assert_eq!(cursor.point, Point::new(Line(0), Column(0)));
        cursor = cursor.scroll(&term, -20);
        assert_eq!(cursor.point, Point::new(Line(19), Column(0)));
        cursor = cursor.scroll(&term, -20);
        assert_eq!(cursor.point, Point::new(Line(19), Column(0)));
    }

    // A wide semantic escape char is itself a stop for semantic motions.
    #[test]
    fn wide_semantic_char() {
        let mut term = term();
        term.set_semantic_escape_chars("-");
        term.grid_mut()[Line(0)][Column(0)].c = 'x';
        term.grid_mut()[Line(0)][Column(1)].c = 'x';
        term.grid_mut()[Line(0)][Column(2)].c = '-';
        term.grid_mut()[Line(0)][Column(2)].flags.insert(Flags::WIDE_CHAR);
        term.grid_mut()[Line(0)][Column(3)].c = ' ';
        term.grid_mut()[Line(0)][Column(3)].flags.insert(Flags::WIDE_CHAR_SPACER);
        term.grid_mut()[Line(0)][Column(4)].c = 'x';
        term.grid_mut()[Line(0)][Column(5)].c = 'x';
        // Test motion to the right.
        let mut cursor = ViModeCursor::new(Point::new(Line(0), Column(0)));
        cursor = cursor.motion(&mut term, ViMotion::SemanticRight);
        assert_eq!(cursor.point, Point::new(Line(0), Column(2)));
        let mut cursor = ViModeCursor::new(Point::new(Line(0), Column(2)));
        cursor = cursor.motion(&mut term, ViMotion::SemanticRight);
        assert_eq!(cursor.point, Point::new(Line(0), Column(4)));
        // Test motion to the left.
        let mut cursor = ViModeCursor::new(Point::new(Line(0), Column(5)));
        cursor = cursor.motion(&mut term, ViMotion::SemanticLeft);
        assert_eq!(cursor.point, Point::new(Line(0), Column(4)));
        let mut cursor = ViModeCursor::new(Point::new(Line(0), Column(4)));
        cursor = cursor.motion(&mut term, ViMotion::SemanticLeft);
        assert_eq!(cursor.point, Point::new(Line(0), Column(2)));
        let mut cursor = ViModeCursor::new(Point::new(Line(0), Column(2)));
        cursor = cursor.motion(&mut term, ViMotion::SemanticLeft);
        assert_eq!(cursor.point, Point::new(Line(0), Column(0)));
    }
}
| rust | {
"argument_definitions": [
{
"definitions": [
"pub struct Term<T> {\n /// Terminal focus controlling the cursor shape.\n pub is_focused: bool,\n\n /// Cursor for keyboard selection.\n pub vi_mode_cursor: ViModeCursor,\n\n pub selection: Option<Selection>,\n\n /// Currently active grid.\n ///\n /// Tracks the screen buffer currently in use. While the alternate screen buffer is active,\n /// this will be the alternate grid. Otherwise it is the primary screen buffer.\n grid: Grid<Cell>,\n\n /// Currently inactive grid.\n ///\n /// Opposite of the active grid. While the alternate screen buffer is active, this will be the\n /// primary grid. Otherwise it is the alternate screen buffer.\n inactive_grid: Grid<Cell>,\n\n /// Index into `charsets`, pointing to what ASCII is currently being mapped to.\n active_charset: CharsetIndex,\n\n /// Tabstops.\n tabs: TabStops,\n\n /// Mode flags.\n mode: TermMode,\n\n /// Scroll region.\n ///\n /// Range going from top to bottom of the terminal, indexed from the top of the viewport.\n scroll_region: Range<Line>,\n\n /// Modified terminal colors.\n colors: Colors,\n\n /// Current style of the cursor.\n cursor_style: Option<CursorStyle>,\n\n /// Proxy for sending events to the event loop.\n event_proxy: T,\n\n /// Current title of the window.\n title: Option<String>,\n\n /// Stack of saved window titles. When a title is popped from this stack, the `title` for the\n /// term is set.\n title_stack: Vec<Option<String>>,\n\n /// The stack for the keyboard modes.\n keyboard_mode_stack: Vec<KeyboardModes>,\n\n /// Currently inactive keyboard mode stack.\n inactive_keyboard_mode_stack: Vec<KeyboardModes>,\n\n /// Information about damaged cells.\n damage: TermDamageState,\n\n /// Config directly for the terminal.\n config: Config,\n}"
],
"name": "term",
"type": "&Term<T>"
},
{
"definitions": [
"pub struct Point<L = Line, C = Column> {\n pub line: L,\n pub column: C,\n}"
],
"name": "point",
"type": "Point"
},
{
"definitions": [
"pub enum Direction {\n Left,\n Right,\n}"
],
"name": "direction",
"type": "Direction"
},
{
"definitions": [
"pub enum Direction {\n Left,\n Right,\n}"
],
"name": "side",
"type": "Side"
}
],
"end_line": 365,
"name": "word",
"signature": "fn word(\n term: &Term<T>,\n mut point: Point,\n direction: Direction,\n side: Side,\n) -> Point",
"start_line": 327
} | {
"class_name": "",
"class_signature": ""
} |
grid_clamp | alacritty-master/alacritty_terminal/src/index.rs | pub fn grid_clamp(mut self, dimensions: &D, boundary: Boundary) -> Self {
let last_column = dimensions.last_column();
self.column = min(self.column, last_column);
let topmost_line = dimensions.topmost_line();
let bottommost_line = dimensions.bottommost_line();
match boundary {
Boundary::Cursor if self.line < 0 => Point::new(Line(0), Column(0)),
Boundary::Grid if self.line < topmost_line => Point::new(topmost_line, Column(0)),
Boundary::Cursor | Boundary::Grid if self.line > bottommost_line => {
Point::new(bottommost_line, last_column)
},
Boundary::None => {
self.line = self.line.grid_clamp(dimensions, boundary);
self
},
_ => self,
}
} | //! Line and Column newtypes for strongly typed tty/grid/terminal APIs.
/// Indexing types and implementations for Grid and Line.
use std::cmp::{max, min, Ord, Ordering};
use std::fmt;
use std::ops::{Add, AddAssign, Deref, Sub, SubAssign};
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
use crate::grid::Dimensions;
/// The side of a cell.
pub type Side = Direction;

/// Horizontal direction.
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
pub enum Direction {
    Left,
    Right,
}

impl Direction {
    /// Return the direction pointing the opposite way.
    #[must_use]
    pub fn opposite(self) -> Self {
        match self {
            Direction::Left => Direction::Right,
            Direction::Right => Direction::Left,
        }
    }
}
/// Terminal grid boundaries.
///
/// Selects how clamping in `Point::grid_clamp` / `Line::grid_clamp` treats
/// coordinates outside the grid.
pub enum Boundary {
    /// Cursor's range of motion in the grid.
    ///
    /// This is equal to the viewport when the user isn't scrolled into the history.
    Cursor,
    /// Topmost line in history until the bottommost line in the terminal.
    Grid,
    /// Unbounded.
    None,
}
/// Index in the grid using row, column notation.
#[derive(Debug, Clone, Copy, Default, Eq, PartialEq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct Point<L = Line, C = Column> {
    /// Row coordinate; defaults to the strongly typed [`Line`].
    pub line: L,
    /// Column coordinate; defaults to the strongly typed [`Column`].
    pub column: C,
}

impl<L, C> Point<L, C> {
    /// Create a new point from a line and column.
    pub fn new(line: L, column: C) -> Point<L, C> {
        Point { line, column }
    }
}
impl Point {
    /// Subtract a number of columns from a point.
    ///
    /// Moving past the first column wraps onto the preceding line(s); the
    /// result is clamped according to `boundary`.
    #[inline]
    #[must_use = "this returns the result of the operation, without modifying the original"]
    pub fn sub<D>(mut self, dimensions: &D, boundary: Boundary, rhs: usize) -> Self
    where
        D: Dimensions,
    {
        let cols = dimensions.columns();
        // Number of lines crossed while stepping `rhs` cells to the left
        // (ceiling division of the distance past column 0).
        let line_changes = (rhs + cols - 1).saturating_sub(self.column.0) / cols;
        self.line -= line_changes;
        // Wrap the column within the line width; adding `cols` first keeps
        // the unsigned arithmetic from underflowing.
        self.column = Column((cols + self.column.0 - rhs % cols) % cols);
        self.grid_clamp(dimensions, boundary)
    }

    /// Add a number of columns to a point.
    ///
    /// Moving past the last column wraps onto the following line(s); the
    /// result is clamped according to `boundary`.
    #[inline]
    #[must_use = "this returns the result of the operation, without modifying the original"]
    pub fn add<D>(mut self, dimensions: &D, boundary: Boundary, rhs: usize) -> Self
    where
        D: Dimensions,
    {
        let cols = dimensions.columns();
        self.line += (rhs + self.column.0) / cols;
        self.column = Column((self.column.0 + rhs) % cols);
        self.grid_clamp(dimensions, boundary)
    }

    /// Clamp a point to a grid boundary.
    #[inline]
    #[must_use = "this returns the result of the operation, without modifying the original"]
    pub fn grid_clamp<D>(mut self, dimensions: &D, boundary: Boundary) -> Self
    where
        D: Dimensions,
    {
        let last_column = dimensions.last_column();
        // The column is clamped unconditionally; only the line handling
        // depends on the chosen boundary.
        self.column = min(self.column, last_column);
        let topmost_line = dimensions.topmost_line();
        let bottommost_line = dimensions.bottommost_line();
        match boundary {
            // Cursor boundary: the viewport top (line 0) is the upper limit.
            Boundary::Cursor if self.line < 0 => Point::new(Line(0), Column(0)),
            // Grid boundary: the top of the scrollback is the upper limit.
            Boundary::Grid if self.line < topmost_line => Point::new(topmost_line, Column(0)),
            Boundary::Cursor | Boundary::Grid if self.line > bottommost_line => {
                Point::new(bottommost_line, last_column)
            },
            // Unbounded: delegate line handling to `Line::grid_clamp`.
            Boundary::None => {
                self.line = self.line.grid_clamp(dimensions, boundary);
                self
            },
            // Already within bounds.
            _ => self,
        }
    }
}
impl<L: Ord, C: Ord> PartialOrd for Point<L, C> {
    fn partial_cmp(&self, other: &Point<L, C>) -> Option<Ordering> {
        // Total order exists, so delegate to `Ord`.
        Some(self.cmp(other))
    }
}

impl<L: Ord, C: Ord> Ord for Point<L, C> {
    fn cmp(&self, other: &Point<L, C>) -> Ordering {
        // Order by line first, breaking ties by column.
        self.line.cmp(&other.line).then_with(|| self.column.cmp(&other.column))
    }
}
/// A line.
///
/// Newtype to avoid passing values incorrectly.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Default, Ord, PartialOrd)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct Line(pub i32);

impl Line {
    /// Clamp a line to a grid boundary.
    ///
    /// `Cursor` clamps to the viewport, `Grid` to the full scrollback range,
    /// and `None` wraps out-of-range lines back into the grid.
    #[must_use]
    pub fn grid_clamp<D: Dimensions>(self, dimensions: &D, boundary: Boundary) -> Self {
        match boundary {
            Boundary::Cursor => max(Line(0), min(dimensions.bottommost_line(), self)),
            Boundary::Grid => {
                let bottommost_line = dimensions.bottommost_line();
                let topmost_line = dimensions.topmost_line();
                max(topmost_line, min(bottommost_line, self))
            },
            Boundary::None => {
                let screen_lines = dimensions.screen_lines() as i32;
                let total_lines = dimensions.total_lines() as i32;
                if self >= screen_lines {
                    // Past the bottom: wrap around to an offset from the top.
                    let topmost_line = dimensions.topmost_line();
                    let extra = (self.0 - screen_lines) % total_lines;
                    topmost_line + extra
                } else {
                    // Above the viewport: wrap to an offset from the bottom.
                    let bottommost_line = dimensions.bottommost_line();
                    let extra = (self.0 - screen_lines + 1) % total_lines;
                    bottommost_line + extra
                }
            },
        }
    }
}
impl fmt::Display for Line {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", self.0)
}
}
impl From<usize> for Line {
    fn from(source: usize) -> Self {
        // NOTE(review): `as i32` truncates values above i32::MAX; presumably
        // fine since grid dimensions are far smaller — confirm for callers
        // passing arbitrary values.
        Self(source as i32)
    }
}
// Mixed-type arithmetic and comparison between `Line` (i32) and `usize`
// counts. All conversions use `as i32`, assuming counts fit in i32.

impl Add<usize> for Line {
    type Output = Line;

    #[inline]
    fn add(self, rhs: usize) -> Line {
        self + rhs as i32
    }
}

impl AddAssign<usize> for Line {
    #[inline]
    fn add_assign(&mut self, rhs: usize) {
        *self += rhs as i32;
    }
}

impl Sub<usize> for Line {
    type Output = Line;

    #[inline]
    fn sub(self, rhs: usize) -> Line {
        self - rhs as i32
    }
}

impl SubAssign<usize> for Line {
    #[inline]
    fn sub_assign(&mut self, rhs: usize) {
        *self -= rhs as i32;
    }
}

impl PartialOrd<usize> for Line {
    #[inline]
    fn partial_cmp(&self, other: &usize) -> Option<Ordering> {
        self.0.partial_cmp(&(*other as i32))
    }
}

impl PartialEq<usize> for Line {
    #[inline]
    fn eq(&self, other: &usize) -> bool {
        self.0.eq(&(*other as i32))
    }
}
/// A column.
///
/// Newtype to avoid passing values incorrectly.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Default, Ord, PartialOrd)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct Column(pub usize);
impl fmt::Display for Column {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", self.0)
}
}
macro_rules! ops {
($ty:ty, $construct:expr, $primitive:ty) => {
impl Deref for $ty {
type Target = $primitive;
#[inline]
fn deref(&self) -> &$primitive {
&self.0
}
}
impl From<$primitive> for $ty {
#[inline]
fn from(val: $primitive) -> $ty {
$construct(val)
}
}
impl Add<$ty> for $ty {
type Output = $ty;
#[inline]
fn add(self, rhs: $ty) -> $ty {
$construct(self.0 + rhs.0)
}
}
impl AddAssign<$ty> for $ty {
#[inline]
fn add_assign(&mut self, rhs: $ty) {
self.0 += rhs.0;
}
}
impl Add<$primitive> for $ty {
type Output = $ty;
#[inline]
fn add(self, rhs: $primitive) -> $ty {
$construct(self.0 + rhs)
}
}
impl AddAssign<$primitive> for $ty {
#[inline]
fn add_assign(&mut self, rhs: $primitive) {
self.0 += rhs
}
}
impl Sub<$ty> for $ty {
type Output = $ty;
#[inline]
fn sub(self, rhs: $ty) -> $ty {
$construct(self.0 - rhs.0)
}
}
impl SubAssign<$ty> for $ty {
#[inline]
fn sub_assign(&mut self, rhs: $ty) {
self.0 -= rhs.0;
}
}
impl Sub<$primitive> for $ty {
type Output = $ty;
#[inline]
fn sub(self, rhs: $primitive) -> $ty {
$construct(self.0 - rhs)
}
}
impl SubAssign<$primitive> for $ty {
#[inline]
fn sub_assign(&mut self, rhs: $primitive) {
self.0 -= rhs
}
}
impl PartialEq<$ty> for $primitive {
#[inline]
fn eq(&self, other: &$ty) -> bool {
self.eq(&other.0)
}
}
impl PartialEq<$primitive> for $ty {
#[inline]
fn eq(&self, other: &$primitive) -> bool {
self.0.eq(other)
}
}
impl PartialOrd<$ty> for $primitive {
#[inline]
fn partial_cmp(&self, other: &$ty) -> Option<Ordering> {
self.partial_cmp(&other.0)
}
}
impl PartialOrd<$primitive> for $ty {
#[inline]
fn partial_cmp(&self, other: &$primitive) -> Option<Ordering> {
self.0.partial_cmp(other)
}
}
};
}
ops!(Column, Column, usize);
ops!(Line, Line, i32);
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn location_ordering() {
assert!(Point::new(Line(0), Column(0)) == Point::new(Line(0), Column(0)));
assert!(Point::new(Line(1), Column(0)) > Point::new(Line(0), Column(0)));
assert!(Point::new(Line(0), Column(1)) > Point::new(Line(0), Column(0)));
assert!(Point::new(Line(1), Column(1)) > Point::new(Line(0), Column(0)));
assert!(Point::new(Line(1), Column(1)) > Point::new(Line(0), Column(1)));
assert!(Point::new(Line(1), Column(1)) > Point::new(Line(1), Column(0)));
assert!(Point::new(Line(0), Column(0)) > Point::new(Line(-1), Column(0)));
}
#[test]
fn sub() {
let size = (10, 42);
let point = Point::new(Line(0), Column(13));
let result = point.sub(&size, Boundary::Cursor, 1);
assert_eq!(result, Point::new(Line(0), point.column - 1));
}
#[test]
fn sub_wrap() {
let size = (10, 42);
let point = Point::new(Line(1), Column(0));
let result = point.sub(&size, Boundary::Cursor, 1);
assert_eq!(result, Point::new(Line(0), size.last_column()));
}
#[test]
fn sub_clamp() {
let size = (10, 42);
let point = Point::new(Line(0), Column(0));
let result = point.sub(&size, Boundary::Cursor, 1);
assert_eq!(result, point);
}
#[test]
fn sub_grid_clamp() {
let size = (0, 42);
let point = Point::new(Line(0), Column(0));
let result = point.sub(&size, Boundary::Grid, 1);
assert_eq!(result, point);
}
#[test]
fn sub_none_clamp() {
let size = (10, 42);
let point = Point::new(Line(0), Column(0));
let result = point.sub(&size, Boundary::None, 1);
assert_eq!(result, Point::new(Line(9), Column(41)));
}
#[test]
fn add() {
let size = (10, 42);
let point = Point::new(Line(0), Column(13));
let result = point.add(&size, Boundary::Cursor, 1);
assert_eq!(result, Point::new(Line(0), point.column + 1));
}
#[test]
fn add_wrap() {
let size = (10, 42);
let point = Point::new(Line(0), size.last_column());
let result = point.add(&size, Boundary::Cursor, 1);
assert_eq!(result, Point::new(Line(1), Column(0)));
}
#[test]
fn add_clamp() {
let size = (10, 42);
let point = Point::new(Line(9), Column(41));
let result = point.add(&size, Boundary::Cursor, 1);
assert_eq!(result, point);
}
#[test]
fn add_grid_clamp() {
let size = (10, 42);
let point = Point::new(Line(9), Column(41));
let result = point.add(&size, Boundary::Grid, 1);
assert_eq!(result, point);
}
#[test]
fn add_none_clamp() {
let size = (10, 42);
let point = Point::new(Line(9), Column(41));
let result = point.add(&size, Boundary::None, 1);
assert_eq!(result, Point::new(Line(0), Column(0)));
}
}
| rust | {
"argument_definitions": [
{
"definitions": [
"pub enum Boundary {\n /// Cursor's range of motion in the grid.\n ///\n /// This is equal to the viewport when the user isn't scrolled into the history.\n Cursor,\n\n /// Topmost line in history until the bottommost line in the terminal.\n Grid,\n\n /// Unbounded.\n None,\n}"
],
"name": "boundary",
"type": "Boundary"
}
],
"end_line": 114,
"name": "grid_clamp",
"signature": "pub fn grid_clamp(mut self, dimensions: &D, boundary: Boundary) -> Self",
"start_line": 92
} | {
"class_name": "impl Point {\n /// Subtract a number of columns from a point.\n #[inline]\n #[must_use = \"this returns the result of the operation, without modifying the original\"]\n pub fn sub<D>(mut self, dimensions: &D, boundary: Boundary, rhs: usize) -> Self\n where\n D: Dimensions,\n {\n let cols = dimensions.columns();\n let line_changes = (rhs + cols - 1).saturating_sub(self.column.0) / cols;\n self.line -= line_changes;\n self.column = Column((cols + self.column.0 - rhs % cols) % cols);\n self.grid_clamp(dimensions, boundary)\n }\n\n /// Add a number of columns to a point.\n #[inline]\n #[must_use = \"this returns the result of the operation, without modifying the original\"]\n pub fn add<D>(mut self, dimensions: &D, boundary: Boundary, rhs: usize) -> Self\n where\n D: Dimensions,\n {\n let cols = dimensions.columns();\n self.line += (rhs + self.column.0) / cols;\n self.column = Column((self.column.0 + rhs) % cols);\n self.grid_clamp(dimensions, boundary)\n }\n\n /// Clamp a point to a grid boundary.\n #[inline]\n #[must_use = \"this returns the result of the operation, without modifying the original\"]\n pub fn grid_clamp<D>(mut self, dimensions: &D, boundary: Boundary) -> Self\n where\n D: Dimensions,\n {\n let last_column = dimensions.last_column();\n self.column = min(self.column, last_column);\n\n let topmost_line = dimensions.topmost_line();\n let bottommost_line = dimensions.bottommost_line();\n\n match boundary {\n Boundary::Cursor if self.line < 0 => Point::new(Line(0), Column(0)),\n Boundary::Grid if self.line < topmost_line => Point::new(topmost_line, Column(0)),\n Boundary::Cursor | Boundary::Grid if self.line > bottommost_line => {\n Point::new(bottommost_line, last_column)\n },\n Boundary::None => {\n self.line = self.line.grid_clamp(dimensions, boundary);\n self\n },\n _ => self,\n }\n }\n}",
"class_signature": "impl Point"
} |
grid_clamp | alacritty-master/alacritty_terminal/src/index.rs | pub fn grid_clamp(self, dimensions: &D, boundary: Boundary) -> Self {
match boundary {
Boundary::Cursor => max(Line(0), min(dimensions.bottommost_line(), self)),
Boundary::Grid => {
let bottommost_line = dimensions.bottommost_line();
let topmost_line = dimensions.topmost_line();
max(topmost_line, min(bottommost_line, self))
},
Boundary::None => {
let screen_lines = dimensions.screen_lines() as i32;
let total_lines = dimensions.total_lines() as i32;
if self >= screen_lines {
let topmost_line = dimensions.topmost_line();
let extra = (self.0 - screen_lines) % total_lines;
topmost_line + extra
} else {
let bottommost_line = dimensions.bottommost_line();
let extra = (self.0 - screen_lines + 1) % total_lines;
bottommost_line + extra
}
},
}
} | //! Line and Column newtypes for strongly typed tty/grid/terminal APIs.
/// Indexing types and implementations for Grid and Line.
use std::cmp::{max, min, Ord, Ordering};
use std::fmt;
use std::ops::{Add, AddAssign, Deref, Sub, SubAssign};
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
use crate::grid::Dimensions;
/// The side of a cell.
pub type Side = Direction;
/// Horizontal direction.
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
pub enum Direction {
Left,
Right,
}
impl Direction {
#[must_use]
pub fn opposite(self) -> Self {
match self {
Side::Right => Side::Left,
Side::Left => Side::Right,
}
}
}
/// Terminal grid boundaries.
pub enum Boundary {
/// Cursor's range of motion in the grid.
///
/// This is equal to the viewport when the user isn't scrolled into the history.
Cursor,
/// Topmost line in history until the bottommost line in the terminal.
Grid,
/// Unbounded.
None,
}
/// Index in the grid using row, column notation.
#[derive(Debug, Clone, Copy, Default, Eq, PartialEq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct Point<L = Line, C = Column> {
pub line: L,
pub column: C,
}
impl<L, C> Point<L, C> {
pub fn new(line: L, column: C) -> Point<L, C> {
Point { line, column }
}
}
impl Point {
/// Subtract a number of columns from a point.
#[inline]
#[must_use = "this returns the result of the operation, without modifying the original"]
pub fn sub<D>(mut self, dimensions: &D, boundary: Boundary, rhs: usize) -> Self
where
D: Dimensions,
{
let cols = dimensions.columns();
let line_changes = (rhs + cols - 1).saturating_sub(self.column.0) / cols;
self.line -= line_changes;
self.column = Column((cols + self.column.0 - rhs % cols) % cols);
self.grid_clamp(dimensions, boundary)
}
/// Add a number of columns to a point.
#[inline]
#[must_use = "this returns the result of the operation, without modifying the original"]
pub fn add<D>(mut self, dimensions: &D, boundary: Boundary, rhs: usize) -> Self
where
D: Dimensions,
{
let cols = dimensions.columns();
self.line += (rhs + self.column.0) / cols;
self.column = Column((self.column.0 + rhs) % cols);
self.grid_clamp(dimensions, boundary)
}
/// Clamp a point to a grid boundary.
#[inline]
#[must_use = "this returns the result of the operation, without modifying the original"]
pub fn grid_clamp<D>(mut self, dimensions: &D, boundary: Boundary) -> Self
where
D: Dimensions,
{
let last_column = dimensions.last_column();
self.column = min(self.column, last_column);
let topmost_line = dimensions.topmost_line();
let bottommost_line = dimensions.bottommost_line();
match boundary {
Boundary::Cursor if self.line < 0 => Point::new(Line(0), Column(0)),
Boundary::Grid if self.line < topmost_line => Point::new(topmost_line, Column(0)),
Boundary::Cursor | Boundary::Grid if self.line > bottommost_line => {
Point::new(bottommost_line, last_column)
},
Boundary::None => {
self.line = self.line.grid_clamp(dimensions, boundary);
self
},
_ => self,
}
}
}
impl<L: Ord, C: Ord> PartialOrd for Point<L, C> {
fn partial_cmp(&self, other: &Point<L, C>) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl<L: Ord, C: Ord> Ord for Point<L, C> {
fn cmp(&self, other: &Point<L, C>) -> Ordering {
match (self.line.cmp(&other.line), self.column.cmp(&other.column)) {
(Ordering::Equal, ord) | (ord, _) => ord,
}
}
}
/// A line.
///
/// Newtype to avoid passing values incorrectly.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Default, Ord, PartialOrd)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct Line(pub i32);
impl Line {
/// Clamp a line to a grid boundary.
#[must_use]
pub fn grid_clamp<D: Dimensions>(self, dimensions: &D, boundary: Boundary) -> Self {
match boundary {
Boundary::Cursor => max(Line(0), min(dimensions.bottommost_line(), self)),
Boundary::Grid => {
let bottommost_line = dimensions.bottommost_line();
let topmost_line = dimensions.topmost_line();
max(topmost_line, min(bottommost_line, self))
},
Boundary::None => {
let screen_lines = dimensions.screen_lines() as i32;
let total_lines = dimensions.total_lines() as i32;
if self >= screen_lines {
let topmost_line = dimensions.topmost_line();
let extra = (self.0 - screen_lines) % total_lines;
topmost_line + extra
} else {
let bottommost_line = dimensions.bottommost_line();
let extra = (self.0 - screen_lines + 1) % total_lines;
bottommost_line + extra
}
},
}
}
}
impl fmt::Display for Line {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", self.0)
}
}
impl From<usize> for Line {
fn from(source: usize) -> Self {
Self(source as i32)
}
}
impl Add<usize> for Line {
type Output = Line;
#[inline]
fn add(self, rhs: usize) -> Line {
self + rhs as i32
}
}
impl AddAssign<usize> for Line {
#[inline]
fn add_assign(&mut self, rhs: usize) {
*self += rhs as i32;
}
}
impl Sub<usize> for Line {
type Output = Line;
#[inline]
fn sub(self, rhs: usize) -> Line {
self - rhs as i32
}
}
impl SubAssign<usize> for Line {
#[inline]
fn sub_assign(&mut self, rhs: usize) {
*self -= rhs as i32;
}
}
impl PartialOrd<usize> for Line {
#[inline]
fn partial_cmp(&self, other: &usize) -> Option<Ordering> {
self.0.partial_cmp(&(*other as i32))
}
}
impl PartialEq<usize> for Line {
#[inline]
fn eq(&self, other: &usize) -> bool {
self.0.eq(&(*other as i32))
}
}
/// A column.
///
/// Newtype to avoid passing values incorrectly.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Default, Ord, PartialOrd)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct Column(pub usize);
impl fmt::Display for Column {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", self.0)
}
}
macro_rules! ops {
($ty:ty, $construct:expr, $primitive:ty) => {
impl Deref for $ty {
type Target = $primitive;
#[inline]
fn deref(&self) -> &$primitive {
&self.0
}
}
impl From<$primitive> for $ty {
#[inline]
fn from(val: $primitive) -> $ty {
$construct(val)
}
}
impl Add<$ty> for $ty {
type Output = $ty;
#[inline]
fn add(self, rhs: $ty) -> $ty {
$construct(self.0 + rhs.0)
}
}
impl AddAssign<$ty> for $ty {
#[inline]
fn add_assign(&mut self, rhs: $ty) {
self.0 += rhs.0;
}
}
impl Add<$primitive> for $ty {
type Output = $ty;
#[inline]
fn add(self, rhs: $primitive) -> $ty {
$construct(self.0 + rhs)
}
}
impl AddAssign<$primitive> for $ty {
#[inline]
fn add_assign(&mut self, rhs: $primitive) {
self.0 += rhs
}
}
impl Sub<$ty> for $ty {
type Output = $ty;
#[inline]
fn sub(self, rhs: $ty) -> $ty {
$construct(self.0 - rhs.0)
}
}
impl SubAssign<$ty> for $ty {
#[inline]
fn sub_assign(&mut self, rhs: $ty) {
self.0 -= rhs.0;
}
}
impl Sub<$primitive> for $ty {
type Output = $ty;
#[inline]
fn sub(self, rhs: $primitive) -> $ty {
$construct(self.0 - rhs)
}
}
impl SubAssign<$primitive> for $ty {
#[inline]
fn sub_assign(&mut self, rhs: $primitive) {
self.0 -= rhs
}
}
impl PartialEq<$ty> for $primitive {
#[inline]
fn eq(&self, other: &$ty) -> bool {
self.eq(&other.0)
}
}
impl PartialEq<$primitive> for $ty {
#[inline]
fn eq(&self, other: &$primitive) -> bool {
self.0.eq(other)
}
}
impl PartialOrd<$ty> for $primitive {
#[inline]
fn partial_cmp(&self, other: &$ty) -> Option<Ordering> {
self.partial_cmp(&other.0)
}
}
impl PartialOrd<$primitive> for $ty {
#[inline]
fn partial_cmp(&self, other: &$primitive) -> Option<Ordering> {
self.0.partial_cmp(other)
}
}
};
}
ops!(Column, Column, usize);
ops!(Line, Line, i32);
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn location_ordering() {
assert!(Point::new(Line(0), Column(0)) == Point::new(Line(0), Column(0)));
assert!(Point::new(Line(1), Column(0)) > Point::new(Line(0), Column(0)));
assert!(Point::new(Line(0), Column(1)) > Point::new(Line(0), Column(0)));
assert!(Point::new(Line(1), Column(1)) > Point::new(Line(0), Column(0)));
assert!(Point::new(Line(1), Column(1)) > Point::new(Line(0), Column(1)));
assert!(Point::new(Line(1), Column(1)) > Point::new(Line(1), Column(0)));
assert!(Point::new(Line(0), Column(0)) > Point::new(Line(-1), Column(0)));
}
#[test]
fn sub() {
let size = (10, 42);
let point = Point::new(Line(0), Column(13));
let result = point.sub(&size, Boundary::Cursor, 1);
assert_eq!(result, Point::new(Line(0), point.column - 1));
}
#[test]
fn sub_wrap() {
let size = (10, 42);
let point = Point::new(Line(1), Column(0));
let result = point.sub(&size, Boundary::Cursor, 1);
assert_eq!(result, Point::new(Line(0), size.last_column()));
}
#[test]
fn sub_clamp() {
let size = (10, 42);
let point = Point::new(Line(0), Column(0));
let result = point.sub(&size, Boundary::Cursor, 1);
assert_eq!(result, point);
}
#[test]
fn sub_grid_clamp() {
let size = (0, 42);
let point = Point::new(Line(0), Column(0));
let result = point.sub(&size, Boundary::Grid, 1);
assert_eq!(result, point);
}
#[test]
fn sub_none_clamp() {
let size = (10, 42);
let point = Point::new(Line(0), Column(0));
let result = point.sub(&size, Boundary::None, 1);
assert_eq!(result, Point::new(Line(9), Column(41)));
}
#[test]
fn add() {
let size = (10, 42);
let point = Point::new(Line(0), Column(13));
let result = point.add(&size, Boundary::Cursor, 1);
assert_eq!(result, Point::new(Line(0), point.column + 1));
}
#[test]
fn add_wrap() {
let size = (10, 42);
let point = Point::new(Line(0), size.last_column());
let result = point.add(&size, Boundary::Cursor, 1);
assert_eq!(result, Point::new(Line(1), Column(0)));
}
#[test]
fn add_clamp() {
let size = (10, 42);
let point = Point::new(Line(9), Column(41));
let result = point.add(&size, Boundary::Cursor, 1);
assert_eq!(result, point);
}
#[test]
fn add_grid_clamp() {
let size = (10, 42);
let point = Point::new(Line(9), Column(41));
let result = point.add(&size, Boundary::Grid, 1);
assert_eq!(result, point);
}
#[test]
fn add_none_clamp() {
let size = (10, 42);
let point = Point::new(Line(9), Column(41));
let result = point.add(&size, Boundary::None, 1);
assert_eq!(result, Point::new(Line(0), Column(0)));
}
}
| rust | {
"argument_definitions": [
{
"definitions": [
"pub enum Boundary {\n /// Cursor's range of motion in the grid.\n ///\n /// This is equal to the viewport when the user isn't scrolled into the history.\n Cursor,\n\n /// Topmost line in history until the bottommost line in the terminal.\n Grid,\n\n /// Unbounded.\n None,\n}"
],
"name": "boundary",
"type": "Boundary"
}
],
"end_line": 164,
"name": "grid_clamp",
"signature": "pub fn grid_clamp(self, dimensions: &D, boundary: Boundary) -> Self",
"start_line": 141
} | {
"class_name": "impl Line {\n /// Clamp a line to a grid boundary.\n #[must_use]\n pub fn grid_clamp<D: Dimensions>(self, dimensions: &D, boundary: Boundary) -> Self {\n match boundary {\n Boundary::Cursor => max(Line(0), min(dimensions.bottommost_line(), self)),\n Boundary::Grid => {\n let bottommost_line = dimensions.bottommost_line();\n let topmost_line = dimensions.topmost_line();\n max(topmost_line, min(bottommost_line, self))\n },\n Boundary::None => {\n let screen_lines = dimensions.screen_lines() as i32;\n let total_lines = dimensions.total_lines() as i32;\n\n if self >= screen_lines {\n let topmost_line = dimensions.topmost_line();\n let extra = (self.0 - screen_lines) % total_lines;\n topmost_line + extra\n } else {\n let bottommost_line = dimensions.bottommost_line();\n let extra = (self.0 - screen_lines + 1) % total_lines;\n bottommost_line + extra\n }\n },\n }\n }\n}",
"class_signature": "impl Line"
} |
new | alacritty-master/alacritty_terminal/src/grid/row.rs | pub fn new(columns: usize) -> Row<T> {
debug_assert!(columns >= 1);
let mut inner: Vec<T> = Vec::with_capacity(columns);
// This is a slightly optimized version of `std::vec::Vec::resize`.
unsafe {
let mut ptr = inner.as_mut_ptr();
for _ in 1..columns {
ptr::write(ptr, T::default());
ptr = ptr.offset(1);
}
ptr::write(ptr, T::default());
inner.set_len(columns);
}
Row { inner, occ: 0 }
} | //! Defines the Row type which makes up lines in the grid.
use std::cmp::{max, min};
use std::ops::{Index, IndexMut, Range, RangeFrom, RangeFull, RangeTo, RangeToInclusive};
use std::{ptr, slice};
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
use crate::grid::GridCell;
use crate::index::Column;
use crate::term::cell::ResetDiscriminant;
/// A row in the grid.
#[derive(Default, Clone, Debug)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct Row<T> {
inner: Vec<T>,
/// Maximum number of occupied entries.
///
/// This is the upper bound on the number of elements in the row, which have been modified
/// since the last reset. All cells after this point are guaranteed to be equal.
pub(crate) occ: usize,
}
impl<T: PartialEq> PartialEq for Row<T> {
fn eq(&self, other: &Self) -> bool {
self.inner == other.inner
}
}
impl<T: Default> Row<T> {
/// Create a new terminal row.
///
/// Ideally the `template` should be `Copy` in all performance sensitive scenarios.
pub fn new(columns: usize) -> Row<T> {
debug_assert!(columns >= 1);
let mut inner: Vec<T> = Vec::with_capacity(columns);
// This is a slightly optimized version of `std::vec::Vec::resize`.
unsafe {
let mut ptr = inner.as_mut_ptr();
for _ in 1..columns {
ptr::write(ptr, T::default());
ptr = ptr.offset(1);
}
ptr::write(ptr, T::default());
inner.set_len(columns);
}
Row { inner, occ: 0 }
}
/// Increase the number of columns in the row.
#[inline]
pub fn grow(&mut self, columns: usize) {
if self.inner.len() >= columns {
return;
}
self.inner.resize_with(columns, T::default);
}
/// Reduce the number of columns in the row.
///
/// This will return all non-empty cells that were removed.
pub fn shrink(&mut self, columns: usize) -> Option<Vec<T>>
where
T: GridCell,
{
if self.inner.len() <= columns {
return None;
}
// Split off cells for a new row.
let mut new_row = self.inner.split_off(columns);
let index = new_row.iter().rposition(|c| !c.is_empty()).map_or(0, |i| i + 1);
new_row.truncate(index);
self.occ = min(self.occ, columns);
if new_row.is_empty() {
None
} else {
Some(new_row)
}
}
/// Reset all cells in the row to the `template` cell.
#[inline]
pub fn reset<D>(&mut self, template: &T)
where
T: ResetDiscriminant<D> + GridCell,
D: PartialEq,
{
debug_assert!(!self.inner.is_empty());
// Mark all cells as dirty if template cell changed.
let len = self.inner.len();
if self.inner[len - 1].discriminant() != template.discriminant() {
self.occ = len;
}
// Reset every dirty cell in the row.
for item in &mut self.inner[0..self.occ] {
item.reset(template);
}
self.occ = 0;
}
}
#[allow(clippy::len_without_is_empty)]
impl<T> Row<T> {
#[inline]
pub fn from_vec(vec: Vec<T>, occ: usize) -> Row<T> {
Row { inner: vec, occ }
}
#[inline]
pub fn len(&self) -> usize {
self.inner.len()
}
#[inline]
pub fn last(&self) -> Option<&T> {
self.inner.last()
}
#[inline]
pub fn last_mut(&mut self) -> Option<&mut T> {
self.occ = self.inner.len();
self.inner.last_mut()
}
#[inline]
pub fn append(&mut self, vec: &mut Vec<T>)
where
T: GridCell,
{
self.occ += vec.len();
self.inner.append(vec);
}
#[inline]
pub fn append_front(&mut self, mut vec: Vec<T>) {
self.occ += vec.len();
vec.append(&mut self.inner);
self.inner = vec;
}
/// Check if all cells in the row are empty.
#[inline]
pub fn is_clear(&self) -> bool
where
T: GridCell,
{
self.inner.iter().all(GridCell::is_empty)
}
#[inline]
pub fn front_split_off(&mut self, at: usize) -> Vec<T> {
self.occ = self.occ.saturating_sub(at);
let mut split = self.inner.split_off(at);
std::mem::swap(&mut split, &mut self.inner);
split
}
}
impl<'a, T> IntoIterator for &'a Row<T> {
type IntoIter = slice::Iter<'a, T>;
type Item = &'a T;
#[inline]
fn into_iter(self) -> slice::Iter<'a, T> {
self.inner.iter()
}
}
impl<'a, T> IntoIterator for &'a mut Row<T> {
type IntoIter = slice::IterMut<'a, T>;
type Item = &'a mut T;
#[inline]
fn into_iter(self) -> slice::IterMut<'a, T> {
self.occ = self.len();
self.inner.iter_mut()
}
}
impl<T> Index<Column> for Row<T> {
type Output = T;
#[inline]
fn index(&self, index: Column) -> &T {
&self.inner[index.0]
}
}
impl<T> IndexMut<Column> for Row<T> {
#[inline]
fn index_mut(&mut self, index: Column) -> &mut T {
self.occ = max(self.occ, *index + 1);
&mut self.inner[index.0]
}
}
impl<T> Index<Range<Column>> for Row<T> {
type Output = [T];
#[inline]
fn index(&self, index: Range<Column>) -> &[T] {
&self.inner[(index.start.0)..(index.end.0)]
}
}
impl<T> IndexMut<Range<Column>> for Row<T> {
#[inline]
fn index_mut(&mut self, index: Range<Column>) -> &mut [T] {
self.occ = max(self.occ, *index.end);
&mut self.inner[(index.start.0)..(index.end.0)]
}
}
impl<T> Index<RangeTo<Column>> for Row<T> {
type Output = [T];
#[inline]
fn index(&self, index: RangeTo<Column>) -> &[T] {
&self.inner[..(index.end.0)]
}
}
impl<T> IndexMut<RangeTo<Column>> for Row<T> {
#[inline]
fn index_mut(&mut self, index: RangeTo<Column>) -> &mut [T] {
self.occ = max(self.occ, *index.end);
&mut self.inner[..(index.end.0)]
}
}
impl<T> Index<RangeFrom<Column>> for Row<T> {
type Output = [T];
#[inline]
fn index(&self, index: RangeFrom<Column>) -> &[T] {
&self.inner[(index.start.0)..]
}
}
impl<T> IndexMut<RangeFrom<Column>> for Row<T> {
#[inline]
fn index_mut(&mut self, index: RangeFrom<Column>) -> &mut [T] {
self.occ = self.len();
&mut self.inner[(index.start.0)..]
}
}
impl<T> Index<RangeFull> for Row<T> {
type Output = [T];
#[inline]
fn index(&self, _: RangeFull) -> &[T] {
&self.inner[..]
}
}
impl<T> IndexMut<RangeFull> for Row<T> {
#[inline]
fn index_mut(&mut self, _: RangeFull) -> &mut [T] {
self.occ = self.len();
&mut self.inner[..]
}
}
impl<T> Index<RangeToInclusive<Column>> for Row<T> {
type Output = [T];
#[inline]
fn index(&self, index: RangeToInclusive<Column>) -> &[T] {
&self.inner[..=(index.end.0)]
}
}
impl<T> IndexMut<RangeToInclusive<Column>> for Row<T> {
#[inline]
fn index_mut(&mut self, index: RangeToInclusive<Column>) -> &mut [T] {
self.occ = max(self.occ, *index.end + 1);
&mut self.inner[..=(index.end.0)]
}
}
| rust | {
"argument_definitions": [],
"end_line": 56,
"name": "new",
"signature": "pub fn new(columns: usize) -> Row<T>",
"start_line": 37
} | {
"class_name": "impl<T: Default> Row<T> {\n /// Create a new terminal row.\n ///\n /// Ideally the `template` should be `Copy` in all performance sensitive scenarios.\n pub fn new(columns: usize) -> Row<T> {\n debug_assert!(columns >= 1);\n\n let mut inner: Vec<T> = Vec::with_capacity(columns);\n\n // This is a slightly optimized version of `std::vec::Vec::resize`.\n unsafe {\n let mut ptr = inner.as_mut_ptr();\n\n for _ in 1..columns {\n ptr::write(ptr, T::default());\n ptr = ptr.offset(1);\n }\n ptr::write(ptr, T::default());\n\n inner.set_len(columns);\n }\n\n Row { inner, occ: 0 }\n }\n\n /// Increase the number of columns in the row.\n #[inline]\n pub fn grow(&mut self, columns: usize) {\n if self.inner.len() >= columns {\n return;\n }\n\n self.inner.resize_with(columns, T::default);\n }\n\n /// Reduce the number of columns in the row.\n ///\n /// This will return all non-empty cells that were removed.\n pub fn shrink(&mut self, columns: usize) -> Option<Vec<T>>\n where\n T: GridCell,\n {\n if self.inner.len() <= columns {\n return None;\n }\n\n // Split off cells for a new row.\n let mut new_row = self.inner.split_off(columns);\n let index = new_row.iter().rposition(|c| !c.is_empty()).map_or(0, |i| i + 1);\n new_row.truncate(index);\n\n self.occ = min(self.occ, columns);\n\n if new_row.is_empty() {\n None\n } else {\n Some(new_row)\n }\n }\n\n /// Reset all cells in the row to the `template` cell.\n #[inline]\n pub fn reset<D>(&mut self, template: &T)\n where\n T: ResetDiscriminant<D> + GridCell,\n D: PartialEq,\n {\n debug_assert!(!self.inner.is_empty());\n\n // Mark all cells as dirty if template cell changed.\n let len = self.inner.len();\n if self.inner[len - 1].discriminant() != template.discriminant() {\n self.occ = len;\n }\n\n // Reset every dirty cell in the row.\n for item in &mut self.inner[0..self.occ] {\n item.reset(template);\n }\n\n self.occ = 0;\n }\n}",
"class_signature": "impl<T: Default> Row<T>"
} |
from | alacritty-master/alacritty_terminal/src/term/mod.rs | fn from(value: KeyboardModes) -> Self {
let mut mode = Self::empty();
let disambiguate_esc_codes = value.contains(KeyboardModes::DISAMBIGUATE_ESC_CODES);
mode.set(TermMode::DISAMBIGUATE_ESC_CODES, disambiguate_esc_codes);
let report_event_types = value.contains(KeyboardModes::REPORT_EVENT_TYPES);
mode.set(TermMode::REPORT_EVENT_TYPES, report_event_types);
let report_alternate_keys = value.contains(KeyboardModes::REPORT_ALTERNATE_KEYS);
mode.set(TermMode::REPORT_ALTERNATE_KEYS, report_alternate_keys);
let report_all_keys_as_esc = value.contains(KeyboardModes::REPORT_ALL_KEYS_AS_ESC);
mode.set(TermMode::REPORT_ALL_KEYS_AS_ESC, report_all_keys_as_esc);
let report_associated_text = value.contains(KeyboardModes::REPORT_ASSOCIATED_TEXT);
mode.set(TermMode::REPORT_ASSOCIATED_TEXT, report_associated_text);
mode
} | //! Exports the `Term` type which is a high-level API for the Grid.
use std::ops::{Index, IndexMut, Range};
use std::sync::Arc;
use std::{cmp, mem, ptr, slice, str};
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
use base64::engine::general_purpose::STANDARD as Base64;
use base64::Engine;
use bitflags::bitflags;
use log::{debug, trace};
use unicode_width::UnicodeWidthChar;
use crate::event::{Event, EventListener};
use crate::grid::{Dimensions, Grid, GridIterator, Scroll};
use crate::index::{self, Boundary, Column, Direction, Line, Point, Side};
use crate::selection::{Selection, SelectionRange, SelectionType};
use crate::term::cell::{Cell, Flags, LineLength};
use crate::term::color::Colors;
use crate::vi_mode::{ViModeCursor, ViMotion};
use crate::vte::ansi::{
self, Attr, CharsetIndex, Color, CursorShape, CursorStyle, Handler, Hyperlink, KeyboardModes,
KeyboardModesApplyBehavior, NamedColor, NamedMode, NamedPrivateMode, PrivateMode, Rgb,
StandardCharset,
};
pub mod cell;
pub mod color;
pub mod search;
/// Minimum number of columns.
///
/// A minimum of 2 is necessary to hold fullwidth unicode characters.
pub const MIN_COLUMNS: usize = 2;
/// Minimum number of visible lines.
pub const MIN_SCREEN_LINES: usize = 1;
/// Max size of the window title stack.
const TITLE_STACK_MAX_DEPTH: usize = 4096;
/// Default semantic escape characters.
pub const SEMANTIC_ESCAPE_CHARS: &str = ",│`|:\"' ()[]{}<>\t";
/// Max size of the keyboard modes.
const KEYBOARD_MODE_STACK_MAX_DEPTH: usize = TITLE_STACK_MAX_DEPTH;
/// Default tab interval, corresponding to terminfo `it` value.
const INITIAL_TABSTOPS: usize = 8;
bitflags! {
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub struct TermMode: u32 {
const NONE = 0;
const SHOW_CURSOR = 1;
const APP_CURSOR = 1 << 1;
const APP_KEYPAD = 1 << 2;
const MOUSE_REPORT_CLICK = 1 << 3;
const BRACKETED_PASTE = 1 << 4;
const SGR_MOUSE = 1 << 5;
const MOUSE_MOTION = 1 << 6;
const LINE_WRAP = 1 << 7;
const LINE_FEED_NEW_LINE = 1 << 8;
const ORIGIN = 1 << 9;
const INSERT = 1 << 10;
const FOCUS_IN_OUT = 1 << 11;
const ALT_SCREEN = 1 << 12;
const MOUSE_DRAG = 1 << 13;
const UTF8_MOUSE = 1 << 14;
const ALTERNATE_SCROLL = 1 << 15;
const VI = 1 << 16;
const URGENCY_HINTS = 1 << 17;
const DISAMBIGUATE_ESC_CODES = 1 << 18;
const REPORT_EVENT_TYPES = 1 << 19;
const REPORT_ALTERNATE_KEYS = 1 << 20;
const REPORT_ALL_KEYS_AS_ESC = 1 << 21;
const REPORT_ASSOCIATED_TEXT = 1 << 22;
const MOUSE_MODE = Self::MOUSE_REPORT_CLICK.bits() | Self::MOUSE_MOTION.bits() | Self::MOUSE_DRAG.bits();
const KITTY_KEYBOARD_PROTOCOL = Self::DISAMBIGUATE_ESC_CODES.bits()
| Self::REPORT_EVENT_TYPES.bits()
| Self::REPORT_ALTERNATE_KEYS.bits()
| Self::REPORT_ALL_KEYS_AS_ESC.bits()
| Self::REPORT_ASSOCIATED_TEXT.bits();
const ANY = u32::MAX;
}
}
impl From<KeyboardModes> for TermMode {
    /// Translate kitty keyboard protocol flags into their matching
    /// [`TermMode`] bits, leaving all other mode bits clear.
    fn from(value: KeyboardModes) -> Self {
        // Pair each protocol flag with the terminal mode bit it controls.
        let mappings = [
            (KeyboardModes::DISAMBIGUATE_ESC_CODES, TermMode::DISAMBIGUATE_ESC_CODES),
            (KeyboardModes::REPORT_EVENT_TYPES, TermMode::REPORT_EVENT_TYPES),
            (KeyboardModes::REPORT_ALTERNATE_KEYS, TermMode::REPORT_ALTERNATE_KEYS),
            (KeyboardModes::REPORT_ALL_KEYS_AS_ESC, TermMode::REPORT_ALL_KEYS_AS_ESC),
            (KeyboardModes::REPORT_ASSOCIATED_TEXT, TermMode::REPORT_ASSOCIATED_TEXT),
        ];

        let mut mode = Self::empty();
        for (keyboard_flag, term_flag) in mappings {
            mode.set(term_flag, value.contains(keyboard_flag));
        }
        mode
    }
}
impl Default for TermMode {
    /// Mode flags enabled on a freshly created terminal.
    fn default() -> TermMode {
        TermMode::SHOW_CURSOR
            .union(TermMode::LINE_WRAP)
            .union(TermMode::ALTERNATE_SCROLL)
            .union(TermMode::URGENCY_HINTS)
    }
}
/// Convert a terminal point to a viewport relative point.
///
/// Returns `None` when the point lies above the top of the visible viewport
/// (i.e. the shifted line index would be negative).
#[inline]
pub fn point_to_viewport(display_offset: usize, point: Point) -> Option<Point<usize>> {
    let shifted_line = point.line.0 + display_offset as i32;
    match usize::try_from(shifted_line) {
        Ok(line) => Some(Point::new(line, point.column)),
        Err(_) => None,
    }
}
/// Convert a viewport relative point to a terminal point.
///
/// Inverse of [`point_to_viewport`]: lines scrolled into history map to
/// negative [`Line`] values.
#[inline]
pub fn viewport_to_point(display_offset: usize, point: Point<usize>) -> Point {
    Point::new(Line(point.line as i32) - display_offset, point.column)
}
/// Damaged span of a single terminal line.
///
/// A line with `left > right` is considered undamaged; see
/// `is_damaged` on the impl.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct LineDamageBounds {
    /// Damaged line number.
    pub line: usize,
    /// Leftmost damaged column.
    pub left: usize,
    /// Rightmost damaged column.
    pub right: usize,
}
impl LineDamageBounds {
#[inline]
pub fn new(line: usize, left: usize, right: usize) -> Self {
Self { line, left, right }
}
#[inline]
pub fn undamaged(line: usize, num_cols: usize) -> Self {
Self { line, left: num_cols, right: 0 }
}
#[inline]
pub fn reset(&mut self, num_cols: usize) {
*self = Self::undamaged(self.line, num_cols);
}
#[inline]
pub fn expand(&mut self, left: usize, right: usize) {
self.left = cmp::min(self.left, left);
self.right = cmp::max(self.right, right);
}
#[inline]
pub fn is_damaged(&self) -> bool {
self.left <= self.right
}
}
/// Terminal damage information collected since the last [`Term::reset_damage`] call.
#[derive(Debug)]
pub enum TermDamage<'a> {
    /// The entire terminal is damaged.
    Full,
    /// Iterator over damaged lines in the terminal.
    Partial(TermDamageIterator<'a>),
}
/// Iterator over the terminal's viewport damaged lines.
///
/// Yields only lines that are actually damaged, translated into
/// viewport-relative coordinates by `display_offset`.
#[derive(Clone, Debug)]
pub struct TermDamageIterator<'a> {
    // Remaining per-line damage entries to scan.
    line_damage: slice::Iter<'a, LineDamageBounds>,
    // Scrollback offset used to translate line numbers into viewport space.
    display_offset: usize,
}
impl<'a> TermDamageIterator<'a> {
    /// Create a damage iterator over `line_damage`, skipping lines that were
    /// scrolled out of the viewport by `display_offset`.
    pub fn new(line_damage: &'a [LineDamageBounds], display_offset: usize) -> Self {
        // Lines pushed off-screen by scrollback can never be visible, so
        // their damage entries are dropped up front.
        let visible = line_damage.len().saturating_sub(display_offset);
        Self { display_offset, line_damage: line_damage[..visible].iter() }
    }
}
impl Iterator for TermDamageIterator<'_> {
    type Item = LineDamageBounds;

    /// Yield the next damaged line, translated into viewport coordinates.
    fn next(&mut self) -> Option<Self::Item> {
        // Walk forward, skipping undamaged lines.
        for line in self.line_damage.by_ref() {
            if line.is_damaged() {
                return Some(LineDamageBounds::new(
                    line.line + self.display_offset,
                    line.left,
                    line.right,
                ));
            }
        }
        None
    }
}
/// State of the terminal damage.
struct TermDamageState {
    /// Hint whether terminal should be damaged entirely regardless of the actual damage changes.
    full: bool,
    /// Information about damage on terminal lines.
    lines: Vec<LineDamageBounds>,
    /// Old terminal cursor point.
    ///
    /// Used to damage the previous cursor cell when the cursor moves.
    last_cursor: Point,
}
impl TermDamageState {
    /// Create damage state for a `num_cols` x `num_lines` terminal.
    fn new(num_cols: usize, num_lines: usize) -> Self {
        let mut lines = Vec::with_capacity(num_lines);
        for line in 0..num_lines {
            lines.push(LineDamageBounds::undamaged(line, num_cols));
        }

        // A fresh terminal counts as entirely damaged so the first frame
        // draws everything.
        Self { full: true, lines, last_cursor: Default::default() }
    }

    /// Rebuild the damage state for new terminal dimensions.
    #[inline]
    fn resize(&mut self, num_cols: usize, num_lines: usize) {
        // Reset point, so old cursor won't end up outside of the viewport.
        self.last_cursor = Default::default();
        self.full = true;

        self.lines.clear();
        self.lines.extend((0..num_lines).map(|line| LineDamageBounds::undamaged(line, num_cols)));
    }

    /// Damage point inside of the viewport.
    #[inline]
    fn damage_point(&mut self, point: Point<usize>) {
        let column = point.column.0;
        self.damage_line(point.line, column, column);
    }

    /// Expand `line`'s damage to span at least `left` to `right` column.
    #[inline]
    fn damage_line(&mut self, line: usize, left: usize, right: usize) {
        self.lines[line].expand(left, right);
    }

    /// Reset information about terminal damage.
    fn reset(&mut self, num_cols: usize) {
        self.full = false;
        for line in &mut self.lines {
            line.reset(num_cols);
        }
    }
}
/// Terminal emulator state: grids, modes, cursor, selection, and damage.
///
/// `T` is the event listener used to notify the UI (see [`EventListener`]).
pub struct Term<T> {
    /// Terminal focus controlling the cursor shape.
    pub is_focused: bool,
    /// Cursor for keyboard selection.
    pub vi_mode_cursor: ViModeCursor,
    /// Currently active selection, if any.
    pub selection: Option<Selection>,
    /// Currently active grid.
    ///
    /// Tracks the screen buffer currently in use. While the alternate screen buffer is active,
    /// this will be the alternate grid. Otherwise it is the primary screen buffer.
    grid: Grid<Cell>,
    /// Currently inactive grid.
    ///
    /// Opposite of the active grid. While the alternate screen buffer is active, this will be the
    /// primary grid. Otherwise it is the alternate screen buffer.
    inactive_grid: Grid<Cell>,
    /// Index into `charsets`, pointing to what ASCII is currently being mapped to.
    active_charset: CharsetIndex,
    /// Tabstops.
    tabs: TabStops,
    /// Mode flags.
    mode: TermMode,
    /// Scroll region.
    ///
    /// Range going from top to bottom of the terminal, indexed from the top of the viewport.
    scroll_region: Range<Line>,
    /// Modified terminal colors.
    colors: Colors,
    /// Current style of the cursor.
    cursor_style: Option<CursorStyle>,
    /// Proxy for sending events to the event loop.
    event_proxy: T,
    /// Current title of the window.
    title: Option<String>,
    /// Stack of saved window titles. When a title is popped from this stack, the `title` for the
    /// term is set.
    title_stack: Vec<Option<String>>,
    /// The stack for the keyboard modes.
    keyboard_mode_stack: Vec<KeyboardModes>,
    /// Currently inactive keyboard mode stack.
    ///
    /// Swapped with `keyboard_mode_stack` when toggling the alternate screen.
    inactive_keyboard_mode_stack: Vec<KeyboardModes>,
    /// Information about damaged cells.
    damage: TermDamageState,
    /// Config directly for the terminal.
    config: Config,
}
/// Configuration options for the [`Term`].
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct Config {
    /// The maximum amount of scrolling history.
    pub scrolling_history: usize,
    /// Default cursor style to reset the cursor to.
    pub default_cursor_style: CursorStyle,
    /// Cursor style for Vi mode.
    ///
    /// `None` falls back to the regular cursor style.
    pub vi_mode_cursor_style: Option<CursorStyle>,
    /// The characters which terminate semantic selection.
    ///
    /// The default value is [`SEMANTIC_ESCAPE_CHARS`].
    pub semantic_escape_chars: String,
    /// Whether to enable kitty keyboard protocol.
    pub kitty_keyboard: bool,
    /// OSC52 support mode.
    pub osc52: Osc52,
}
impl Default for Config {
fn default() -> Self {
Self {
scrolling_history: 10000,
semantic_escape_chars: SEMANTIC_ESCAPE_CHARS.to_owned(),
default_cursor_style: Default::default(),
vi_mode_cursor_style: Default::default(),
kitty_keyboard: Default::default(),
osc52: Default::default(),
}
}
}
/// OSC 52 behavior.
///
/// Controls whether applications may read from (`paste`) or write to
/// (`copy`) the system clipboard via the OSC 52 escape sequence.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize), serde(rename_all = "lowercase"))]
pub enum Osc52 {
    /// The handling of the escape sequence is disabled.
    Disabled,
    /// Only copy sequence is accepted.
    ///
    /// This option is the default as a compromise between entirely
    /// disabling it (the most secure) and allowing `paste` (the less secure).
    #[default]
    OnlyCopy,
    /// Only paste sequence is accepted.
    OnlyPaste,
    /// Both are accepted.
    CopyPaste,
}
impl<T> Term<T> {
    /// Scroll the display (viewport) by the requested amount, clamping the vi
    /// mode cursor so it stays inside the visible region.
    #[inline]
    pub fn scroll_display(&mut self, scroll: Scroll)
    where
        T: EventListener,
    {
        let old_display_offset = self.grid.display_offset();
        self.grid.scroll_display(scroll);
        // The cell under the mouse pointer may have changed.
        self.event_proxy.send_event(Event::MouseCursorDirty);
        // Clamp vi mode cursor to the viewport.
        let viewport_start = -(self.grid.display_offset() as i32);
        let viewport_end = viewport_start + self.bottommost_line().0;
        let vi_cursor_line = &mut self.vi_mode_cursor.point.line.0;
        *vi_cursor_line = cmp::min(viewport_end, cmp::max(viewport_start, *vi_cursor_line));
        self.vi_mode_recompute_selection();
        // Damage everything if display offset changed.
        if old_display_offset != self.grid().display_offset() {
            self.mark_fully_damaged();
        }
    }
    /// Create a new terminal with the given `config` and `dimensions`,
    /// reporting events through `event_proxy`.
    pub fn new<D: Dimensions>(config: Config, dimensions: &D, event_proxy: T) -> Term<T> {
        let num_cols = dimensions.columns();
        let num_lines = dimensions.screen_lines();
        let history_size = config.scrolling_history;
        let grid = Grid::new(num_lines, num_cols, history_size);
        // The alternate screen has no scrollback history.
        let inactive_grid = Grid::new(num_lines, num_cols, 0);
        let tabs = TabStops::new(grid.columns());
        // Scroll region spans the entire screen by default.
        let scroll_region = Line(0)..Line(grid.screen_lines() as i32);
        // Initialize terminal damage, covering the entire terminal upon launch.
        let damage = TermDamageState::new(num_cols, num_lines);
        Term {
            inactive_grid,
            scroll_region,
            event_proxy,
            damage,
            config,
            grid,
            tabs,
            inactive_keyboard_mode_stack: Default::default(),
            keyboard_mode_stack: Default::default(),
            active_charset: Default::default(),
            vi_mode_cursor: Default::default(),
            cursor_style: Default::default(),
            colors: color::Colors::default(),
            title_stack: Default::default(),
            is_focused: Default::default(),
            selection: Default::default(),
            title: Default::default(),
            mode: Default::default(),
        }
    }
    /// Collect the information about the changes in the lines, which
    /// could be used to minimize the amount of drawing operations.
    ///
    /// The user controlled elements, like `Vi` mode cursor and `Selection` are **not** part of the
    /// collected damage state. Those could easily be tracked by comparing their old and new
    /// value between adjacent frames.
    ///
    /// After reading damage [`reset_damage`] should be called.
    ///
    /// [`reset_damage`]: Self::reset_damage
    #[must_use]
    pub fn damage(&mut self) -> TermDamage<'_> {
        // Ensure the entire terminal is damaged after entering insert mode.
        // Leaving is handled in the ansi handler.
        if self.mode.contains(TermMode::INSERT) {
            self.mark_fully_damaged();
        }
        // Record the new cursor position while keeping the old one around.
        let previous_cursor = mem::replace(&mut self.damage.last_cursor, self.grid.cursor.point);
        if self.damage.full {
            return TermDamage::Full;
        }
        // Add information about old cursor position and new one if they are not the same, so we
        // cover everything that was produced by `Term::input`.
        if self.damage.last_cursor != previous_cursor {
            // Cursor coordinates are always inside viewport even if you have `display_offset`.
            let point = Point::new(previous_cursor.line.0 as usize, previous_cursor.column);
            self.damage.damage_point(point);
        }
        // Always damage current cursor.
        self.damage_cursor();
        // NOTE: damage which changes all the content when the display offset is non-zero (e.g.
        // scrolling) is handled via full damage.
        let display_offset = self.grid().display_offset();
        TermDamage::Partial(TermDamageIterator::new(&self.damage.lines, display_offset))
    }
    /// Resets the terminal damage information.
    ///
    /// Should be called after the damage from [`Self::damage`] was consumed.
    pub fn reset_damage(&mut self) {
        self.damage.reset(self.columns());
    }

    /// Flag the entire terminal as damaged so the next frame redraws fully.
    #[inline]
    fn mark_fully_damaged(&mut self) {
        self.damage.full = true;
    }
    /// Set new options for the [`Term`].
    ///
    /// Re-emits the title, resizes the scrollback history of the primary
    /// grid, and clears kitty keyboard state when that option toggled.
    pub fn set_options(&mut self, options: Config)
    where
        T: EventListener,
    {
        let old_config = mem::replace(&mut self.config, options);
        let title_event = match &self.title {
            Some(title) => Event::Title(title.clone()),
            None => Event::ResetTitle,
        };
        self.event_proxy.send_event(title_event);
        // Only the primary grid carries scrollback history; resize whichever
        // grid currently holds the primary screen.
        if self.mode.contains(TermMode::ALT_SCREEN) {
            self.inactive_grid.update_history(self.config.scrolling_history);
        } else {
            self.grid.update_history(self.config.scrolling_history);
        }
        // Toggling kitty keyboard support invalidates any stacked modes.
        if self.config.kitty_keyboard != old_config.kitty_keyboard {
            self.keyboard_mode_stack = Vec::new();
            self.inactive_keyboard_mode_stack = Vec::new();
            self.mode.remove(TermMode::KITTY_KEYBOARD_PROTOCOL);
        }
        // Damage everything on config updates.
        self.mark_fully_damaged();
    }
    /// Convert the active selection to a String.
    ///
    /// Returns `None` when there is no selection or it resolves to an empty
    /// range. Block selections are copied line by line with trailing
    /// whitespace trimmed.
    pub fn selection_to_string(&self) -> Option<String> {
        let selection_range = self.selection.as_ref().and_then(|s| s.to_range(self))?;
        let SelectionRange { start, end, .. } = selection_range;
        let mut res = String::new();
        match self.selection.as_ref() {
            Some(Selection { ty: SelectionType::Block, .. }) => {
                // All lines except the last are terminated with a newline.
                for line in (start.line.0..end.line.0).map(Line::from) {
                    res += self
                        .line_to_string(line, start.column..end.column, start.column.0 != 0)
                        .trim_end();
                    res += "\n";
                }
                res += self.line_to_string(end.line, start.column..end.column, true).trim_end();
            },
            Some(Selection { ty: SelectionType::Lines, .. }) => {
                res = self.bounds_to_string(start, end) + "\n";
            },
            _ => {
                res = self.bounds_to_string(start, end);
            },
        }
        Some(res)
    }
    /// Convert range between two points to a String.
    ///
    /// Any trailing newline produced by the last line is stripped.
    pub fn bounds_to_string(&self, start: Point, end: Point) -> String {
        let mut res = String::new();
        for line in (start.line.0..=end.line.0).map(Line::from) {
            // Only the first/last line use the partial column bounds.
            let start_col = if line == start.line { start.column } else { Column(0) };
            let end_col = if line == end.line { end.column } else { self.last_column() };
            res += &self.line_to_string(line, start_col..end_col, line == end.line);
        }
        res.strip_suffix('\n').map(str::to_owned).unwrap_or(res)
    }
    /// Convert a single line in the grid to a String.
    ///
    /// `cols` bounds the extracted columns; `include_wrapped_wide` controls
    /// whether a wide char wrapped onto the next line is pulled in when only
    /// its leading spacer is inside the range. A trailing `\n` is appended
    /// for non-wrapped lines when the range reaches the last column.
    fn line_to_string(
        &self,
        line: Line,
        mut cols: Range<Column>,
        include_wrapped_wide: bool,
    ) -> String {
        let mut text = String::new();
        let grid_line = &self.grid[line];
        let line_length = cmp::min(grid_line.line_length(), cols.end + 1);
        // Include wide char when trailing spacer is selected.
        if grid_line[cols.start].flags.contains(Flags::WIDE_CHAR_SPACER) {
            cols.start -= 1;
        }
        let mut tab_mode = false;
        for column in (cols.start.0..line_length.0).map(Column::from) {
            let cell = &grid_line[column];
            // Skip over cells until next tab-stop once a tab was found.
            if tab_mode {
                if self.tabs[column] || cell.c != ' ' {
                    tab_mode = false;
                } else {
                    continue;
                }
            }
            if cell.c == '\t' {
                tab_mode = true;
            }
            // Spacer cells are artifacts of wide chars; the char itself was
            // already pushed from its own cell.
            if !cell.flags.intersects(Flags::WIDE_CHAR_SPACER | Flags::LEADING_WIDE_CHAR_SPACER) {
                // Push cells primary character.
                text.push(cell.c);
                // Push zero-width characters.
                for c in cell.zerowidth().into_iter().flatten() {
                    text.push(*c);
                }
            }
        }
        if cols.end >= self.columns() - 1
            && (line_length.0 == 0
                || !self.grid[line][line_length - 1].flags.contains(Flags::WRAPLINE))
        {
            text.push('\n');
        }
        // If wide char is not part of the selection, but leading spacer is, include it.
        if line_length == self.columns()
            && line_length.0 >= 2
            && grid_line[line_length - 1].flags.contains(Flags::LEADING_WIDE_CHAR_SPACER)
            && include_wrapped_wide
        {
            text.push(self.grid[line - 1i32][Column(0)].c);
        }
        text
    }
    /// Terminal content required for rendering.
    #[inline]
    pub fn renderable_content(&self) -> RenderableContent<'_>
    where
        T: EventListener,
    {
        RenderableContent::new(self)
    }
    /// Access to the raw grid data structure.
    pub fn grid(&self) -> &Grid<Cell> {
        &self.grid
    }

    /// Mutable access to the raw grid data structure.
    pub fn grid_mut(&mut self) -> &mut Grid<Cell> {
        &mut self.grid
    }
    /// Resize terminal to new dimensions.
    ///
    /// Resizes both grids, re-clamps the vi cursor and selection, rebuilds
    /// tabstops when the column count changed, and resets the scroll region.
    pub fn resize<S: Dimensions>(&mut self, size: S) {
        let old_cols = self.columns();
        let old_lines = self.screen_lines();
        let num_cols = size.columns();
        let num_lines = size.screen_lines();
        if old_cols == num_cols && old_lines == num_lines {
            debug!("Term::resize dimensions unchanged");
            return;
        }
        debug!("New num_cols is {} and num_lines is {}", num_cols, num_lines);
        // Move vi mode cursor with the content.
        let history_size = self.history_size();
        let mut delta = num_lines as i32 - old_lines as i32;
        let min_delta = cmp::min(0, num_lines as i32 - self.grid.cursor.point.line.0 - 1);
        delta = cmp::min(cmp::max(delta, min_delta), history_size as i32);
        self.vi_mode_cursor.point.line += delta;
        // Only the grid holding the primary screen reflows its content.
        let is_alt = self.mode.contains(TermMode::ALT_SCREEN);
        self.grid.resize(!is_alt, num_lines, num_cols);
        self.inactive_grid.resize(is_alt, num_lines, num_cols);
        // Invalidate selection and tabs only when necessary.
        if old_cols != num_cols {
            self.selection = None;
            // Recreate tabs list.
            self.tabs.resize(num_cols);
        } else if let Some(selection) = self.selection.take() {
            let max_lines = cmp::max(num_lines, old_lines) as i32;
            let range = Line(0)..Line(max_lines);
            self.selection = selection.rotate(self, &range, -delta);
        }
        // Clamp vi cursor to viewport.
        let vi_point = self.vi_mode_cursor.point;
        let viewport_top = Line(-(self.grid.display_offset() as i32));
        let viewport_bottom = viewport_top + self.bottommost_line();
        self.vi_mode_cursor.point.line =
            cmp::max(cmp::min(vi_point.line, viewport_bottom), viewport_top);
        self.vi_mode_cursor.point.column = cmp::min(vi_point.column, self.last_column());
        // Reset scrolling region.
        self.scroll_region = Line(0)..Line(self.screen_lines() as i32);
        // Resize damage information.
        self.damage.resize(num_cols, num_lines);
    }
    /// Active terminal modes.
    #[inline]
    pub fn mode(&self) -> &TermMode {
        &self.mode
    }
    /// Swap primary and alternate screen buffer.
    ///
    /// Also swaps the keyboard mode stacks, clears the selection, and fully
    /// damages the terminal.
    pub fn swap_alt(&mut self) {
        // Entering the alternate screen: seed it from the primary cursor.
        if !self.mode.contains(TermMode::ALT_SCREEN) {
            // Set alt screen cursor to the current primary screen cursor.
            self.inactive_grid.cursor = self.grid.cursor.clone();
            // Drop information about the primary screens saved cursor.
            self.grid.saved_cursor = self.grid.cursor.clone();
            // Reset alternate screen contents.
            self.inactive_grid.reset_region(..);
        }
        mem::swap(&mut self.keyboard_mode_stack, &mut self.inactive_keyboard_mode_stack);
        let keyboard_mode =
            self.keyboard_mode_stack.last().copied().unwrap_or(KeyboardModes::NO_MODE).into();
        self.set_keyboard_mode(keyboard_mode, KeyboardModesApplyBehavior::Replace);
        mem::swap(&mut self.grid, &mut self.inactive_grid);
        self.mode ^= TermMode::ALT_SCREEN;
        self.selection = None;
        self.mark_fully_damaged();
    }
    /// Scroll screen down.
    ///
    /// Text moves down; clear at bottom
    /// Expects origin to be in scroll range.
    #[inline]
    fn scroll_down_relative(&mut self, origin: Line, mut lines: usize) {
        trace!("Scrolling down relative: origin={}, lines={}", origin, lines);
        // Cap the scroll amount at the scroll-region height and at the
        // distance from origin to the region's end.
        lines = cmp::min(lines, (self.scroll_region.end - self.scroll_region.start).0 as usize);
        lines = cmp::min(lines, (self.scroll_region.end - origin).0 as usize);
        let region = origin..self.scroll_region.end;
        // Scroll selection.
        self.selection =
            self.selection.take().and_then(|s| s.rotate(self, &region, -(lines as i32)));
        // Scroll vi mode cursor.
        let line = &mut self.vi_mode_cursor.point.line;
        if region.start <= *line && region.end > *line {
            *line = cmp::min(*line + lines, region.end - 1);
        }
        // Scroll between origin and bottom
        self.grid.scroll_down(&region, lines);
        self.mark_fully_damaged();
    }
    /// Scroll screen up
    ///
    /// Text moves up; clear at top
    /// Expects origin to be in scroll range.
    #[inline]
    fn scroll_up_relative(&mut self, origin: Line, mut lines: usize) {
        trace!("Scrolling up relative: origin={}, lines={}", origin, lines);
        // Cap the scroll amount at the scroll-region height.
        lines = cmp::min(lines, (self.scroll_region.end - self.scroll_region.start).0 as usize);
        let region = origin..self.scroll_region.end;
        // Scroll selection.
        self.selection = self.selection.take().and_then(|s| s.rotate(self, &region, lines as i32));
        self.grid.scroll_up(&region, lines);
        // Scroll vi mode cursor.
        let viewport_top = Line(-(self.grid.display_offset() as i32));
        let top = if region.start == 0 { viewport_top } else { region.start };
        let line = &mut self.vi_mode_cursor.point.line;
        if (top <= *line) && region.end > *line {
            *line = cmp::max(*line - lines, top);
        }
        self.mark_fully_damaged();
    }
    /// Handle the DECCOLM (132-column mode) escape sequence side effects.
    fn deccolm(&mut self)
    where
        T: EventListener,
    {
        // Setting 132 column font makes no sense, but run the other side effects.
        // Clear scrolling region.
        self.set_scrolling_region(1, None);
        // Clear grid.
        self.grid.reset_region(..);
        self.mark_fully_damaged();
    }
    /// Request the event loop to shut this terminal down.
    #[inline]
    pub fn exit(&mut self)
    where
        T: EventListener,
    {
        self.event_proxy.send_event(Event::Exit);
    }
    /// Toggle the vi mode.
    #[inline]
    pub fn toggle_vi_mode(&mut self)
    where
        T: EventListener,
    {
        self.mode ^= TermMode::VI;
        // When entering vi mode, position the vi cursor sensibly.
        if self.mode.contains(TermMode::VI) {
            let display_offset = self.grid.display_offset() as i32;
            if self.grid.cursor.point.line > self.bottommost_line() - display_offset {
                // Move cursor to top-left if terminal cursor is not visible.
                let point = Point::new(Line(-display_offset), Column(0));
                self.vi_mode_cursor = ViModeCursor::new(point);
            } else {
                // Reset vi mode cursor position to match primary cursor.
                self.vi_mode_cursor = ViModeCursor::new(self.grid.cursor.point);
            }
        }
        // Update UI about cursor blinking state changes.
        self.event_proxy.send_event(Event::CursorBlinkingChange);
    }
    /// Move vi mode cursor.
    ///
    /// No-op unless vi mode is active. Keeps any active selection in sync
    /// with the new cursor position.
    #[inline]
    pub fn vi_motion(&mut self, motion: ViMotion)
    where
        T: EventListener,
    {
        // Require vi mode to be active.
        if !self.mode.contains(TermMode::VI) {
            return;
        }
        // Move cursor.
        self.vi_mode_cursor = self.vi_mode_cursor.motion(self, motion);
        self.vi_mode_recompute_selection();
    }
    /// Move vi cursor to a point in the grid.
    ///
    /// Scrolls the viewport if necessary so `point` is visible.
    #[inline]
    pub fn vi_goto_point(&mut self, point: Point)
    where
        T: EventListener,
    {
        // Move viewport to make point visible.
        self.scroll_to_point(point);
        // Move vi cursor to the point.
        self.vi_mode_cursor.point = point;
        self.vi_mode_recompute_selection();
    }
    /// Update the active selection to match the vi mode cursor position.
    #[inline]
    fn vi_mode_recompute_selection(&mut self) {
        // Require vi mode to be active.
        if !self.mode.contains(TermMode::VI) {
            return;
        }
        // Update only if non-empty selection is present.
        if let Some(selection) = self.selection.as_mut().filter(|s| !s.is_empty()) {
            selection.update(self.vi_mode_cursor.point, Side::Left);
            selection.include_all();
        }
    }
    /// Scroll display to point if it is outside of viewport.
    pub fn scroll_to_point(&mut self, point: Point)
    where
        T: EventListener,
    {
        let display_offset = self.grid.display_offset() as i32;
        let screen_lines = self.grid.screen_lines() as i32;
        if point.line < -display_offset {
            // Point is above the viewport; scroll up just far enough.
            let lines = point.line + display_offset;
            self.scroll_display(Scroll::Delta(-lines.0));
        } else if point.line >= (screen_lines - display_offset) {
            // Point is below the viewport; scroll down just far enough.
            let lines = point.line + display_offset - screen_lines + 1i32;
            self.scroll_display(Scroll::Delta(-lines.0));
        }
    }
    /// Jump to the end of a wide cell.
    ///
    /// Adjusts `point` in `direction` so it never lands on a spacer cell of
    /// a fullwidth character.
    pub fn expand_wide(&self, mut point: Point, direction: Direction) -> Point {
        let flags = self.grid[point.line][point.column].flags;
        match direction {
            // Leading spacer at end of line: the wide char itself is at the
            // start of the next line.
            Direction::Right if flags.contains(Flags::LEADING_WIDE_CHAR_SPACER) => {
                point.column = Column(1);
                point.line += 1;
            },
            Direction::Right if flags.contains(Flags::WIDE_CHAR) => {
                point.column = cmp::min(point.column + 1, self.last_column());
            },
            Direction::Left if flags.intersects(Flags::WIDE_CHAR | Flags::WIDE_CHAR_SPACER) => {
                if flags.contains(Flags::WIDE_CHAR_SPACER) {
                    point.column -= 1;
                }
                let prev = point.sub(self, Boundary::Grid, 1);
                if self.grid[prev].flags.contains(Flags::LEADING_WIDE_CHAR_SPACER) {
                    point = prev;
                }
            },
            _ => (),
        }
        point
    }
    /// Characters which terminate semantic selection.
    #[inline]
    pub fn semantic_escape_chars(&self) -> &str {
        &self.config.semantic_escape_chars
    }

    /// Override the semantic escape characters (test helper).
    #[cfg(test)]
    pub(crate) fn set_semantic_escape_chars(&mut self, semantic_escape_chars: &str) {
        self.config.semantic_escape_chars = semantic_escape_chars.into();
    }
/// Active terminal cursor style.
///
/// While vi mode is active, this will automatically return the vi mode cursor style.
#[inline]
pub fn cursor_style(&self) -> CursorStyle {
let cursor_style = self.cursor_style.unwrap_or(self.config.default_cursor_style);
if self.mode.contains(TermMode::VI) {
self.config.vi_mode_cursor_style.unwrap_or(cursor_style)
} else {
cursor_style
}
}
    /// Terminal color palette including runtime modifications.
    pub fn colors(&self) -> &Colors {
        &self.colors
    }
    /// Insert a linebreak at the current cursor position.
    ///
    /// No-op when line wrapping is disabled. Marks the current cell with
    /// [`Flags::WRAPLINE`] so reflow/selection can join the lines later.
    #[inline]
    fn wrapline(&mut self)
    where
        T: EventListener,
    {
        if !self.mode.contains(TermMode::LINE_WRAP) {
            return;
        }
        trace!("Wrapping input");
        self.grid.cursor_cell().flags.insert(Flags::WRAPLINE);
        // At the bottom of the scroll region a wrap scrolls; otherwise just
        // step the cursor down one line.
        if self.grid.cursor.point.line + 1 >= self.scroll_region.end {
            self.linefeed();
        } else {
            self.damage_cursor();
            self.grid.cursor.point.line += 1;
        }
        self.grid.cursor.point.column = Column(0);
        self.grid.cursor.input_needs_wrap = false;
        self.damage_cursor();
    }
    /// Write `c` to the cell at the cursor position.
    ///
    /// Applies the active charset mapping and the cursor's template
    /// (colors/flags/extra), cleaning up wide-char spacers when the target
    /// cell previously held part of a fullwidth character.
    #[inline(always)]
    fn write_at_cursor(&mut self, c: char) {
        let c = self.grid.cursor.charsets[self.active_charset].map(c);
        let fg = self.grid.cursor.template.fg;
        let bg = self.grid.cursor.template.bg;
        let flags = self.grid.cursor.template.flags;
        let extra = self.grid.cursor.template.extra.clone();
        let mut cursor_cell = self.grid.cursor_cell();
        // Clear all related cells when overwriting a fullwidth cell.
        if cursor_cell.flags.intersects(Flags::WIDE_CHAR | Flags::WIDE_CHAR_SPACER) {
            // Remove wide char and spacer.
            let wide = cursor_cell.flags.contains(Flags::WIDE_CHAR);
            let point = self.grid.cursor.point;
            if wide && point.column < self.last_column() {
                self.grid[point.line][point.column + 1].flags.remove(Flags::WIDE_CHAR_SPACER);
            } else if point.column > 0 {
                self.grid[point.line][point.column - 1].clear_wide();
            }
            // Remove leading spacers.
            if point.column <= 1 && point.line != self.topmost_line() {
                let column = self.last_column();
                self.grid[point.line - 1i32][column].flags.remove(Flags::LEADING_WIDE_CHAR_SPACER);
            }
            // Re-borrow: the spacer cleanup above may touch neighbors.
            cursor_cell = self.grid.cursor_cell();
        }
        cursor_cell.c = c;
        cursor_cell.fg = fg;
        cursor_cell.bg = bg;
        cursor_cell.flags = flags;
        cursor_cell.extra = extra;
    }
    /// Damage the cell currently occupied by the terminal cursor.
    #[inline]
    fn damage_cursor(&mut self) {
        // The normal cursor coordinates are always in viewport.
        let point =
            Point::new(self.grid.cursor.point.line.0 as usize, self.grid.cursor.point.column);
        self.damage.damage_point(point);
    }
    /// Apply a kitty keyboard protocol mode change.
    ///
    /// `apply` decides whether `mode` replaces, is OR-ed into, or is removed
    /// from the currently active protocol flags.
    #[inline]
    fn set_keyboard_mode(&mut self, mode: TermMode, apply: KeyboardModesApplyBehavior) {
        let active_mode = self.mode & TermMode::KITTY_KEYBOARD_PROTOCOL;
        self.mode &= !TermMode::KITTY_KEYBOARD_PROTOCOL;
        let new_mode = match apply {
            KeyboardModesApplyBehavior::Replace => mode,
            KeyboardModesApplyBehavior::Union => active_mode.union(mode),
            KeyboardModesApplyBehavior::Difference => active_mode.difference(mode),
        };
        trace!("Setting keyboard mode to {new_mode:?}");
        self.mode |= new_mode;
    }
}
// Dimensions of the terminal are those of its active grid.
impl<T> Dimensions for Term<T> {
    #[inline]
    fn columns(&self) -> usize {
        self.grid.columns()
    }
    #[inline]
    fn screen_lines(&self) -> usize {
        self.grid.screen_lines()
    }
    #[inline]
    fn total_lines(&self) -> usize {
        self.grid.total_lines()
    }
}
impl<T: EventListener> Handler for Term<T> {
    /// A character to be displayed.
    ///
    /// Handles zero-width combining characters, pending wraps, insert mode
    /// shifting, and fullwidth characters (which occupy two cells: the glyph
    /// plus a spacer).
    #[inline(never)]
    fn input(&mut self, c: char) {
        // Number of cells the char will occupy.
        let width = match c.width() {
            Some(width) => width,
            None => return,
        };
        // Handle zero-width characters.
        if width == 0 {
            // Get previous column.
            let mut column = self.grid.cursor.point.column;
            if !self.grid.cursor.input_needs_wrap {
                column.0 = column.saturating_sub(1);
            }
            // Put zerowidth characters over first fullwidth character cell.
            let line = self.grid.cursor.point.line;
            if self.grid[line][column].flags.contains(Flags::WIDE_CHAR_SPACER) {
                column.0 = column.saturating_sub(1);
            }
            self.grid[line][column].push_zerowidth(c);
            return;
        }
        // Move cursor to next line.
        if self.grid.cursor.input_needs_wrap {
            self.wrapline();
        }
        // If in insert mode, first shift cells to the right.
        let columns = self.columns();
        if self.mode.contains(TermMode::INSERT) && self.grid.cursor.point.column + width < columns {
            let line = self.grid.cursor.point.line;
            let col = self.grid.cursor.point.column;
            let row = &mut self.grid[line][..];
            for col in (col.0..(columns - width)).rev() {
                row.swap(col + width, col);
            }
        }
        if width == 1 {
            self.write_at_cursor(c);
        } else {
            if self.grid.cursor.point.column + 1 >= columns {
                if self.mode.contains(TermMode::LINE_WRAP) {
                    // Insert placeholder before wide char if glyph does not fit in this row.
                    self.grid.cursor.template.flags.insert(Flags::LEADING_WIDE_CHAR_SPACER);
                    self.write_at_cursor(' ');
                    self.grid.cursor.template.flags.remove(Flags::LEADING_WIDE_CHAR_SPACER);
                    self.wrapline();
                } else {
                    // Prevent out of bounds crash when linewrapping is disabled.
                    self.grid.cursor.input_needs_wrap = true;
                    return;
                }
            }
            // Write full width glyph to current cursor cell.
            self.grid.cursor.template.flags.insert(Flags::WIDE_CHAR);
            self.write_at_cursor(c);
            self.grid.cursor.template.flags.remove(Flags::WIDE_CHAR);
            // Write spacer to cell following the wide glyph.
            self.grid.cursor.point.column += 1;
            self.grid.cursor.template.flags.insert(Flags::WIDE_CHAR_SPACER);
            self.write_at_cursor(' ');
            self.grid.cursor.template.flags.remove(Flags::WIDE_CHAR_SPACER);
        }
        // Advance, or mark a wrap as pending at the last column.
        if self.grid.cursor.point.column + 1 < columns {
            self.grid.cursor.point.column += 1;
        } else {
            self.grid.cursor.input_needs_wrap = true;
        }
    }
    /// DECALN screen alignment test: fill the screen with 'E'.
    #[inline]
    fn decaln(&mut self) {
        trace!("Decalnning");
        for line in (0..self.screen_lines()).map(Line::from) {
            for column in 0..self.columns() {
                let cell = &mut self.grid[line][Column(column)];
                *cell = Cell::default();
                cell.c = 'E';
            }
        }
        self.mark_fully_damaged();
    }
    /// Move the cursor to `line`/`col`, honoring origin mode.
    #[inline]
    fn goto(&mut self, line: i32, col: usize) {
        let line = Line(line);
        let col = Column(col);
        trace!("Going to: line={}, col={}", line, col);
        // With origin mode set, coordinates are relative to (and clamped
        // inside) the scroll region.
        let (y_offset, max_y) = if self.mode.contains(TermMode::ORIGIN) {
            (self.scroll_region.start, self.scroll_region.end - 1)
        } else {
            (Line(0), self.bottommost_line())
        };
        self.damage_cursor();
        self.grid.cursor.point.line = cmp::max(cmp::min(line + y_offset, max_y), Line(0));
        self.grid.cursor.point.column = cmp::min(col, self.last_column());
        self.damage_cursor();
        self.grid.cursor.input_needs_wrap = false;
    }
    /// Move the cursor to `line`, keeping the current column.
    #[inline]
    fn goto_line(&mut self, line: i32) {
        trace!("Going to line: {}", line);
        self.goto(line, self.grid.cursor.point.column.0)
    }

    /// Move the cursor to `col`, keeping the current line.
    #[inline]
    fn goto_col(&mut self, col: usize) {
        trace!("Going to column: {}", col);
        self.goto(self.grid.cursor.point.line.0, col)
    }
    /// Insert `count` blank cells at the cursor, shifting the rest of the
    /// line right; cells pushed past the last column are lost.
    #[inline]
    fn insert_blank(&mut self, count: usize) {
        let cursor = &self.grid.cursor;
        let bg = cursor.template.bg;
        // Ensure inserting within terminal bounds
        let count = cmp::min(count, self.columns() - cursor.point.column.0);
        let source = cursor.point.column;
        let destination = cursor.point.column.0 + count;
        let num_cells = self.columns() - destination;
        let line = cursor.point.line;
        self.damage.damage_line(line.0 as usize, 0, self.columns() - 1);
        let row = &mut self.grid[line][..];
        // Shift right-to-left so cells are not overwritten before moving.
        for offset in (0..num_cells).rev() {
            row.swap(destination + offset, source.0 + offset);
        }
        // Cells were just moved out toward the end of the line;
        // fill in between source and dest with blanks.
        for cell in &mut row[source.0..destination] {
            *cell = bg.into();
        }
    }
    /// Move the cursor up by `lines`, keeping the column.
    #[inline]
    fn move_up(&mut self, lines: usize) {
        trace!("Moving up: {}", lines);
        let line = self.grid.cursor.point.line - lines;
        let column = self.grid.cursor.point.column;
        self.goto(line.0, column.0)
    }

    /// Move the cursor down by `lines`, keeping the column.
    #[inline]
    fn move_down(&mut self, lines: usize) {
        trace!("Moving down: {}", lines);
        let line = self.grid.cursor.point.line + lines;
        let column = self.grid.cursor.point.column;
        self.goto(line.0, column.0)
    }
    /// Move the cursor right by `cols`, clamped to the last column.
    #[inline]
    fn move_forward(&mut self, cols: usize) {
        trace!("Moving forward: {}", cols);
        let last_column = cmp::min(self.grid.cursor.point.column + cols, self.last_column());
        let cursor_line = self.grid.cursor.point.line.0 as usize;
        self.damage.damage_line(cursor_line, self.grid.cursor.point.column.0, last_column.0);
        self.grid.cursor.point.column = last_column;
        self.grid.cursor.input_needs_wrap = false;
    }

    /// Move the cursor left by `cols`, clamped to column zero.
    #[inline]
    fn move_backward(&mut self, cols: usize) {
        trace!("Moving backward: {}", cols);
        let column = self.grid.cursor.point.column.saturating_sub(cols);
        let cursor_line = self.grid.cursor.point.line.0 as usize;
        self.damage.damage_line(cursor_line, column, self.grid.cursor.point.column.0);
        self.grid.cursor.point.column = Column(column);
        self.grid.cursor.input_needs_wrap = false;
    }
    /// Respond to DA (Device Attributes) queries over the pty.
    #[inline]
    fn identify_terminal(&mut self, intermediate: Option<char>) {
        match intermediate {
            None => {
                trace!("Reporting primary device attributes");
                let text = String::from("\x1b[?6c");
                self.event_proxy.send_event(Event::PtyWrite(text));
            },
            Some('>') => {
                trace!("Reporting secondary device attributes");
                let version = version_number(env!("CARGO_PKG_VERSION"));
                let text = format!("\x1b[>0;{version};1c");
                self.event_proxy.send_event(Event::PtyWrite(text));
            },
            _ => debug!("Unsupported device attributes intermediate"),
        }
    }
    /// Report the active kitty keyboard mode to the application.
    #[inline]
    fn report_keyboard_mode(&mut self) {
        if !self.config.kitty_keyboard {
            return;
        }
        trace!("Reporting active keyboard mode");
        let current_mode =
            self.keyboard_mode_stack.last().unwrap_or(&KeyboardModes::NO_MODE).bits();
        let text = format!("\x1b[?{current_mode}u");
        self.event_proxy.send_event(Event::PtyWrite(text));
    }
#[inline]
fn push_keyboard_mode(&mut self, mode: KeyboardModes) {
    // Only honored when the kitty keyboard protocol is enabled.
    if !self.config.kitty_keyboard {
        return;
    }
    trace!("Pushing `{mode:?}` keyboard mode into the stack");
    // BUG FIX: the depth-limit eviction previously removed from
    // `title_stack`, corrupting the title stack while letting the keyboard
    // mode stack grow without bound. Evict from `keyboard_mode_stack`.
    if self.keyboard_mode_stack.len() >= KEYBOARD_MODE_STACK_MAX_DEPTH {
        let removed = self.keyboard_mode_stack.remove(0);
        trace!(
            "Removing '{:?}' from bottom of keyboard mode stack that exceeds its maximum depth",
            removed
        );
    }
    self.keyboard_mode_stack.push(mode);
    // The newly pushed mode becomes the active one.
    self.set_keyboard_mode(mode.into(), KeyboardModesApplyBehavior::Replace);
}
#[inline]
fn pop_keyboard_modes(&mut self, to_pop: u16) {
    // Only honored when the kitty keyboard protocol is enabled.
    if !self.config.kitty_keyboard {
        return;
    }
    trace!("Attempting to pop {to_pop} keyboard modes from the stack");
    // Popping more entries than exist simply empties the stack.
    let remaining = self.keyboard_mode_stack.len().saturating_sub(to_pop as usize);
    self.keyboard_mode_stack.truncate(remaining);
    // Reload active mode: topmost remaining entry, or NO_MODE when empty.
    let mode = match self.keyboard_mode_stack.last() {
        Some(mode) => *mode,
        None => KeyboardModes::NO_MODE,
    };
    self.set_keyboard_mode(mode.into(), KeyboardModesApplyBehavior::Replace);
}
/// Handler entry point for replacing the active kitty keyboard mode.
#[inline]
fn set_keyboard_mode(&mut self, mode: KeyboardModes, apply: KeyboardModesApplyBehavior) {
    if !self.config.kitty_keyboard {
        return;
    }
    // NOTE(review): this call is presumably resolved to an inherent
    // `Term::set_keyboard_mode` taking the converted (`mode.into()`) type,
    // not a recursive call into this trait method — the inherent method is
    // not visible in this chunk; confirm it exists before refactoring.
    self.set_keyboard_mode(mode.into(), apply);
}
#[inline]
fn device_status(&mut self, arg: usize) {
    trace!("Reporting device status: {}", arg);
    let response = match arg {
        // DSR 5: operating status — always report OK.
        5 => Some(String::from("\x1b[0n")),
        // DSR 6: cursor position report, 1-based line/column.
        6 => {
            let pos = self.grid.cursor.point;
            Some(format!("\x1b[{};{}R", pos.line + 1, pos.column + 1))
        },
        _ => None,
    };
    match response {
        Some(text) => self.event_proxy.send_event(Event::PtyWrite(text)),
        None => debug!("unknown device status query: {}", arg),
    }
}
#[inline]
fn move_down_and_cr(&mut self, lines: usize) {
    trace!("Moving down and cr: {}", lines);
    // Move down and return to column 0 in one `goto`.
    let target_line = self.grid.cursor.point.line + lines;
    self.goto(target_line.0, 0)
}
#[inline]
fn move_up_and_cr(&mut self, lines: usize) {
    trace!("Moving up and cr: {}", lines);
    // Move up and return to column 0 in one `goto`.
    let target_line = self.grid.cursor.point.line - lines;
    self.goto(target_line.0, 0)
}
/// Insert tab at cursor position.
#[inline]
fn put_tab(&mut self, mut count: u16) {
    // A tab after the last column is the same as a linebreak.
    if self.grid.cursor.input_needs_wrap {
        self.wrapline();
        return;
    }
    // Advance once per requested tab, never moving past the last column.
    while self.grid.cursor.point.column < self.columns() && count != 0 {
        count -= 1;
        // Write the (charset-mapped) tab character, but only into cells
        // that still hold a plain space, preserving existing content.
        let c = self.grid.cursor.charsets[self.active_charset].map('\t');
        let cell = self.grid.cursor_cell();
        if cell.c == ' ' {
            cell.c = c;
        }
        // Walk right until the next tabstop or the final column.
        loop {
            if (self.grid.cursor.point.column + 1) == self.columns() {
                break;
            }
            self.grid.cursor.point.column += 1;
            if self.tabs[self.grid.cursor.point.column] {
                break;
            }
        }
    }
}
/// Backspace.
#[inline]
fn backspace(&mut self) {
    trace!("Backspace");
    let column = self.grid.cursor.point.column;
    // At column 0 there is nothing to move back over.
    if column > Column(0) {
        let line = self.grid.cursor.point.line.0 as usize;
        self.grid.cursor.point.column = column - 1;
        self.grid.cursor.input_needs_wrap = false;
        // Both the previous and the new cursor cell need repainting.
        self.damage.damage_line(line, column.0 - 1, column.0);
    }
}
/// Carriage return.
#[inline]
fn carriage_return(&mut self) {
    trace!("Carriage return");
    let line = self.grid.cursor.point.line.0 as usize;
    let old_column = self.grid.cursor.point.column.0;
    // Everything between column 0 and the old cursor position needs repaint.
    self.damage.damage_line(line, 0, old_column);
    self.grid.cursor.point.column = Column(0);
    self.grid.cursor.input_needs_wrap = false;
}
/// Linefeed.
#[inline]
fn linefeed(&mut self) {
    trace!("Linefeed");
    let next_line = self.grid.cursor.point.line + 1;
    if next_line == self.scroll_region.end {
        // At the bottom of the scroll region, scroll instead of moving.
        self.scroll_up(1);
    } else if next_line < self.screen_lines() {
        self.damage_cursor();
        self.grid.cursor.point.line += 1;
        self.damage_cursor();
    }
}
/// Emit a bell notification.
// NOTE(review): the previous doc comment here ("Set current position as a
// tabstop.") was a copy-paste leftover belonging to `set_horizontal_tabstop`.
#[inline]
fn bell(&mut self) {
    trace!("Bell");
    self.event_proxy.send_event(Event::Bell);
}
/// SUB control character; intentionally a no-op beyond logging.
#[inline]
fn substitute(&mut self) {
    trace!("[unimplemented] Substitute");
}
/// Run LF/NL.
///
/// Per ECMA-48 4th edition, in LINE FEED mode the formatter functions LF,
/// FF, and VT only move the active position in the direction of line
/// progression; in NEW LINE mode they additionally move to the line home
/// position on the following line (for LF this is the New Line option).
///
/// ECMA-48 4th edition deprecates the option and the 5th edition removes
/// it, but as an emulator we still support it since applications may rely
/// on it.
#[inline]
fn newline(&mut self) {
    self.linefeed();
    // In NEW LINE mode a line feed also implies a carriage return.
    if !self.mode.contains(TermMode::LINE_FEED_NEW_LINE) {
        return;
    }
    self.carriage_return();
}
/// Set current position as a tabstop.
#[inline]
fn set_horizontal_tabstop(&mut self) {
    trace!("Setting horizontal tabstop");
    let column = self.grid.cursor.point.column;
    self.tabs[column] = true;
}
/// Scroll the scroll region's content up by `lines`.
#[inline]
fn scroll_up(&mut self, lines: usize) {
    self.scroll_up_relative(self.scroll_region.start, lines);
}
/// Scroll the scroll region's content down by `lines`.
#[inline]
fn scroll_down(&mut self, lines: usize) {
    self.scroll_down_relative(self.scroll_region.start, lines);
}
#[inline]
fn insert_blank_lines(&mut self, lines: usize) {
    trace!("Inserting blank {} lines", lines);
    let origin = self.grid.cursor.point.line;
    // Lines only shift when the cursor is inside the scroll region.
    if !self.scroll_region.contains(&origin) {
        return;
    }
    self.scroll_down_relative(origin, lines);
}
#[inline]
fn delete_lines(&mut self, lines: usize) {
    let origin = self.grid.cursor.point.line;
    // Never delete more lines than remain below the cursor.
    let lines = cmp::min(self.screen_lines() - origin.0 as usize, lines);
    trace!("Deleting {} lines", lines);
    if lines == 0 || !self.scroll_region.contains(&origin) {
        return;
    }
    self.scroll_up_relative(origin, lines);
}
#[inline]
fn erase_chars(&mut self, count: usize) {
    let cursor = &self.grid.cursor;
    trace!("Erasing chars: count={}, col={}", count, cursor.point.column);
    let start = cursor.point.column;
    let end = cmp::min(start + count, Column(self.columns()));
    // Cleared cells have current background color set.
    let bg = cursor.template.bg;
    let line = cursor.point.line;
    self.damage.damage_line(line.0 as usize, start.0, end.0);
    for cell in &mut self.grid[line][start..end] {
        *cell = bg.into();
    }
}
/// Delete `count` characters at the cursor, shifting the rest of the line left.
#[inline]
fn delete_chars(&mut self, count: usize) {
    let columns = self.columns();
    let cursor = &self.grid.cursor;
    let bg = cursor.template.bg;
    // Ensure deleting within terminal bounds.
    let count = cmp::min(count, columns);
    let start = cursor.point.column.0;
    let end = cmp::min(start + count, columns - 1);
    // Number of surviving cells to the right of the deleted range.
    let num_cells = columns - end;
    let line = cursor.point.line;
    // The entire tail of the line shifts, so damage the whole line.
    self.damage.damage_line(line.0 as usize, 0, self.columns() - 1);
    let row = &mut self.grid[line][..];
    // Shift the surviving cells left over the deleted range via swaps.
    for offset in 0..num_cells {
        row.swap(start + offset, end + offset);
    }
    // Clear last `count` cells in the row. If deleting 1 char, need to delete
    // 1 cell.
    let end = columns - count;
    for cell in &mut row[end..] {
        *cell = bg.into();
    }
}
#[inline]
fn move_backward_tabs(&mut self, count: u16) {
    trace!("Moving backward {} tabs", count);
    let old_col = self.grid.cursor.point.column.0;
    for _ in 0..count {
        let current = self.grid.cursor.point.column;
        // Nothing left of column 0.
        if current == 0 {
            break;
        }
        // Jump to the closest tabstop left of the cursor, if one exists;
        // otherwise the cursor stays in place.
        if let Some(stop) = (0..current.0).rev().find(|&i| self.tabs[index::Column(i)]) {
            self.grid.cursor.point.column = index::Column(stop);
        }
    }
    let line = self.grid.cursor.point.line.0 as usize;
    self.damage.damage_line(line, self.grid.cursor.point.column.0, old_col);
}
#[inline]
fn move_forward_tabs(&mut self, count: u16) {
    trace!("Moving forward {} tabs", count);
    let num_cols = self.columns();
    let old_col = self.grid.cursor.point.column.0;
    for _ in 0..count {
        let current = self.grid.cursor.point.column;
        // Already at the last column.
        if current == num_cols - 1 {
            break;
        }
        // Jump to the next tabstop, or the final column when none remains.
        let next = (current.0 + 1..num_cols)
            .find(|&i| self.tabs[index::Column(i)])
            .unwrap_or(num_cols - 1);
        self.grid.cursor.point.column = index::Column(next);
    }
    let line = self.grid.cursor.point.line.0 as usize;
    self.damage.damage_line(line, old_col, self.grid.cursor.point.column.0);
}
/// Snapshot the cursor so DECRC can restore it later.
#[inline]
fn save_cursor_position(&mut self) {
    trace!("Saving cursor position");
    let snapshot = self.grid.cursor.clone();
    self.grid.saved_cursor = snapshot;
}
/// Restore the cursor saved by `save_cursor_position`.
#[inline]
fn restore_cursor_position(&mut self) {
    trace!("Restoring cursor position");
    // Repaint both the old and the restored cursor cells.
    self.damage_cursor();
    self.grid.cursor = self.grid.saved_cursor.clone();
    self.damage_cursor();
}
/// Clear part or all of the cursor's line, per the EL mode.
#[inline]
fn clear_line(&mut self, mode: ansi::LineClearMode) {
    trace!("Clearing line: {:?}", mode);
    let cursor = &self.grid.cursor;
    let bg = cursor.template.bg;
    let point = cursor.point;
    // Resolve the half-open column range [left, right) to clear.
    let (left, right) = match mode {
        // With a pending wrap the cursor is logically past the last column,
        // so there is nothing to its right to clear.
        ansi::LineClearMode::Right if cursor.input_needs_wrap => return,
        ansi::LineClearMode::Right => (point.column, Column(self.columns())),
        ansi::LineClearMode::Left => (Column(0), point.column + 1),
        ansi::LineClearMode::All => (Column(0), Column(self.columns())),
    };
    self.damage.damage_line(point.line.0 as usize, left.0, right.0 - 1);
    let row = &mut self.grid[point.line];
    for cell in &mut row[left..right] {
        *cell = bg.into();
    }
    // Drop the selection if it touches the cleared line.
    let range = self.grid.cursor.point.line..=self.grid.cursor.point.line;
    self.selection = self.selection.take().filter(|s| !s.intersects_range(range));
}
/// Set the indexed color value.
#[inline]
fn set_color(&mut self, index: usize, color: Rgb) {
    trace!("Setting color[{}] = {:?}", index, color);
    // A repaint is only needed when the value actually changed, and cursor
    // color changes never require one.
    let changed = self.colors[index] != Some(color);
    if changed && index != NamedColor::Cursor as usize {
        self.mark_fully_damaged();
    }
    self.colors[index] = Some(color);
}
/// Respond to a color query escape sequence.
#[inline]
fn dynamic_color_sequence(&mut self, prefix: String, index: usize, terminator: &str) {
    trace!("Requested write of escape sequence for color code {}: color[{}]", prefix, index);
    // Owned copy so the reply closure can outlive this call.
    let terminator = terminator.to_owned();
    self.event_proxy.send_event(Event::ColorRequest(
        index,
        Arc::new(move |color| {
            // Each 8-bit channel is printed twice ({1:02x}{1:02x}, ...) to
            // scale it to the 16-bit `rgb:rrrr/gggg/bbbb` reply form.
            format!(
                "\x1b]{};rgb:{1:02x}{1:02x}/{2:02x}{2:02x}/{3:02x}{3:02x}{4}",
                prefix, color.r, color.g, color.b, terminator
            )
        }),
    ));
}
/// Reset the indexed color to original value.
#[inline]
fn reset_color(&mut self, index: usize) {
    trace!("Resetting color[{}]", index);
    // A repaint is only needed when an override was present, and cursor
    // color changes never require one.
    let had_override = self.colors[index].is_some();
    if had_override && index != NamedColor::Cursor as usize {
        self.mark_fully_damaged();
    }
    self.colors[index] = None;
}
/// Store data into clipboard.
#[inline]
fn clipboard_store(&mut self, clipboard: u8, base64: &[u8]) {
    // Respect the user's OSC 52 write permission.
    if !matches!(self.config.osc52, Osc52::OnlyCopy | Osc52::CopyPaste) {
        debug!("Denied osc52 store");
        return;
    }
    let clipboard_type = match clipboard {
        b'c' => ClipboardType::Clipboard,
        b'p' | b's' => ClipboardType::Selection,
        _ => return,
    };
    // Payloads that are not valid base64-encoded UTF-8 are silently ignored.
    let decoded = Base64.decode(base64).ok().and_then(|bytes| String::from_utf8(bytes).ok());
    if let Some(text) = decoded {
        self.event_proxy.send_event(Event::ClipboardStore(clipboard_type, text));
    }
}
/// Load data from clipboard.
#[inline]
fn clipboard_load(&mut self, clipboard: u8, terminator: &str) {
    // Respect the user's OSC 52 read permission.
    if !matches!(self.config.osc52, Osc52::OnlyPaste | Osc52::CopyPaste) {
        debug!("Denied osc52 load");
        return;
    }
    let clipboard_type = match clipboard {
        b'c' => ClipboardType::Clipboard,
        b'p' | b's' => ClipboardType::Selection,
        _ => return,
    };
    // Owned copy so the reply closure can outlive this call; the reply is
    // produced lazily once the clipboard contents arrive.
    let terminator = terminator.to_owned();
    self.event_proxy.send_event(Event::ClipboardLoad(
        clipboard_type,
        Arc::new(move |text| {
            format!("\x1b]52;{};{}{}", clipboard as char, Base64.encode(text), terminator)
        }),
    ));
}
/// Clear part or all of the screen, per the ED mode.
#[inline]
fn clear_screen(&mut self, mode: ansi::ClearMode) {
    trace!("Clearing screen: {:?}", mode);
    // Cleared cells take on the current template background.
    let bg = self.grid.cursor.template.bg;
    let screen_lines = self.screen_lines();
    match mode {
        ansi::ClearMode::Above => {
            let cursor = self.grid.cursor.point;
            // If clearing more than one line.
            if cursor.line > 1 {
                // Fully clear all lines before the current line.
                self.grid.reset_region(..cursor.line);
            }
            // Clear up to the current column in the current line.
            let end = cmp::min(cursor.column + 1, Column(self.columns()));
            for cell in &mut self.grid[cursor.line][..end] {
                *cell = bg.into();
            }
            // Drop any selection overlapping the cleared region.
            let range = Line(0)..=cursor.line;
            self.selection = self.selection.take().filter(|s| !s.intersects_range(range));
        },
        ansi::ClearMode::Below => {
            let cursor = self.grid.cursor.point;
            // Clear from the cursor to the end of its line.
            for cell in &mut self.grid[cursor.line][cursor.column..] {
                *cell = bg.into();
            }
            // Fully clear all lines below the cursor's line.
            if (cursor.line.0 as usize) < screen_lines - 1 {
                self.grid.reset_region((cursor.line + 1)..);
            }
            let range = cursor.line..Line(screen_lines as i32);
            self.selection = self.selection.take().filter(|s| !s.intersects_range(range));
        },
        ansi::ClearMode::All => {
            if self.mode.contains(TermMode::ALT_SCREEN) {
                // The alt screen is wiped in place.
                self.grid.reset_region(..);
            } else {
                // On the primary screen the viewport is pushed into history
                // rather than destroyed.
                let old_offset = self.grid.display_offset();
                self.grid.clear_viewport();
                // Compute number of lines scrolled by clearing the viewport.
                let lines = self.grid.display_offset().saturating_sub(old_offset);
                self.vi_mode_cursor.point.line =
                    (self.vi_mode_cursor.point.line - lines).grid_clamp(self, Boundary::Grid);
            }
            self.selection = None;
        },
        ansi::ClearMode::Saved if self.history_size() > 0 => {
            self.grid.clear_history();
            // Keep the vi cursor inside the now-smaller grid.
            self.vi_mode_cursor.point.line =
                self.vi_mode_cursor.point.line.grid_clamp(self, Boundary::Cursor);
            self.selection = self.selection.take().filter(|s| !s.intersects_range(..Line(0)));
        },
        // We have no history to clear.
        ansi::ClearMode::Saved => (),
    }
    self.mark_fully_damaged();
}
/// Clear one or all tabstops, per the TBC mode.
#[inline]
fn clear_tabs(&mut self, mode: ansi::TabulationClearMode) {
    trace!("Clearing tabs: {:?}", mode);
    match mode {
        ansi::TabulationClearMode::All => self.tabs.clear_all(),
        ansi::TabulationClearMode::Current => {
            let column = self.grid.cursor.point.column;
            self.tabs[column] = false;
        },
    }
}
/// Reset all important fields in the term struct.
#[inline]
fn reset_state(&mut self) {
    // Make sure the primary grid is active before resetting both grids.
    if self.mode.contains(TermMode::ALT_SCREEN) {
        mem::swap(&mut self.grid, &mut self.inactive_grid);
    }
    self.active_charset = Default::default();
    self.cursor_style = None;
    self.grid.reset();
    self.inactive_grid.reset();
    // Scroll region reverts to the full screen.
    self.scroll_region = Line(0)..Line(self.screen_lines() as i32);
    self.tabs = TabStops::new(self.columns());
    self.title_stack = Vec::new();
    self.title = None;
    self.selection = None;
    self.vi_mode_cursor = Default::default();
    self.keyboard_mode_stack = Default::default();
    self.inactive_keyboard_mode_stack = Default::default();
    // Preserve vi mode across resets.
    self.mode &= TermMode::VI;
    self.mode.insert(TermMode::default());
    // The mode was rebuilt; let the UI re-evaluate cursor blinking, and
    // repaint everything.
    self.event_proxy.send_event(Event::CursorBlinkingChange);
    self.mark_fully_damaged();
}
#[inline]
fn reverse_index(&mut self) {
    trace!("Reversing index");
    // If cursor is at the top of the scroll region, scroll content down
    // instead of moving the cursor.
    if self.grid.cursor.point.line == self.scroll_region.start {
        self.scroll_down(1);
        return;
    }
    self.damage_cursor();
    self.grid.cursor.point.line = cmp::max(self.grid.cursor.point.line - 1, Line(0));
    self.damage_cursor();
}
/// Attach (or clear) the hyperlink applied to subsequently written cells.
#[inline]
fn set_hyperlink(&mut self, hyperlink: Option<Hyperlink>) {
    trace!("Setting hyperlink: {:?}", hyperlink);
    let link = hyperlink.map(Into::into);
    self.grid.cursor.template.set_hyperlink(link);
}
/// Set a terminal attribute.
///
/// Applies an SGR attribute to the cursor's cell template, which every
/// subsequently written cell is stamped from.
#[inline]
fn terminal_attribute(&mut self, attr: Attr) {
    trace!("Setting attribute: {:?}", attr);
    let cursor = &mut self.grid.cursor;
    match attr {
        Attr::Foreground(color) => cursor.template.fg = color,
        Attr::Background(color) => cursor.template.bg = color,
        Attr::UnderlineColor(color) => cursor.template.set_underline_color(color),
        // SGR 0: restore default colors and drop every flag.
        Attr::Reset => {
            cursor.template.fg = Color::Named(NamedColor::Foreground);
            cursor.template.bg = Color::Named(NamedColor::Background);
            cursor.template.flags = Flags::empty();
            cursor.template.set_underline_color(None);
        },
        Attr::Reverse => cursor.template.flags.insert(Flags::INVERSE),
        Attr::CancelReverse => cursor.template.flags.remove(Flags::INVERSE),
        Attr::Bold => cursor.template.flags.insert(Flags::BOLD),
        Attr::CancelBold => cursor.template.flags.remove(Flags::BOLD),
        Attr::Dim => cursor.template.flags.insert(Flags::DIM),
        Attr::CancelBoldDim => cursor.template.flags.remove(Flags::BOLD | Flags::DIM),
        Attr::Italic => cursor.template.flags.insert(Flags::ITALIC),
        Attr::CancelItalic => cursor.template.flags.remove(Flags::ITALIC),
        // Underline styles are mutually exclusive: clear all underline
        // variants before setting the requested one.
        Attr::Underline => {
            cursor.template.flags.remove(Flags::ALL_UNDERLINES);
            cursor.template.flags.insert(Flags::UNDERLINE);
        },
        Attr::DoubleUnderline => {
            cursor.template.flags.remove(Flags::ALL_UNDERLINES);
            cursor.template.flags.insert(Flags::DOUBLE_UNDERLINE);
        },
        Attr::Undercurl => {
            cursor.template.flags.remove(Flags::ALL_UNDERLINES);
            cursor.template.flags.insert(Flags::UNDERCURL);
        },
        Attr::DottedUnderline => {
            cursor.template.flags.remove(Flags::ALL_UNDERLINES);
            cursor.template.flags.insert(Flags::DOTTED_UNDERLINE);
        },
        Attr::DashedUnderline => {
            cursor.template.flags.remove(Flags::ALL_UNDERLINES);
            cursor.template.flags.insert(Flags::DASHED_UNDERLINE);
        },
        Attr::CancelUnderline => cursor.template.flags.remove(Flags::ALL_UNDERLINES),
        Attr::Hidden => cursor.template.flags.insert(Flags::HIDDEN),
        Attr::CancelHidden => cursor.template.flags.remove(Flags::HIDDEN),
        Attr::Strike => cursor.template.flags.insert(Flags::STRIKEOUT),
        Attr::CancelStrike => cursor.template.flags.remove(Flags::STRIKEOUT),
        _ => {
            debug!("Term got unhandled attr: {:?}", attr);
        },
    }
}
/// Set a DEC private mode (`CSI ? Pm h`).
#[inline]
fn set_private_mode(&mut self, mode: PrivateMode) {
    let mode = match mode {
        PrivateMode::Named(mode) => mode,
        PrivateMode::Unknown(mode) => {
            debug!("Ignoring unknown mode {} in set_private_mode", mode);
            return;
        },
    };
    trace!("Setting private mode: {:?}", mode);
    match mode {
        NamedPrivateMode::UrgencyHints => self.mode.insert(TermMode::URGENCY_HINTS),
        NamedPrivateMode::SwapScreenAndSetRestoreCursor => {
            // Only switch when not already on the alt screen.
            if !self.mode.contains(TermMode::ALT_SCREEN) {
                self.swap_alt();
            }
        },
        NamedPrivateMode::ShowCursor => self.mode.insert(TermMode::SHOW_CURSOR),
        NamedPrivateMode::CursorKeys => self.mode.insert(TermMode::APP_CURSOR),
        // Mouse protocols are mutually exclusive.
        NamedPrivateMode::ReportMouseClicks => {
            self.mode.remove(TermMode::MOUSE_MODE);
            self.mode.insert(TermMode::MOUSE_REPORT_CLICK);
            self.event_proxy.send_event(Event::MouseCursorDirty);
        },
        NamedPrivateMode::ReportCellMouseMotion => {
            self.mode.remove(TermMode::MOUSE_MODE);
            self.mode.insert(TermMode::MOUSE_DRAG);
            self.event_proxy.send_event(Event::MouseCursorDirty);
        },
        NamedPrivateMode::ReportAllMouseMotion => {
            self.mode.remove(TermMode::MOUSE_MODE);
            self.mode.insert(TermMode::MOUSE_MOTION);
            self.event_proxy.send_event(Event::MouseCursorDirty);
        },
        NamedPrivateMode::ReportFocusInOut => self.mode.insert(TermMode::FOCUS_IN_OUT),
        NamedPrivateMode::BracketedPaste => self.mode.insert(TermMode::BRACKETED_PASTE),
        // Mouse encodings are mutually exclusive.
        NamedPrivateMode::SgrMouse => {
            self.mode.remove(TermMode::UTF8_MOUSE);
            self.mode.insert(TermMode::SGR_MOUSE);
        },
        NamedPrivateMode::Utf8Mouse => {
            self.mode.remove(TermMode::SGR_MOUSE);
            self.mode.insert(TermMode::UTF8_MOUSE);
        },
        NamedPrivateMode::AlternateScroll => self.mode.insert(TermMode::ALTERNATE_SCROLL),
        NamedPrivateMode::LineWrap => self.mode.insert(TermMode::LINE_WRAP),
        NamedPrivateMode::Origin => self.mode.insert(TermMode::ORIGIN),
        NamedPrivateMode::ColumnMode => self.deccolm(),
        NamedPrivateMode::BlinkingCursor => {
            let style = self.cursor_style.get_or_insert(self.config.default_cursor_style);
            style.blinking = true;
            self.event_proxy.send_event(Event::CursorBlinkingChange);
        },
        // Synchronized updates are handled upstream of this handler.
        NamedPrivateMode::SyncUpdate => (),
    }
}
/// Unset a DEC private mode (`CSI ? Pm l`).
#[inline]
fn unset_private_mode(&mut self, mode: PrivateMode) {
    let mode = match mode {
        PrivateMode::Named(mode) => mode,
        PrivateMode::Unknown(mode) => {
            debug!("Ignoring unknown mode {} in unset_private_mode", mode);
            return;
        },
    };
    trace!("Unsetting private mode: {:?}", mode);
    match mode {
        NamedPrivateMode::UrgencyHints => self.mode.remove(TermMode::URGENCY_HINTS),
        NamedPrivateMode::SwapScreenAndSetRestoreCursor => {
            // Only switch back when currently on the alt screen.
            if self.mode.contains(TermMode::ALT_SCREEN) {
                self.swap_alt();
            }
        },
        NamedPrivateMode::ShowCursor => self.mode.remove(TermMode::SHOW_CURSOR),
        NamedPrivateMode::CursorKeys => self.mode.remove(TermMode::APP_CURSOR),
        NamedPrivateMode::ReportMouseClicks => {
            self.mode.remove(TermMode::MOUSE_REPORT_CLICK);
            self.event_proxy.send_event(Event::MouseCursorDirty);
        },
        NamedPrivateMode::ReportCellMouseMotion => {
            self.mode.remove(TermMode::MOUSE_DRAG);
            self.event_proxy.send_event(Event::MouseCursorDirty);
        },
        NamedPrivateMode::ReportAllMouseMotion => {
            self.mode.remove(TermMode::MOUSE_MOTION);
            self.event_proxy.send_event(Event::MouseCursorDirty);
        },
        NamedPrivateMode::ReportFocusInOut => self.mode.remove(TermMode::FOCUS_IN_OUT),
        NamedPrivateMode::BracketedPaste => self.mode.remove(TermMode::BRACKETED_PASTE),
        NamedPrivateMode::SgrMouse => self.mode.remove(TermMode::SGR_MOUSE),
        NamedPrivateMode::Utf8Mouse => self.mode.remove(TermMode::UTF8_MOUSE),
        NamedPrivateMode::AlternateScroll => self.mode.remove(TermMode::ALTERNATE_SCROLL),
        NamedPrivateMode::LineWrap => self.mode.remove(TermMode::LINE_WRAP),
        NamedPrivateMode::Origin => self.mode.remove(TermMode::ORIGIN),
        NamedPrivateMode::ColumnMode => self.deccolm(),
        NamedPrivateMode::BlinkingCursor => {
            let style = self.cursor_style.get_or_insert(self.config.default_cursor_style);
            style.blinking = false;
            self.event_proxy.send_event(Event::CursorBlinkingChange);
        },
        // Synchronized updates are handled upstream of this handler.
        NamedPrivateMode::SyncUpdate => (),
    }
}
/// Report a DEC private mode's state (DECRQM reply, `CSI ? Ps ; Pm $ y`).
#[inline]
fn report_private_mode(&mut self, mode: PrivateMode) {
    trace!("Reporting private mode {mode:?}");
    // Map each queryable mode onto its current Set/Reset state.
    let state = match mode {
        PrivateMode::Named(mode) => match mode {
            NamedPrivateMode::CursorKeys => self.mode.contains(TermMode::APP_CURSOR).into(),
            NamedPrivateMode::Origin => self.mode.contains(TermMode::ORIGIN).into(),
            NamedPrivateMode::LineWrap => self.mode.contains(TermMode::LINE_WRAP).into(),
            NamedPrivateMode::BlinkingCursor => {
                let style = self.cursor_style.get_or_insert(self.config.default_cursor_style);
                style.blinking.into()
            },
            NamedPrivateMode::ShowCursor => self.mode.contains(TermMode::SHOW_CURSOR).into(),
            NamedPrivateMode::ReportMouseClicks => {
                self.mode.contains(TermMode::MOUSE_REPORT_CLICK).into()
            },
            NamedPrivateMode::ReportCellMouseMotion => {
                self.mode.contains(TermMode::MOUSE_DRAG).into()
            },
            NamedPrivateMode::ReportAllMouseMotion => {
                self.mode.contains(TermMode::MOUSE_MOTION).into()
            },
            NamedPrivateMode::ReportFocusInOut => {
                self.mode.contains(TermMode::FOCUS_IN_OUT).into()
            },
            NamedPrivateMode::Utf8Mouse => self.mode.contains(TermMode::UTF8_MOUSE).into(),
            NamedPrivateMode::SgrMouse => self.mode.contains(TermMode::SGR_MOUSE).into(),
            NamedPrivateMode::AlternateScroll => {
                self.mode.contains(TermMode::ALTERNATE_SCROLL).into()
            },
            NamedPrivateMode::UrgencyHints => {
                self.mode.contains(TermMode::URGENCY_HINTS).into()
            },
            NamedPrivateMode::SwapScreenAndSetRestoreCursor => {
                self.mode.contains(TermMode::ALT_SCREEN).into()
            },
            NamedPrivateMode::BracketedPaste => {
                self.mode.contains(TermMode::BRACKETED_PASTE).into()
            },
            // Sync updates are consumed upstream, so always report Reset.
            NamedPrivateMode::SyncUpdate => ModeState::Reset,
            NamedPrivateMode::ColumnMode => ModeState::NotSupported,
        },
        PrivateMode::Unknown(_) => ModeState::NotSupported,
    };
    self.event_proxy.send_event(Event::PtyWrite(format!(
        "\x1b[?{};{}$y",
        mode.raw(),
        state as u8,
    )));
}
/// Set an ANSI (public) mode (`CSI Pm h`).
#[inline]
fn set_mode(&mut self, mode: ansi::Mode) {
    let named = match mode {
        ansi::Mode::Named(named) => named,
        ansi::Mode::Unknown(raw) => {
            debug!("Ignoring unknown mode {} in set_mode", raw);
            return;
        },
    };
    trace!("Setting public mode: {:?}", named);
    let flag = match named {
        NamedMode::Insert => TermMode::INSERT,
        NamedMode::LineFeedNewLine => TermMode::LINE_FEED_NEW_LINE,
    };
    self.mode.insert(flag);
}
/// Unset an ANSI (public) mode (`CSI Pm l`).
#[inline]
fn unset_mode(&mut self, mode: ansi::Mode) {
    let mode = match mode {
        ansi::Mode::Named(mode) => mode,
        ansi::Mode::Unknown(mode) => {
            debug!("Ignoring unknown mode {} in unset_mode", mode);
            return;
        },
    };
    // FIX: log message previously said "Setting public mode" — a copy-paste
    // from `set_mode` — which made traces of this path misleading.
    trace!("Unsetting public mode: {:?}", mode);
    match mode {
        NamedMode::Insert => {
            self.mode.remove(TermMode::INSERT);
            // Cells shifted while in insert mode need a full repaint.
            self.mark_fully_damaged();
        },
        NamedMode::LineFeedNewLine => self.mode.remove(TermMode::LINE_FEED_NEW_LINE),
    }
}
/// Report an ANSI mode's state (DECRQM reply for public modes).
#[inline]
fn report_mode(&mut self, mode: ansi::Mode) {
    trace!("Reporting mode {mode:?}");
    let state = match mode {
        ansi::Mode::Named(NamedMode::Insert) => self.mode.contains(TermMode::INSERT).into(),
        ansi::Mode::Named(NamedMode::LineFeedNewLine) => {
            self.mode.contains(TermMode::LINE_FEED_NEW_LINE).into()
        },
        ansi::Mode::Unknown(_) => ModeState::NotSupported,
    };
    self.event_proxy.send_event(Event::PtyWrite(format!(
        "\x1b[{};{}$y",
        mode.raw(),
        state as u8,
    )));
}
/// Set the scrolling region (DECSTBM); `top`/`bottom` are 1-based,
/// inclusive line numbers.
#[inline]
fn set_scrolling_region(&mut self, top: usize, bottom: Option<usize>) {
    // Fallback to the last line as default.
    let bottom = bottom.unwrap_or_else(|| self.screen_lines());
    if top >= bottom {
        debug!("Invalid scrolling region: ({};{})", top, bottom);
        return;
    }
    // Bottom should be included in the range, but range end is not
    // usually included. One option would be to use an inclusive
    // range, but instead we just let the open range end be 1
    // higher.
    let start = Line(top as i32 - 1);
    let end = Line(bottom as i32);
    trace!("Setting scrolling region: ({};{})", start, end);
    // Clamp both edges to the screen height.
    let screen_lines = Line(self.screen_lines() as i32);
    self.scroll_region.start = cmp::min(start, screen_lines);
    self.scroll_region.end = cmp::min(end, screen_lines);
    // DECSTBM homes the cursor.
    self.goto(0, 0);
}
/// Enable DECKPAM (application keypad).
#[inline]
fn set_keypad_application_mode(&mut self) {
    trace!("Setting keypad application mode");
    self.mode |= TermMode::APP_KEYPAD;
}
/// Disable DECKPAM (back to numeric keypad).
#[inline]
fn unset_keypad_application_mode(&mut self) {
    trace!("Unsetting keypad application mode");
    self.mode.remove(TermMode::APP_KEYPAD);
}
/// Assign a standard charset to one of the cursor's charset slots.
#[inline]
fn configure_charset(&mut self, index: CharsetIndex, charset: StandardCharset) {
    trace!("Configuring charset {:?} as {:?}", index, charset);
    let slot = &mut self.grid.cursor.charsets[index];
    *slot = charset;
}
/// Select which charset slot is used when mapping written characters.
#[inline]
fn set_active_charset(&mut self, index: CharsetIndex) {
    trace!("Setting active charset {:?}", index);
    self.active_charset = index;
}
/// Replace (or reset, with `None`) the cursor style.
#[inline]
fn set_cursor_style(&mut self, style: Option<CursorStyle>) {
    trace!("Setting cursor style {:?}", style);
    self.cursor_style = style;
    // Blinking may have changed; let the UI re-evaluate it.
    self.event_proxy.send_event(Event::CursorBlinkingChange);
}
/// Change only the cursor's shape, keeping its other style attributes.
#[inline]
fn set_cursor_shape(&mut self, shape: CursorShape) {
    trace!("Setting cursor shape {:?}", shape);
    let default_style = self.config.default_cursor_style;
    self.cursor_style.get_or_insert(default_style).shape = shape;
}
/// Set (or reset, with `None`) the window title.
#[inline]
fn set_title(&mut self, title: Option<String>) {
    trace!("Setting title to '{:?}'", title);
    self.title.clone_from(&title);
    // `None` means "restore the default title".
    self.event_proxy.send_event(title.map_or(Event::ResetTitle, Event::Title));
}
/// Push the current title onto the title stack.
#[inline]
fn push_title(&mut self) {
    trace!("Pushing '{:?}' onto title stack", self.title);
    // Evict the oldest entry once the stack reaches its depth limit.
    if self.title_stack.len() >= TITLE_STACK_MAX_DEPTH {
        let removed = self.title_stack.remove(0);
        trace!(
            "Removing '{:?}' from bottom of title stack that exceeds its maximum depth",
            removed
        );
    }
    self.title_stack.push(self.title.clone());
}
/// Pop a title off the stack and make it current; no-op on an empty stack.
#[inline]
fn pop_title(&mut self) {
    trace!("Attempting to pop title from stack...");
    let popped = match self.title_stack.pop() {
        Some(title) => title,
        None => return,
    };
    trace!("Title '{:?}' popped from stack", popped);
    self.set_title(popped);
}
/// Report the text area size in pixels (`CSI 14 t` family reply).
#[inline]
fn text_area_size_pixels(&mut self) {
    // The reply is computed lazily from the window size supplied by the
    // event consumer.
    self.event_proxy.send_event(Event::TextAreaSizeRequest(Arc::new(move |window_size| {
        let width = window_size.num_cols * window_size.cell_width;
        let height = window_size.num_lines * window_size.cell_height;
        format!("\x1b[4;{height};{width}t")
    })));
}
/// Report the text area size in character cells (`CSI 18 t` reply).
#[inline]
fn text_area_size_chars(&mut self) {
    let lines = self.screen_lines();
    let columns = self.columns();
    let text = format!("\x1b[8;{};{}t", lines, columns);
    self.event_proxy.send_event(Event::PtyWrite(text));
}
}
/// The state of the [`Mode`] and [`PrivateMode`].
///
/// The discriminants are emitted verbatim (`state as u8`) in the DECRQM
/// `$y` replies built by `report_mode`/`report_private_mode`.
#[repr(u8)]
#[derive(Debug, Clone, Copy)]
enum ModeState {
    /// The mode is not supported.
    NotSupported = 0,
    /// The mode is currently set.
    Set = 1,
    /// The mode is currently not set.
    Reset = 2,
}
impl From<bool> for ModeState {
    /// Map a mode predicate onto the DECRQM `Set`/`Reset` states.
    fn from(value: bool) -> Self {
        match value {
            true => Self::Set,
            false => Self::Reset,
        }
    }
}
/// Terminal version for escape sequence reports.
///
/// This returns the current terminal version as a unique number based on
/// alacritty_terminal's semver version. Each component is weighted by a
/// power of 100 so a higher semver version always yields a higher number.
fn version_number(mut version: &str) -> usize {
    // Drop any pre-release suffix ("1.2.3-rc1" -> "1.2.3").
    if let Some(separator) = version.rfind('-') {
        version = &version[..separator];
    }
    // Weigh components right-to-left: patch*1 + minor*100 + major*10000.
    version
        .split('.')
        .rev()
        .enumerate()
        .map(|(i, part)| usize::pow(100, i as u32) * part.parse::<usize>().unwrap_or(0))
        .sum()
}
/// Target buffer of an OSC 52 clipboard operation.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum ClipboardType {
    /// System clipboard (OSC 52 destination `c`).
    Clipboard,
    /// Selection buffer (OSC 52 destinations `p` and `s`).
    Selection,
}
/// Tabstop positions, one flag per terminal column.
struct TabStops {
    // `tabs[i]` is true when column `i` is a tabstop.
    tabs: Vec<bool>,
}
impl TabStops {
    /// Create default tabstops: one every `INITIAL_TABSTOPS` columns.
    #[inline]
    fn new(columns: usize) -> TabStops {
        TabStops { tabs: (0..columns).map(|i| i % INITIAL_TABSTOPS == 0).collect() }
    }

    /// Remove all tabstops.
    #[inline]
    fn clear_all(&mut self) {
        // Safe, idiomatic replacement for the previous
        // `unsafe { ptr::write_bytes(...) }`: `slice::fill` compiles down to
        // a memset for `bool` without any unsafe code.
        self.tabs.fill(false);
    }

    /// Increase tabstop capacity, placing default tabstops in new columns.
    #[inline]
    fn resize(&mut self, columns: usize) {
        // Continue the `INITIAL_TABSTOPS` pattern from the old length.
        let mut index = self.tabs.len();
        self.tabs.resize_with(columns, || {
            let is_tabstop = index % INITIAL_TABSTOPS == 0;
            index += 1;
            is_tabstop
        });
    }
}
impl Index<Column> for TabStops {
    type Output = bool;

    /// Whether the given column is a tabstop.
    fn index(&self, index: Column) -> &bool {
        &self.tabs[index.0]
    }
}
impl IndexMut<Column> for TabStops {
    /// Mutable access to the tabstop flag of the given column.
    fn index_mut(&mut self, index: Column) -> &mut bool {
        self.tabs.index_mut(index.0)
    }
}
/// Terminal cursor rendering information.
#[derive(Copy, Clone, PartialEq, Eq)]
pub struct RenderableCursor {
    /// Shape to draw; `Hidden` when the cursor should not be rendered.
    pub shape: CursorShape,
    /// Grid position of the cursor.
    pub point: Point,
}
impl RenderableCursor {
    /// Derive the renderable cursor (position + shape) from the terminal state.
    fn new<T>(term: &Term<T>) -> Self {
        // Cursor position: the vi cursor while vi mode is active, otherwise
        // the grid cursor.
        let vi_mode = term.mode().contains(TermMode::VI);
        let mut point = if vi_mode { term.vi_mode_cursor.point } else { term.grid.cursor.point };
        // Snap onto the leading cell of a fullwidth character rather than
        // its spacer cell.
        if term.grid[point].flags.contains(Flags::WIDE_CHAR_SPACER) {
            point.column -= 1;
        }
        // Cursor shape: hidden unless SHOW_CURSOR is set (the vi cursor is
        // always drawn).
        let shape = if !vi_mode && !term.mode().contains(TermMode::SHOW_CURSOR) {
            CursorShape::Hidden
        } else {
            term.cursor_style().shape
        };
        Self { shape, point }
    }
}
/// Visible terminal content.
///
/// This contains all content required to render the current terminal view.
pub struct RenderableContent<'a> {
    /// Iterator over the cells of the visible region.
    pub display_iter: GridIterator<'a, Cell>,
    /// Current selection, resolved to a concrete cell range.
    pub selection: Option<SelectionRange>,
    /// Cursor position and shape.
    pub cursor: RenderableCursor,
    /// Number of history lines scrolled into view.
    pub display_offset: usize,
    /// Active color palette.
    pub colors: &'a color::Colors,
    /// Terminal mode flags at render time.
    pub mode: TermMode,
}
impl<'a> RenderableContent<'a> {
    /// Collect everything needed to draw the current viewport of `term`.
    fn new<T>(term: &'a Term<T>) -> Self {
        let grid = term.grid();
        Self {
            display_iter: grid.display_iter(),
            display_offset: grid.display_offset(),
            cursor: RenderableCursor::new(term),
            selection: term.selection.as_ref().and_then(|s| s.to_range(term)),
            colors: &term.colors,
            mode: *term.mode(),
        }
    }
}
/// Terminal test helpers.
pub mod test {
    use super::*;

    #[cfg(feature = "serde")]
    use serde::{Deserialize, Serialize};

    use crate::event::VoidListener;

    /// Fixed terminal dimensions for constructing test terminals.
    #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
    pub struct TermSize {
        pub columns: usize,
        pub screen_lines: usize,
    }

    impl TermSize {
        pub fn new(columns: usize, screen_lines: usize) -> Self {
            Self { columns, screen_lines }
        }
    }

    impl Dimensions for TermSize {
        // Test terminals carry no scrollback, so the total line count
        // equals the screen line count.
        fn total_lines(&self) -> usize {
            self.screen_lines()
        }

        fn screen_lines(&self) -> usize {
            self.screen_lines
        }

        fn columns(&self) -> usize {
            self.columns
        }
    }

    /// Construct a terminal from its content as string.
    ///
    /// A `\n` will break line and `\r\n` will break line without wrapping.
    ///
    /// # Examples
    ///
    /// ```rust
    /// use alacritty_terminal::term::test::mock_term;
    ///
    /// // Create a terminal with the following cells:
    /// //
    /// // [h][e][l][l][o] <- WRAPLINE flag set
    /// // [:][)][ ][ ][ ]
    /// // [t][e][s][t][ ]
    /// mock_term(
    ///     "\
    ///     hello\n:)\r\ntest",
    /// );
    /// ```
    pub fn mock_term(content: &str) -> Term<VoidListener> {
        let lines: Vec<&str> = content.split('\n').collect();
        // The widest line (in display cells, counting fullwidth characters
        // as two) determines the column count.
        let num_cols = lines
            .iter()
            .map(|line| line.chars().filter(|c| *c != '\r').map(|c| c.width().unwrap()).sum())
            .max()
            .unwrap_or(0);

        // Create terminal with the appropriate dimensions.
        let size = TermSize::new(num_cols, lines.len());
        let mut term = Term::new(Config::default(), &size, VoidListener);

        // Fill terminal with content.
        for (line, text) in lines.iter().enumerate() {
            let line = Line(line as i32);
            // Lines broken by a bare `\n` (no `\r`) continue logically, so
            // their last cell gets the WRAPLINE flag.
            if !text.ends_with('\r') && line + 1 != lines.len() {
                term.grid[line][Column(num_cols - 1)].flags.insert(Flags::WRAPLINE);
            }

            let mut index = 0;
            for c in text.chars().take_while(|c| *c != '\r') {
                term.grid[line][Column(index)].c = c;

                // Handle fullwidth characters.
                let width = c.width().unwrap();
                if width == 2 {
                    term.grid[line][Column(index)].flags.insert(Flags::WIDE_CHAR);
                    term.grid[line][Column(index + 1)].flags.insert(Flags::WIDE_CHAR_SPACER);
                }

                index += width;
            }
        }

        term
    }
}
#[cfg(test)]
mod tests {
use super::*;
use std::mem;
use crate::event::VoidListener;
use crate::grid::{Grid, Scroll};
use crate::index::{Column, Point, Side};
use crate::selection::{Selection, SelectionType};
use crate::term::cell::{Cell, Flags};
use crate::term::test::TermSize;
use crate::vte::ansi::{self, CharsetIndex, Handler, StandardCharset};
#[test]
fn scroll_display_page_up() {
    let size = TermSize::new(5, 10);
    let mut term = Term::new(Config::default(), &size, VoidListener);

    // Create 11 lines of scrollback.
    for _ in 0..20 {
        term.newline();
    }

    // Scrollable amount to top is 11.
    // Scrolling the display also drags the vi mode cursor along (see asserts below).
    term.scroll_display(Scroll::PageUp);
    assert_eq!(term.vi_mode_cursor.point, Point::new(Line(-1), Column(0)));
    assert_eq!(term.grid.display_offset(), 10);

    // Scrollable amount to top is 1.
    term.scroll_display(Scroll::PageUp);
    assert_eq!(term.vi_mode_cursor.point, Point::new(Line(-2), Column(0)));
    assert_eq!(term.grid.display_offset(), 11);

    // Scrollable amount to top is 0; offset and cursor are unchanged.
    term.scroll_display(Scroll::PageUp);
    assert_eq!(term.vi_mode_cursor.point, Point::new(Line(-2), Column(0)));
    assert_eq!(term.grid.display_offset(), 11);
}
#[test]
fn scroll_display_page_down() {
let size = TermSize::new(5, 10);
let mut term = Term::new(Config::default(), &size, VoidListener);
// Create 11 lines of scrollback.
for _ in 0..20 {
term.newline();
}
// Change display_offset to topmost.
term.grid_mut().scroll_display(Scroll::Top);
term.vi_mode_cursor = ViModeCursor::new(Point::new(Line(-11), Column(0)));
// Scrollable amount to bottom is 11.
term.scroll_display(Scroll::PageDown);
assert_eq!(term.vi_mode_cursor.point, Point::new(Line(-1), Column(0)));
assert_eq!(term.grid.display_offset(), 1);
// Scrollable amount to bottom is 1.
term.scroll_display(Scroll::PageDown);
assert_eq!(term.vi_mode_cursor.point, Point::new(Line(0), Column(0)));
assert_eq!(term.grid.display_offset(), 0);
// Scrollable amount to bottom is 0.
term.scroll_display(Scroll::PageDown);
assert_eq!(term.vi_mode_cursor.point, Point::new(Line(0), Column(0)));
assert_eq!(term.grid.display_offset(), 0);
}
#[test]
fn simple_selection_works() {
let size = TermSize::new(5, 5);
let mut term = Term::new(Config::default(), &size, VoidListener);
let grid = term.grid_mut();
for i in 0..4 {
if i == 1 {
continue;
}
grid[Line(i)][Column(0)].c = '"';
for j in 1..4 {
grid[Line(i)][Column(j)].c = 'a';
}
grid[Line(i)][Column(4)].c = '"';
}
grid[Line(2)][Column(0)].c = ' ';
grid[Line(2)][Column(4)].c = ' ';
grid[Line(2)][Column(4)].flags.insert(Flags::WRAPLINE);
grid[Line(3)][Column(0)].c = ' ';
// Multiple lines contain an empty line.
term.selection = Some(Selection::new(
SelectionType::Simple,
Point { line: Line(0), column: Column(0) },
Side::Left,
));
if let Some(s) = term.selection.as_mut() {
s.update(Point { line: Line(2), column: Column(4) }, Side::Right);
}
assert_eq!(term.selection_to_string(), Some(String::from("\"aaa\"\n\n aaa ")));
// A wrapline.
term.selection = Some(Selection::new(
SelectionType::Simple,
Point { line: Line(2), column: Column(0) },
Side::Left,
));
if let Some(s) = term.selection.as_mut() {
s.update(Point { line: Line(3), column: Column(4) }, Side::Right);
}
assert_eq!(term.selection_to_string(), Some(String::from(" aaa aaa\"")));
}
#[test]
fn semantic_selection_works() {
let size = TermSize::new(5, 3);
let mut term = Term::new(Config::default(), &size, VoidListener);
let mut grid: Grid<Cell> = Grid::new(3, 5, 0);
for i in 0..5 {
for j in 0..2 {
grid[Line(j)][Column(i)].c = 'a';
}
}
grid[Line(0)][Column(0)].c = '"';
grid[Line(0)][Column(3)].c = '"';
grid[Line(1)][Column(2)].c = '"';
grid[Line(0)][Column(4)].flags.insert(Flags::WRAPLINE);
let mut escape_chars = String::from("\"");
mem::swap(&mut term.grid, &mut grid);
mem::swap(&mut term.config.semantic_escape_chars, &mut escape_chars);
{
term.selection = Some(Selection::new(
SelectionType::Semantic,
Point { line: Line(0), column: Column(1) },
Side::Left,
));
assert_eq!(term.selection_to_string(), Some(String::from("aa")));
}
{
term.selection = Some(Selection::new(
SelectionType::Semantic,
Point { line: Line(0), column: Column(4) },
Side::Left,
));
assert_eq!(term.selection_to_string(), Some(String::from("aaa")));
}
{
term.selection = Some(Selection::new(
SelectionType::Semantic,
Point { line: Line(1), column: Column(1) },
Side::Left,
));
assert_eq!(term.selection_to_string(), Some(String::from("aaa")));
}
}
#[test]
fn line_selection_works() {
let size = TermSize::new(5, 1);
let mut term = Term::new(Config::default(), &size, VoidListener);
let mut grid: Grid<Cell> = Grid::new(1, 5, 0);
for i in 0..5 {
grid[Line(0)][Column(i)].c = 'a';
}
grid[Line(0)][Column(0)].c = '"';
grid[Line(0)][Column(3)].c = '"';
mem::swap(&mut term.grid, &mut grid);
term.selection = Some(Selection::new(
SelectionType::Lines,
Point { line: Line(0), column: Column(3) },
Side::Left,
));
assert_eq!(term.selection_to_string(), Some(String::from("\"aa\"a\n")));
}
#[test]
fn block_selection_works() {
let size = TermSize::new(5, 5);
let mut term = Term::new(Config::default(), &size, VoidListener);
let grid = term.grid_mut();
for i in 1..4 {
grid[Line(i)][Column(0)].c = '"';
for j in 1..4 {
grid[Line(i)][Column(j)].c = 'a';
}
grid[Line(i)][Column(4)].c = '"';
}
grid[Line(2)][Column(2)].c = ' ';
grid[Line(2)][Column(4)].flags.insert(Flags::WRAPLINE);
grid[Line(3)][Column(4)].c = ' ';
term.selection = Some(Selection::new(
SelectionType::Block,
Point { line: Line(0), column: Column(3) },
Side::Left,
));
// The same column.
if let Some(s) = term.selection.as_mut() {
s.update(Point { line: Line(3), column: Column(3) }, Side::Right);
}
assert_eq!(term.selection_to_string(), Some(String::from("\na\na\na")));
// The first column.
if let Some(s) = term.selection.as_mut() {
s.update(Point { line: Line(3), column: Column(0) }, Side::Left);
}
assert_eq!(term.selection_to_string(), Some(String::from("\n\"aa\n\"a\n\"aa")));
// The last column.
if let Some(s) = term.selection.as_mut() {
s.update(Point { line: Line(3), column: Column(4) }, Side::Right);
}
assert_eq!(term.selection_to_string(), Some(String::from("\na\"\na\"\na")));
}
/// Check that the grid can be serialized back and forth losslessly.
///
/// This test is in the term module as opposed to the grid since we want to
/// test this property with a T=Cell.
#[test]
#[cfg(feature = "serde")]
fn grid_serde() {
let grid: Grid<Cell> = Grid::new(24, 80, 0);
let serialized = serde_json::to_string(&grid).expect("ser");
let deserialized = serde_json::from_str::<Grid<Cell>>(&serialized).expect("de");
assert_eq!(deserialized, grid);
}
#[test]
fn input_line_drawing_character() {
let size = TermSize::new(7, 17);
let mut term = Term::new(Config::default(), &size, VoidListener);
let cursor = Point::new(Line(0), Column(0));
term.configure_charset(CharsetIndex::G0, StandardCharset::SpecialCharacterAndLineDrawing);
term.input('a');
assert_eq!(term.grid()[cursor].c, '▒');
}
#[test]
fn clearing_viewport_keeps_history_position() {
let size = TermSize::new(10, 20);
let mut term = Term::new(Config::default(), &size, VoidListener);
// Create 10 lines of scrollback.
for _ in 0..29 {
term.newline();
}
// Change the display area.
term.scroll_display(Scroll::Top);
assert_eq!(term.grid.display_offset(), 10);
// Clear the viewport.
term.clear_screen(ansi::ClearMode::All);
assert_eq!(term.grid.display_offset(), 10);
}
#[test]
fn clearing_viewport_with_vi_mode_keeps_history_position() {
let size = TermSize::new(10, 20);
let mut term = Term::new(Config::default(), &size, VoidListener);
// Create 10 lines of scrollback.
for _ in 0..29 {
term.newline();
}
// Enable vi mode.
term.toggle_vi_mode();
// Change the display area and the vi cursor position.
term.scroll_display(Scroll::Top);
term.vi_mode_cursor.point = Point::new(Line(-5), Column(3));
assert_eq!(term.grid.display_offset(), 10);
// Clear the viewport.
term.clear_screen(ansi::ClearMode::All);
assert_eq!(term.grid.display_offset(), 10);
assert_eq!(term.vi_mode_cursor.point, Point::new(Line(-5), Column(3)));
}
#[test]
fn clearing_scrollback_resets_display_offset() {
let size = TermSize::new(10, 20);
let mut term = Term::new(Config::default(), &size, VoidListener);
// Create 10 lines of scrollback.
for _ in 0..29 {
term.newline();
}
// Change the display area.
term.scroll_display(Scroll::Top);
assert_eq!(term.grid.display_offset(), 10);
// Clear the scrollback buffer.
term.clear_screen(ansi::ClearMode::Saved);
assert_eq!(term.grid.display_offset(), 0);
}
#[test]
fn clearing_scrollback_sets_vi_cursor_into_viewport() {
let size = TermSize::new(10, 20);
let mut term = Term::new(Config::default(), &size, VoidListener);
// Create 10 lines of scrollback.
for _ in 0..29 {
term.newline();
}
// Enable vi mode.
term.toggle_vi_mode();
// Change the display area and the vi cursor position.
term.scroll_display(Scroll::Top);
term.vi_mode_cursor.point = Point::new(Line(-5), Column(3));
assert_eq!(term.grid.display_offset(), 10);
// Clear the scrollback buffer.
term.clear_screen(ansi::ClearMode::Saved);
assert_eq!(term.grid.display_offset(), 0);
assert_eq!(term.vi_mode_cursor.point, Point::new(Line(0), Column(3)));
}
#[test]
fn clear_saved_lines() {
let size = TermSize::new(7, 17);
let mut term = Term::new(Config::default(), &size, VoidListener);
// Add one line of scrollback.
term.grid.scroll_up(&(Line(0)..Line(1)), 1);
// Clear the history.
term.clear_screen(ansi::ClearMode::Saved);
// Make sure that scrolling does not change the grid.
let mut scrolled_grid = term.grid.clone();
scrolled_grid.scroll_display(Scroll::Top);
// Truncate grids for comparison.
scrolled_grid.truncate();
term.grid.truncate();
assert_eq!(term.grid, scrolled_grid);
}
#[test]
fn vi_cursor_keep_pos_on_scrollback_buffer() {
let size = TermSize::new(5, 10);
let mut term = Term::new(Config::default(), &size, VoidListener);
// Create 11 lines of scrollback.
for _ in 0..20 {
term.newline();
}
// Enable vi mode.
term.toggle_vi_mode();
term.scroll_display(Scroll::Top);
term.vi_mode_cursor.point.line = Line(-11);
term.linefeed();
assert_eq!(term.vi_mode_cursor.point.line, Line(-12));
}
#[test]
fn grow_lines_updates_active_cursor_pos() {
let mut size = TermSize::new(100, 10);
let mut term = Term::new(Config::default(), &size, VoidListener);
// Create 10 lines of scrollback.
for _ in 0..19 {
term.newline();
}
assert_eq!(term.history_size(), 10);
assert_eq!(term.grid.cursor.point, Point::new(Line(9), Column(0)));
// Increase visible lines.
size.screen_lines = 30;
term.resize(size);
assert_eq!(term.history_size(), 0);
assert_eq!(term.grid.cursor.point, Point::new(Line(19), Column(0)));
}
#[test]
fn grow_lines_updates_inactive_cursor_pos() {
let mut size = TermSize::new(100, 10);
let mut term = Term::new(Config::default(), &size, VoidListener);
// Create 10 lines of scrollback.
for _ in 0..19 {
term.newline();
}
assert_eq!(term.history_size(), 10);
assert_eq!(term.grid.cursor.point, Point::new(Line(9), Column(0)));
// Enter alt screen.
term.set_private_mode(NamedPrivateMode::SwapScreenAndSetRestoreCursor.into());
// Increase visible lines.
size.screen_lines = 30;
term.resize(size);
// Leave alt screen.
term.unset_private_mode(NamedPrivateMode::SwapScreenAndSetRestoreCursor.into());
assert_eq!(term.history_size(), 0);
assert_eq!(term.grid.cursor.point, Point::new(Line(19), Column(0)));
}
#[test]
fn shrink_lines_updates_active_cursor_pos() {
let mut size = TermSize::new(100, 10);
let mut term = Term::new(Config::default(), &size, VoidListener);
// Create 10 lines of scrollback.
for _ in 0..19 {
term.newline();
}
assert_eq!(term.history_size(), 10);
assert_eq!(term.grid.cursor.point, Point::new(Line(9), Column(0)));
// Increase visible lines.
size.screen_lines = 5;
term.resize(size);
assert_eq!(term.history_size(), 15);
assert_eq!(term.grid.cursor.point, Point::new(Line(4), Column(0)));
}
#[test]
fn shrink_lines_updates_inactive_cursor_pos() {
let mut size = TermSize::new(100, 10);
let mut term = Term::new(Config::default(), &size, VoidListener);
// Create 10 lines of scrollback.
for _ in 0..19 {
term.newline();
}
assert_eq!(term.history_size(), 10);
assert_eq!(term.grid.cursor.point, Point::new(Line(9), Column(0)));
// Enter alt screen.
term.set_private_mode(NamedPrivateMode::SwapScreenAndSetRestoreCursor.into());
// Increase visible lines.
size.screen_lines = 5;
term.resize(size);
// Leave alt screen.
term.unset_private_mode(NamedPrivateMode::SwapScreenAndSetRestoreCursor.into());
assert_eq!(term.history_size(), 15);
assert_eq!(term.grid.cursor.point, Point::new(Line(4), Column(0)));
}
#[test]
fn damage_public_usage() {
    let size = TermSize::new(10, 10);
    let mut term = Term::new(Config::default(), &size, VoidListener);
    // Reset terminal for partial damage tests since it's initialized as fully damaged.
    term.reset_damage();

    // Feeding input through [`Term::input`] should damage the cursor's line.
    let left = term.grid.cursor.point.column.0;
    for character in "damage".chars() {
        term.input(character);
    }
    let right = term.grid.cursor.point.column.0;

    let TermDamage::Partial(mut damaged_lines) = term.damage() else {
        panic!("Expected partial damage, however got Full")
    };
    assert_eq!(damaged_lines.next(), Some(LineDamageBounds { line: 0, left, right }));
    assert_eq!(damaged_lines.next(), None);
    term.reset_damage();

    // Create scrollback.
    for _ in 0..20 {
        term.newline();
    }

    match term.damage() {
        TermDamage::Full => (),
        TermDamage::Partial(_) => panic!("Expected Full damage, however got Partial "),
    };
    term.reset_damage();

    term.scroll_display(Scroll::Delta(10));
    term.reset_damage();

    // No damage when scrolled into viewport.
    for idx in 0..term.columns() {
        term.goto(idx as i32, idx);
    }
    let TermDamage::Partial(mut damaged_lines) = term.damage() else {
        panic!("Expected partial damage, however got Full")
    };
    assert_eq!(damaged_lines.next(), None);

    // Scroll back into the viewport, so we have 2 visible lines which terminal can write
    // to.
    term.scroll_display(Scroll::Delta(-2));
    term.reset_damage();
    term.goto(0, 0);
    term.goto(1, 0);
    term.goto(2, 0);
    let display_offset = term.grid().display_offset();
    let TermDamage::Partial(mut damaged_lines) = term.damage() else {
        panic!("Expected partial damage, however got Full")
    };
    assert_eq!(
        damaged_lines.next(),
        Some(LineDamageBounds { line: display_offset, left: 0, right: 0 })
    );
    assert_eq!(
        damaged_lines.next(),
        Some(LineDamageBounds { line: display_offset + 1, left: 0, right: 0 })
    );
    assert_eq!(damaged_lines.next(), None);
}
#[test]
fn damage_cursor_movements() {
let size = TermSize::new(10, 10);
let mut term = Term::new(Config::default(), &size, VoidListener);
let num_cols = term.columns();
// Reset terminal for partial damage tests since it's initialized as fully damaged.
term.reset_damage();
term.goto(1, 1);
// NOTE While we can use `[Term::damage]` to access terminal damage information, in the
// following tests we will be accessing `term.damage.lines` directly to avoid adding extra
// damage information (like cursor and Vi cursor), which we're not testing.
assert_eq!(term.damage.lines[0], LineDamageBounds { line: 0, left: 0, right: 0 });
assert_eq!(term.damage.lines[1], LineDamageBounds { line: 1, left: 1, right: 1 });
term.damage.reset(num_cols);
term.move_forward(3);
assert_eq!(term.damage.lines[1], LineDamageBounds { line: 1, left: 1, right: 4 });
term.damage.reset(num_cols);
term.move_backward(8);
assert_eq!(term.damage.lines[1], LineDamageBounds { line: 1, left: 0, right: 4 });
term.goto(5, 5);
term.damage.reset(num_cols);
term.backspace();
term.backspace();
assert_eq!(term.damage.lines[5], LineDamageBounds { line: 5, left: 3, right: 5 });
term.damage.reset(num_cols);
term.move_up(1);
assert_eq!(term.damage.lines[5], LineDamageBounds { line: 5, left: 3, right: 3 });
assert_eq!(term.damage.lines[4], LineDamageBounds { line: 4, left: 3, right: 3 });
term.damage.reset(num_cols);
term.move_down(1);
term.move_down(1);
assert_eq!(term.damage.lines[4], LineDamageBounds { line: 4, left: 3, right: 3 });
assert_eq!(term.damage.lines[5], LineDamageBounds { line: 5, left: 3, right: 3 });
assert_eq!(term.damage.lines[6], LineDamageBounds { line: 6, left: 3, right: 3 });
term.damage.reset(num_cols);
term.wrapline();
assert_eq!(term.damage.lines[6], LineDamageBounds { line: 6, left: 3, right: 3 });
assert_eq!(term.damage.lines[7], LineDamageBounds { line: 7, left: 0, right: 0 });
term.move_forward(3);
term.move_up(1);
term.damage.reset(num_cols);
term.linefeed();
assert_eq!(term.damage.lines[6], LineDamageBounds { line: 6, left: 3, right: 3 });
assert_eq!(term.damage.lines[7], LineDamageBounds { line: 7, left: 3, right: 3 });
term.damage.reset(num_cols);
term.carriage_return();
assert_eq!(term.damage.lines[7], LineDamageBounds { line: 7, left: 0, right: 3 });
term.damage.reset(num_cols);
term.erase_chars(5);
assert_eq!(term.damage.lines[7], LineDamageBounds { line: 7, left: 0, right: 5 });
term.damage.reset(num_cols);
term.delete_chars(3);
let right = term.columns() - 1;
assert_eq!(term.damage.lines[7], LineDamageBounds { line: 7, left: 0, right });
term.move_forward(term.columns());
term.damage.reset(num_cols);
term.move_backward_tabs(1);
assert_eq!(term.damage.lines[7], LineDamageBounds { line: 7, left: 8, right });
term.save_cursor_position();
term.goto(1, 1);
term.damage.reset(num_cols);
term.restore_cursor_position();
assert_eq!(term.damage.lines[1], LineDamageBounds { line: 1, left: 1, right: 1 });
assert_eq!(term.damage.lines[7], LineDamageBounds { line: 7, left: 8, right: 8 });
term.damage.reset(num_cols);
term.clear_line(ansi::LineClearMode::All);
assert_eq!(term.damage.lines[7], LineDamageBounds { line: 7, left: 0, right });
term.damage.reset(num_cols);
term.clear_line(ansi::LineClearMode::Left);
assert_eq!(term.damage.lines[7], LineDamageBounds { line: 7, left: 0, right: 8 });
term.damage.reset(num_cols);
term.clear_line(ansi::LineClearMode::Right);
assert_eq!(term.damage.lines[7], LineDamageBounds { line: 7, left: 8, right });
term.damage.reset(num_cols);
term.reverse_index();
assert_eq!(term.damage.lines[7], LineDamageBounds { line: 7, left: 8, right: 8 });
assert_eq!(term.damage.lines[6], LineDamageBounds { line: 6, left: 8, right: 8 });
}
#[test]
fn full_damage() {
let size = TermSize::new(100, 10);
let mut term = Term::new(Config::default(), &size, VoidListener);
assert!(term.damage.full);
for _ in 0..20 {
term.newline();
}
term.reset_damage();
term.clear_screen(ansi::ClearMode::Above);
assert!(term.damage.full);
term.reset_damage();
term.scroll_display(Scroll::Top);
assert!(term.damage.full);
term.reset_damage();
// Sequential call to scroll display without doing anything shouldn't damage.
term.scroll_display(Scroll::Top);
assert!(!term.damage.full);
term.reset_damage();
term.set_options(Config::default());
assert!(term.damage.full);
term.reset_damage();
term.scroll_down_relative(Line(5), 2);
assert!(term.damage.full);
term.reset_damage();
term.scroll_up_relative(Line(3), 2);
assert!(term.damage.full);
term.reset_damage();
term.deccolm();
assert!(term.damage.full);
term.reset_damage();
term.decaln();
assert!(term.damage.full);
term.reset_damage();
term.set_mode(NamedMode::Insert.into());
// Just setting `Insert` mode shouldn't mark terminal as damaged.
assert!(!term.damage.full);
term.reset_damage();
let color_index = 257;
term.set_color(color_index, Rgb::default());
assert!(term.damage.full);
term.reset_damage();
// Setting the same color once again shouldn't trigger full damage.
term.set_color(color_index, Rgb::default());
assert!(!term.damage.full);
term.reset_color(color_index);
assert!(term.damage.full);
term.reset_damage();
// We shouldn't trigger fully damage when cursor gets update.
term.set_color(NamedColor::Cursor as usize, Rgb::default());
assert!(!term.damage.full);
// However requesting terminal damage should mark terminal as fully damaged in `Insert`
// mode.
let _ = term.damage();
assert!(term.damage.full);
term.reset_damage();
term.unset_mode(NamedMode::Insert.into());
assert!(term.damage.full);
term.reset_damage();
// Keep this as a last check, so we don't have to deal with restoring from alt-screen.
term.swap_alt();
assert!(term.damage.full);
term.reset_damage();
let size = TermSize::new(10, 10);
term.resize(size);
assert!(term.damage.full);
}
#[test]
fn window_title() {
let size = TermSize::new(7, 17);
let mut term = Term::new(Config::default(), &size, VoidListener);
// Title None by default.
assert_eq!(term.title, None);
// Title can be set.
term.set_title(Some("Test".into()));
assert_eq!(term.title, Some("Test".into()));
// Title can be pushed onto stack.
term.push_title();
term.set_title(Some("Next".into()));
assert_eq!(term.title, Some("Next".into()));
assert_eq!(term.title_stack.first().unwrap(), &Some("Test".into()));
// Title can be popped from stack and set as the window title.
term.pop_title();
assert_eq!(term.title, Some("Test".into()));
assert!(term.title_stack.is_empty());
// Title stack doesn't grow infinitely.
for _ in 0..4097 {
term.push_title();
}
assert_eq!(term.title_stack.len(), 4096);
// Title and title stack reset when terminal state is reset.
term.push_title();
term.reset_state();
assert_eq!(term.title, None);
assert!(term.title_stack.is_empty());
// Title stack pops back to default.
term.title = None;
term.push_title();
term.set_title(Some("Test".into()));
term.pop_title();
assert_eq!(term.title, None);
// Title can be reset to default.
term.title = Some("Test".into());
term.set_title(None);
assert_eq!(term.title, None);
}
#[test]
fn parse_cargo_version() {
    // `version_number` packs major/minor/patch into one comparable integer,
    // two decimal digits per component below major (e.g. 1.2.3 -> 1_02_03);
    // pre-release suffixes like `-dev` are ignored.
    assert!(version_number(env!("CARGO_PKG_VERSION")) >= 10_01);
    assert_eq!(version_number("0.0.1-dev"), 1);
    assert_eq!(version_number("0.1.2-dev"), 1_02);
    assert_eq!(version_number("1.2.3-dev"), 1_02_03);
    assert_eq!(version_number("999.99.99"), 9_99_99_99);
}
}
| rust | {
"argument_definitions": [
{
"definitions": [
" pub struct KeyboardModes : u8 {\n /// No keyboard protocol mode is set.\n const NO_MODE = 0b0000_0000;\n /// Report `Esc`, `alt` + `key`, `ctrl` + `key`, `ctrl` + `alt` + `key`, `shift`\n /// + `alt` + `key` keys using `CSI u` sequence instead of raw ones.\n const DISAMBIGUATE_ESC_CODES = 0b0000_0001;\n /// Report key presses, release, and repetition alongside the escape. Key events\n /// that result in text are reported as plain UTF-8, unless the\n /// [`Self::REPORT_ALL_KEYS_AS_ESC`] is enabled.\n const REPORT_EVENT_TYPES = 0b0000_0010;\n /// Additionally report shifted key an dbase layout key.\n const REPORT_ALTERNATE_KEYS = 0b0000_0100;\n /// Report every key as an escape sequence.\n const REPORT_ALL_KEYS_AS_ESC = 0b0000_1000;\n /// Report the text generated by the key event.\n const REPORT_ASSOCIATED_TEXT = 0b0001_0000;\n }"
],
"name": "value",
"type": "KeyboardModes"
}
],
"end_line": 110,
"name": "from",
"signature": "fn from(value: KeyboardModes) -> Self",
"start_line": 91
} | {
"class_name": "impl From<KeyboardModes> for TermMode {\n fn from(value: KeyboardModes) -> Self {\n let mut mode = Self::empty();\n\n let disambiguate_esc_codes = value.contains(KeyboardModes::DISAMBIGUATE_ESC_CODES);\n mode.set(TermMode::DISAMBIGUATE_ESC_CODES, disambiguate_esc_codes);\n\n let report_event_types = value.contains(KeyboardModes::REPORT_EVENT_TYPES);\n mode.set(TermMode::REPORT_EVENT_TYPES, report_event_types);\n\n let report_alternate_keys = value.contains(KeyboardModes::REPORT_ALTERNATE_KEYS);\n mode.set(TermMode::REPORT_ALTERNATE_KEYS, report_alternate_keys);\n\n let report_all_keys_as_esc = value.contains(KeyboardModes::REPORT_ALL_KEYS_AS_ESC);\n mode.set(TermMode::REPORT_ALL_KEYS_AS_ESC, report_all_keys_as_esc);\n\n let report_associated_text = value.contains(KeyboardModes::REPORT_ASSOCIATED_TEXT);\n mode.set(TermMode::REPORT_ASSOCIATED_TEXT, report_associated_text);\n\n mode\n }\n}",
"class_signature": "impl From<KeyboardModes> for TermMode"
} |
new | alacritty-master/alacritty_terminal/src/term/mod.rs | pub fn new(config: Config, dimensions: &D, event_proxy: T) -> Term<T> {
let num_cols = dimensions.columns();
let num_lines = dimensions.screen_lines();
let history_size = config.scrolling_history;
let grid = Grid::new(num_lines, num_cols, history_size);
let inactive_grid = Grid::new(num_lines, num_cols, 0);
let tabs = TabStops::new(grid.columns());
let scroll_region = Line(0)..Line(grid.screen_lines() as i32);
// Initialize terminal damage, covering the entire terminal upon launch.
let damage = TermDamageState::new(num_cols, num_lines);
Term {
inactive_grid,
scroll_region,
event_proxy,
damage,
config,
grid,
tabs,
inactive_keyboard_mode_stack: Default::default(),
keyboard_mode_stack: Default::default(),
active_charset: Default::default(),
vi_mode_cursor: Default::default(),
cursor_style: Default::default(),
colors: color::Colors::default(),
title_stack: Default::default(),
is_focused: Default::default(),
selection: Default::default(),
title: Default::default(),
mode: Default::default(),
}
} | //! Exports the `Term` type which is a high-level API for the Grid.
use std::ops::{Index, IndexMut, Range};
use std::sync::Arc;
use std::{cmp, mem, ptr, slice, str};
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
use base64::engine::general_purpose::STANDARD as Base64;
use base64::Engine;
use bitflags::bitflags;
use log::{debug, trace};
use unicode_width::UnicodeWidthChar;
use crate::event::{Event, EventListener};
use crate::grid::{Dimensions, Grid, GridIterator, Scroll};
use crate::index::{self, Boundary, Column, Direction, Line, Point, Side};
use crate::selection::{Selection, SelectionRange, SelectionType};
use crate::term::cell::{Cell, Flags, LineLength};
use crate::term::color::Colors;
use crate::vi_mode::{ViModeCursor, ViMotion};
use crate::vte::ansi::{
self, Attr, CharsetIndex, Color, CursorShape, CursorStyle, Handler, Hyperlink, KeyboardModes,
KeyboardModesApplyBehavior, NamedColor, NamedMode, NamedPrivateMode, PrivateMode, Rgb,
StandardCharset,
};
pub mod cell;
pub mod color;
pub mod search;
/// Minimum number of columns.
///
/// A minimum of 2 is necessary to hold fullwidth unicode characters.
pub const MIN_COLUMNS: usize = 2;
/// Minimum number of visible lines.
pub const MIN_SCREEN_LINES: usize = 1;
/// Max size of the window title stack.
const TITLE_STACK_MAX_DEPTH: usize = 4096;
/// Default semantic escape characters.
pub const SEMANTIC_ESCAPE_CHARS: &str = ",│`|:\"' ()[]{}<>\t";
/// Max size of the keyboard modes.
const KEYBOARD_MODE_STACK_MAX_DEPTH: usize = TITLE_STACK_MAX_DEPTH;
/// Default tab interval, corresponding to terminfo `it` value.
const INITIAL_TABSTOPS: usize = 8;
bitflags! {
    /// Terminal mode flags, toggled by escape sequences and inspected by input
    /// handling and rendering.
    #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
    pub struct TermMode: u32 {
        const NONE = 0;
        const SHOW_CURSOR = 1;
        const APP_CURSOR = 1 << 1;
        const APP_KEYPAD = 1 << 2;
        const MOUSE_REPORT_CLICK = 1 << 3;
        const BRACKETED_PASTE = 1 << 4;
        const SGR_MOUSE = 1 << 5;
        const MOUSE_MOTION = 1 << 6;
        const LINE_WRAP = 1 << 7;
        const LINE_FEED_NEW_LINE = 1 << 8;
        const ORIGIN = 1 << 9;
        const INSERT = 1 << 10;
        const FOCUS_IN_OUT = 1 << 11;
        const ALT_SCREEN = 1 << 12;
        const MOUSE_DRAG = 1 << 13;
        const UTF8_MOUSE = 1 << 14;
        const ALTERNATE_SCROLL = 1 << 15;
        const VI = 1 << 16;
        const URGENCY_HINTS = 1 << 17;
        // Kitty keyboard protocol flags; mirrored from `KeyboardModes`
        // (see `impl From<KeyboardModes> for TermMode`).
        const DISAMBIGUATE_ESC_CODES = 1 << 18;
        const REPORT_EVENT_TYPES = 1 << 19;
        const REPORT_ALTERNATE_KEYS = 1 << 20;
        const REPORT_ALL_KEYS_AS_ESC = 1 << 21;
        const REPORT_ASSOCIATED_TEXT = 1 << 22;
        // Composite masks covering related groups of flags.
        const MOUSE_MODE = Self::MOUSE_REPORT_CLICK.bits() | Self::MOUSE_MOTION.bits() | Self::MOUSE_DRAG.bits();
        const KITTY_KEYBOARD_PROTOCOL = Self::DISAMBIGUATE_ESC_CODES.bits()
            | Self::REPORT_EVENT_TYPES.bits()
            | Self::REPORT_ALTERNATE_KEYS.bits()
            | Self::REPORT_ALL_KEYS_AS_ESC.bits()
            | Self::REPORT_ASSOCIATED_TEXT.bits();
        const ANY = u32::MAX;
    }
}
impl From<KeyboardModes> for TermMode {
    /// Translate kitty keyboard protocol flags into their `TermMode` counterparts.
    fn from(value: KeyboardModes) -> Self {
        let mut mode = Self::empty();

        {
            // Each keyboard protocol flag maps 1:1 onto a terminal mode flag.
            let mut map_flag = |term_flag, keyboard_flag: KeyboardModes| {
                mode.set(term_flag, value.contains(keyboard_flag));
            };

            map_flag(TermMode::DISAMBIGUATE_ESC_CODES, KeyboardModes::DISAMBIGUATE_ESC_CODES);
            map_flag(TermMode::REPORT_EVENT_TYPES, KeyboardModes::REPORT_EVENT_TYPES);
            map_flag(TermMode::REPORT_ALTERNATE_KEYS, KeyboardModes::REPORT_ALTERNATE_KEYS);
            map_flag(TermMode::REPORT_ALL_KEYS_AS_ESC, KeyboardModes::REPORT_ALL_KEYS_AS_ESC);
            map_flag(TermMode::REPORT_ASSOCIATED_TEXT, KeyboardModes::REPORT_ASSOCIATED_TEXT);
        }

        mode
    }
}
impl Default for TermMode {
    /// Modes enabled on a freshly created terminal.
    fn default() -> TermMode {
        let mut mode = TermMode::SHOW_CURSOR;
        mode.insert(TermMode::LINE_WRAP);
        mode.insert(TermMode::ALTERNATE_SCROLL);
        mode.insert(TermMode::URGENCY_HINTS);
        mode
    }
}
/// Convert a terminal point to a viewport relative point.
///
/// Returns `None` when the point lies above the visible region.
#[inline]
pub fn point_to_viewport(display_offset: usize, point: Point) -> Option<Point<usize>> {
    // Points above the viewport yield a negative line and fail the conversion.
    let viewport_line = point.line.0 + display_offset as i32;
    match usize::try_from(viewport_line) {
        Ok(line) => Some(Point::new(line, point.column)),
        Err(_) => None,
    }
}
/// Convert a viewport relative point to a terminal point.
#[inline]
pub fn viewport_to_point(display_offset: usize, point: Point<usize>) -> Point {
let line = Line(point.line as i32) - display_offset;
Point::new(line, point.column)
}
/// Damage bounds tracked for a single terminal line.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct LineDamageBounds {
    /// Damaged line number.
    pub line: usize,
    /// Leftmost damaged column.
    pub left: usize,
    /// Rightmost damaged column.
    pub right: usize,
}

impl LineDamageBounds {
    #[inline]
    pub fn new(line: usize, left: usize, right: usize) -> Self {
        Self { line, left, right }
    }

    /// Bounds for a line without any damage.
    ///
    /// The undamaged state is encoded as `left > right`, so the first
    /// [`Self::expand`] snaps both bounds onto the damaged region.
    #[inline]
    pub fn undamaged(line: usize, num_cols: usize) -> Self {
        Self { line, left: num_cols, right: 0 }
    }

    #[inline]
    pub fn reset(&mut self, num_cols: usize) {
        *self = Self::undamaged(self.line, num_cols);
    }

    /// Grow the damage to cover at least the columns `left..=right`.
    #[inline]
    pub fn expand(&mut self, left: usize, right: usize) {
        self.left = self.left.min(left);
        self.right = self.right.max(right);
    }

    #[inline]
    pub fn is_damaged(&self) -> bool {
        self.left <= self.right
    }
}
/// Terminal damage information collected since the last [`Term::reset_damage`] call.
///
/// Obtained from [`Term::damage`].
#[derive(Debug)]
pub enum TermDamage<'a> {
    /// The entire terminal is damaged.
    Full,

    /// Iterator over damaged lines in the terminal.
    Partial(TermDamageIterator<'a>),
}
/// Iterator over the terminal's viewport damaged lines.
#[derive(Clone, Debug)]
pub struct TermDamageIterator<'a> {
    line_damage: slice::Iter<'a, LineDamageBounds>,
    display_offset: usize,
}

impl<'a> TermDamageIterator<'a> {
    pub fn new(line_damage: &'a [LineDamageBounds], display_offset: usize) -> Self {
        // Filter out invisible damage: only the lines that fit into the
        // viewport after applying the display offset are kept.
        let visible_lines = line_damage.len().saturating_sub(display_offset);
        Self { display_offset, line_damage: line_damage[..visible_lines].iter() }
    }
}

impl Iterator for TermDamageIterator<'_> {
    type Item = LineDamageBounds;

    fn next(&mut self) -> Option<Self::Item> {
        // Skip undamaged lines, shifting damaged ones by the display offset.
        for line in &mut self.line_damage {
            if line.is_damaged() {
                return Some(LineDamageBounds::new(
                    line.line + self.display_offset,
                    line.left,
                    line.right,
                ));
            }
        }

        None
    }
}
/// State of the terminal damage.
struct TermDamageState {
    /// Hint whether terminal should be damaged entirely regardless of the actual damage changes.
    full: bool,

    /// Information about damage on terminal lines.
    lines: Vec<LineDamageBounds>,

    /// Old terminal cursor point.
    last_cursor: Point,
}

impl TermDamageState {
    fn new(num_cols: usize, num_lines: usize) -> Self {
        // A fresh terminal has no previous frame, so it starts fully damaged.
        let lines =
            (0..num_lines).map(|line| LineDamageBounds::undamaged(line, num_cols)).collect();

        Self { full: true, lines, last_cursor: Default::default() }
    }

    #[inline]
    fn resize(&mut self, num_cols: usize, num_lines: usize) {
        // Reset point, so old cursor won't end up outside of the viewport.
        self.last_cursor = Default::default();
        self.full = true;

        // Rebuild the per-line damage to match the new dimensions.
        self.lines.clear();
        self.lines
            .extend((0..num_lines).map(|line| LineDamageBounds::undamaged(line, num_cols)));
    }

    /// Damage point inside of the viewport.
    #[inline]
    fn damage_point(&mut self, point: Point<usize>) {
        self.damage_line(point.line, point.column.0, point.column.0);
    }

    /// Expand `line`'s damage to span at least `left` to `right` column.
    #[inline]
    fn damage_line(&mut self, line: usize, left: usize, right: usize) {
        self.lines[line].expand(left, right);
    }

    /// Reset information about terminal damage.
    fn reset(&mut self, num_cols: usize) {
        self.full = false;
        for line in &mut self.lines {
            line.reset(num_cols);
        }
    }
}
/// Terminal emulator state: active/inactive grids, modes, selection, colors,
/// title stack, keyboard mode stacks, and damage tracking.
pub struct Term<T> {
    /// Terminal focus controlling the cursor shape.
    pub is_focused: bool,
    /// Cursor for keyboard selection.
    pub vi_mode_cursor: ViModeCursor,
    /// Currently active selection, if any.
    pub selection: Option<Selection>,
    /// Currently active grid.
    ///
    /// Tracks the screen buffer currently in use. While the alternate screen buffer is active,
    /// this will be the alternate grid. Otherwise it is the primary screen buffer.
    grid: Grid<Cell>,
    /// Currently inactive grid.
    ///
    /// Opposite of the active grid. While the alternate screen buffer is active, this will be the
    /// primary grid. Otherwise it is the alternate screen buffer.
    inactive_grid: Grid<Cell>,
    /// Index into `charsets`, pointing to what ASCII is currently being mapped to.
    active_charset: CharsetIndex,
    /// Tabstops.
    tabs: TabStops,
    /// Mode flags.
    mode: TermMode,
    /// Scroll region.
    ///
    /// Range going from top to bottom of the terminal, indexed from the top of the viewport.
    scroll_region: Range<Line>,
    /// Modified terminal colors.
    colors: Colors,
    /// Current style of the cursor.
    cursor_style: Option<CursorStyle>,
    /// Proxy for sending events to the event loop.
    event_proxy: T,
    /// Current title of the window.
    title: Option<String>,
    /// Stack of saved window titles. When a title is popped from this stack, the `title` for the
    /// term is set.
    title_stack: Vec<Option<String>>,
    /// The stack for the keyboard modes.
    keyboard_mode_stack: Vec<KeyboardModes>,
    /// Currently inactive keyboard mode stack.
    inactive_keyboard_mode_stack: Vec<KeyboardModes>,
    /// Information about damaged cells.
    damage: TermDamageState,
    /// Config directly for the terminal.
    config: Config,
}
/// Configuration options for the [`Term`].
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct Config {
    /// The maximum amount of scrolling history.
    pub scrolling_history: usize,
    /// Default cursor style to reset the cursor to.
    pub default_cursor_style: CursorStyle,
    /// Cursor style for Vi mode.
    pub vi_mode_cursor_style: Option<CursorStyle>,
    /// The characters which terminate semantic selection.
    ///
    /// The default value is [`SEMANTIC_ESCAPE_CHARS`].
    pub semantic_escape_chars: String,
    /// Whether to enable kitty keyboard protocol.
    ///
    /// When disabled, keyboard-protocol report/push/pop requests are ignored.
    pub kitty_keyboard: bool,
    /// OSC52 support mode.
    pub osc52: Osc52,
}
impl Default for Config {
fn default() -> Self {
Self {
scrolling_history: 10000,
semantic_escape_chars: SEMANTIC_ESCAPE_CHARS.to_owned(),
default_cursor_style: Default::default(),
vi_mode_cursor_style: Default::default(),
kitty_keyboard: Default::default(),
osc52: Default::default(),
}
}
}
/// OSC 52 behavior.
///
/// Controls which halves of OSC 52 clipboard access (copy/paste) are honored.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize), serde(rename_all = "lowercase"))]
pub enum Osc52 {
    /// The handling of the escape sequence is disabled.
    Disabled,
    /// Only copy sequence is accepted.
    ///
    /// This option is the default as a compromise between entirely
    /// disabling it (the most secure) and allowing `paste` (the less secure).
    #[default]
    OnlyCopy,
    /// Only paste sequence is accepted.
    OnlyPaste,
    /// Both are accepted.
    CopyPaste,
}
impl<T> Term<T> {
#[inline]
pub fn scroll_display(&mut self, scroll: Scroll)
where
T: EventListener,
{
let old_display_offset = self.grid.display_offset();
self.grid.scroll_display(scroll);
self.event_proxy.send_event(Event::MouseCursorDirty);
// Clamp vi mode cursor to the viewport.
let viewport_start = -(self.grid.display_offset() as i32);
let viewport_end = viewport_start + self.bottommost_line().0;
let vi_cursor_line = &mut self.vi_mode_cursor.point.line.0;
*vi_cursor_line = cmp::min(viewport_end, cmp::max(viewport_start, *vi_cursor_line));
self.vi_mode_recompute_selection();
// Damage everything if display offset changed.
if old_display_offset != self.grid().display_offset() {
self.mark_fully_damaged();
}
}
pub fn new<D: Dimensions>(config: Config, dimensions: &D, event_proxy: T) -> Term<T> {
let num_cols = dimensions.columns();
let num_lines = dimensions.screen_lines();
let history_size = config.scrolling_history;
let grid = Grid::new(num_lines, num_cols, history_size);
let inactive_grid = Grid::new(num_lines, num_cols, 0);
let tabs = TabStops::new(grid.columns());
let scroll_region = Line(0)..Line(grid.screen_lines() as i32);
// Initialize terminal damage, covering the entire terminal upon launch.
let damage = TermDamageState::new(num_cols, num_lines);
Term {
inactive_grid,
scroll_region,
event_proxy,
damage,
config,
grid,
tabs,
inactive_keyboard_mode_stack: Default::default(),
keyboard_mode_stack: Default::default(),
active_charset: Default::default(),
vi_mode_cursor: Default::default(),
cursor_style: Default::default(),
colors: color::Colors::default(),
title_stack: Default::default(),
is_focused: Default::default(),
selection: Default::default(),
title: Default::default(),
mode: Default::default(),
}
}
    /// Collect the information about the changes in the lines, which
    /// could be used to minimize the amount of drawing operations.
    ///
    /// The user controlled elements, like `Vi` mode cursor and `Selection` are **not** part of the
    /// collected damage state. Those could easily be tracked by comparing their old and new
    /// value between adjacent frames.
    ///
    /// After reading damage [`reset_damage`] should be called.
    ///
    /// [`reset_damage`]: Self::reset_damage
    #[must_use]
    pub fn damage(&mut self) -> TermDamage<'_> {
        // Ensure the entire terminal is damaged after entering insert mode.
        // Leaving is handled in the ansi handler.
        if self.mode.contains(TermMode::INSERT) {
            self.mark_fully_damaged();
        }
        // Record the current cursor position, keeping the old one for comparison below.
        let previous_cursor = mem::replace(&mut self.damage.last_cursor, self.grid.cursor.point);
        if self.damage.full {
            return TermDamage::Full;
        }
        // Add information about old cursor position and new one if they are not the same, so we
        // cover everything that was produced by `Term::input`.
        if self.damage.last_cursor != previous_cursor {
            // Cursor coordinates are always inside viewport even if you have `display_offset`.
            let point = Point::new(previous_cursor.line.0 as usize, previous_cursor.column);
            self.damage.damage_point(point);
        }
        // Always damage current cursor.
        self.damage_cursor();
        // NOTE: damage which changes all the content when the display offset is non-zero (e.g.
        // scrolling) is handled via full damage.
        let display_offset = self.grid().display_offset();
        TermDamage::Partial(TermDamageIterator::new(&self.damage.lines, display_offset))
    }
    /// Resets the terminal damage information.
    ///
    /// Clears the `full` damage hint and every per-line damage span.
    pub fn reset_damage(&mut self) {
        self.damage.reset(self.columns());
    }
    /// Request that the next [`Self::damage`] call reports the whole terminal as damaged.
    #[inline]
    fn mark_fully_damaged(&mut self) {
        self.damage.full = true;
    }
/// Set new options for the [`Term`].
pub fn set_options(&mut self, options: Config)
where
T: EventListener,
{
let old_config = mem::replace(&mut self.config, options);
let title_event = match &self.title {
Some(title) => Event::Title(title.clone()),
None => Event::ResetTitle,
};
self.event_proxy.send_event(title_event);
if self.mode.contains(TermMode::ALT_SCREEN) {
self.inactive_grid.update_history(self.config.scrolling_history);
} else {
self.grid.update_history(self.config.scrolling_history);
}
if self.config.kitty_keyboard != old_config.kitty_keyboard {
self.keyboard_mode_stack = Vec::new();
self.inactive_keyboard_mode_stack = Vec::new();
self.mode.remove(TermMode::KITTY_KEYBOARD_PROTOCOL);
}
// Damage everything on config updates.
self.mark_fully_damaged();
}
    /// Convert the active selection to a String.
    ///
    /// Returns `None` when there is no selection or it resolves to an empty range.
    pub fn selection_to_string(&self) -> Option<String> {
        let selection_range = self.selection.as_ref().and_then(|s| s.to_range(self))?;
        let SelectionRange { start, end, .. } = selection_range;
        let mut res = String::new();
        match self.selection.as_ref() {
            Some(Selection { ty: SelectionType::Block, .. }) => {
                // Block selection: take the same column span from every line.
                for line in (start.line.0..end.line.0).map(Line::from) {
                    res += self
                        .line_to_string(line, start.column..end.column, start.column.0 != 0)
                        .trim_end();
                    res += "\n";
                }
                // The last line must not end with a trailing newline.
                res += self.line_to_string(end.line, start.column..end.column, true).trim_end();
            },
            Some(Selection { ty: SelectionType::Lines, .. }) => {
                // Line selection always terminates with a newline.
                res = self.bounds_to_string(start, end) + "\n";
            },
            _ => {
                res = self.bounds_to_string(start, end);
            },
        }
        Some(res)
    }
/// Convert range between two points to a String.
pub fn bounds_to_string(&self, start: Point, end: Point) -> String {
let mut res = String::new();
for line in (start.line.0..=end.line.0).map(Line::from) {
let start_col = if line == start.line { start.column } else { Column(0) };
let end_col = if line == end.line { end.column } else { self.last_column() };
res += &self.line_to_string(line, start_col..end_col, line == end.line);
}
res.strip_suffix('\n').map(str::to_owned).unwrap_or(res)
}
    /// Convert a single line in the grid to a String.
    ///
    /// Tab-created runs of spaces collapse into the original `\t`, wide-char
    /// spacer cells are skipped, and a newline is appended when the line is not
    /// soft-wrapped. `include_wrapped_wide` additionally pulls in a wide char
    /// whose leading spacer ends the previous line.
    fn line_to_string(
        &self,
        line: Line,
        mut cols: Range<Column>,
        include_wrapped_wide: bool,
    ) -> String {
        let mut text = String::new();
        let grid_line = &self.grid[line];
        let line_length = cmp::min(grid_line.line_length(), cols.end + 1);
        // Include wide char when trailing spacer is selected.
        if grid_line[cols.start].flags.contains(Flags::WIDE_CHAR_SPACER) {
            cols.start -= 1;
        }
        let mut tab_mode = false;
        for column in (cols.start.0..line_length.0).map(Column::from) {
            let cell = &grid_line[column];
            // Skip over cells until next tab-stop once a tab was found.
            if tab_mode {
                if self.tabs[column] || cell.c != ' ' {
                    tab_mode = false;
                } else {
                    continue;
                }
            }
            if cell.c == '\t' {
                tab_mode = true;
            }
            if !cell.flags.intersects(Flags::WIDE_CHAR_SPACER | Flags::LEADING_WIDE_CHAR_SPACER) {
                // Push cells primary character.
                text.push(cell.c);
                // Push zero-width characters.
                for c in cell.zerowidth().into_iter().flatten() {
                    text.push(*c);
                }
            }
        }
        // Terminate the line unless it soft-wraps into the next one.
        if cols.end >= self.columns() - 1
            && (line_length.0 == 0
                || !self.grid[line][line_length - 1].flags.contains(Flags::WRAPLINE))
        {
            text.push('\n');
        }
        // If wide char is not part of the selection, but leading spacer is, include it.
        if line_length == self.columns()
            && line_length.0 >= 2
            && grid_line[line_length - 1].flags.contains(Flags::LEADING_WIDE_CHAR_SPACER)
            && include_wrapped_wide
        {
            text.push(self.grid[line - 1i32][Column(0)].c);
        }
        text
    }
    /// Terminal content required for rendering.
    #[inline]
    pub fn renderable_content(&self) -> RenderableContent<'_>
    where
        T: EventListener,
    {
        RenderableContent::new(self)
    }
    /// Access to the raw grid data structure.
    pub fn grid(&self) -> &Grid<Cell> {
        &self.grid
    }
    /// Mutable access to the raw grid data structure.
    pub fn grid_mut(&mut self) -> &mut Grid<Cell> {
        &mut self.grid
    }
    /// Resize terminal to new dimensions.
    ///
    /// Resizes both grids, moves and clamps the vi mode cursor, rotates or
    /// drops the selection, resets the scroll region, and resizes the damage
    /// tracking. No-op when dimensions are unchanged.
    pub fn resize<S: Dimensions>(&mut self, size: S) {
        let old_cols = self.columns();
        let old_lines = self.screen_lines();
        let num_cols = size.columns();
        let num_lines = size.screen_lines();
        if old_cols == num_cols && old_lines == num_lines {
            debug!("Term::resize dimensions unchanged");
            return;
        }
        debug!("New num_cols is {} and num_lines is {}", num_cols, num_lines);
        // Move vi mode cursor with the content.
        let history_size = self.history_size();
        let mut delta = num_lines as i32 - old_lines as i32;
        // Don't move the cursor past the primary cursor or the available history.
        let min_delta = cmp::min(0, num_lines as i32 - self.grid.cursor.point.line.0 - 1);
        delta = cmp::min(cmp::max(delta, min_delta), history_size as i32);
        self.vi_mode_cursor.point.line += delta;
        // Only the visible screen buffer reflows its content.
        let is_alt = self.mode.contains(TermMode::ALT_SCREEN);
        self.grid.resize(!is_alt, num_lines, num_cols);
        self.inactive_grid.resize(is_alt, num_lines, num_cols);
        // Invalidate selection and tabs only when necessary.
        if old_cols != num_cols {
            self.selection = None;
            // Recreate tabs list.
            self.tabs.resize(num_cols);
        } else if let Some(selection) = self.selection.take() {
            // Line count changed but columns didn't: rotate the selection with the content.
            let max_lines = cmp::max(num_lines, old_lines) as i32;
            let range = Line(0)..Line(max_lines);
            self.selection = selection.rotate(self, &range, -delta);
        }
        // Clamp vi cursor to viewport.
        let vi_point = self.vi_mode_cursor.point;
        let viewport_top = Line(-(self.grid.display_offset() as i32));
        let viewport_bottom = viewport_top + self.bottommost_line();
        self.vi_mode_cursor.point.line =
            cmp::max(cmp::min(vi_point.line, viewport_bottom), viewport_top);
        self.vi_mode_cursor.point.column = cmp::min(vi_point.column, self.last_column());
        // Reset scrolling region.
        self.scroll_region = Line(0)..Line(self.screen_lines() as i32);
        // Resize damage information.
        self.damage.resize(num_cols, num_lines);
    }
    /// Active terminal modes.
    #[inline]
    pub fn mode(&self) -> &TermMode {
        &self.mode
    }
    /// Swap primary and alternate screen buffer.
    ///
    /// Entering the alternate screen copies the cursor and clears the alt grid;
    /// the keyboard mode stacks are swapped alongside the grids, the selection
    /// is dropped, and the terminal is fully damaged.
    pub fn swap_alt(&mut self) {
        if !self.mode.contains(TermMode::ALT_SCREEN) {
            // Set alt screen cursor to the current primary screen cursor.
            self.inactive_grid.cursor = self.grid.cursor.clone();
            // Drop information about the primary screens saved cursor.
            self.grid.saved_cursor = self.grid.cursor.clone();
            // Reset alternate screen contents.
            self.inactive_grid.reset_region(..);
        }
        // Each screen buffer keeps its own keyboard mode stack; activate the other one.
        mem::swap(&mut self.keyboard_mode_stack, &mut self.inactive_keyboard_mode_stack);
        let keyboard_mode =
            self.keyboard_mode_stack.last().copied().unwrap_or(KeyboardModes::NO_MODE).into();
        self.set_keyboard_mode(keyboard_mode, KeyboardModesApplyBehavior::Replace);
        mem::swap(&mut self.grid, &mut self.inactive_grid);
        self.mode ^= TermMode::ALT_SCREEN;
        self.selection = None;
        self.mark_fully_damaged();
    }
    /// Scroll screen down.
    ///
    /// Text moves down; clear at bottom
    /// Expects origin to be in scroll range.
    #[inline]
    fn scroll_down_relative(&mut self, origin: Line, mut lines: usize) {
        trace!("Scrolling down relative: origin={}, lines={}", origin, lines);
        // Clamp the distance to the scroll region and the space below `origin`.
        lines = cmp::min(lines, (self.scroll_region.end - self.scroll_region.start).0 as usize);
        lines = cmp::min(lines, (self.scroll_region.end - origin).0 as usize);
        let region = origin..self.scroll_region.end;
        // Scroll selection.
        self.selection =
            self.selection.take().and_then(|s| s.rotate(self, &region, -(lines as i32)));
        // Scroll vi mode cursor.
        let line = &mut self.vi_mode_cursor.point.line;
        if region.start <= *line && region.end > *line {
            *line = cmp::min(*line + lines, region.end - 1);
        }
        // Scroll between origin and bottom
        self.grid.scroll_down(&region, lines);
        self.mark_fully_damaged();
    }
    /// Scroll screen up
    ///
    /// Text moves up; clear at top
    /// Expects origin to be in scroll range.
    #[inline]
    fn scroll_up_relative(&mut self, origin: Line, mut lines: usize) {
        trace!("Scrolling up relative: origin={}, lines={}", origin, lines);
        // Clamp the distance to the size of the scroll region.
        lines = cmp::min(lines, (self.scroll_region.end - self.scroll_region.start).0 as usize);
        let region = origin..self.scroll_region.end;
        // Scroll selection.
        self.selection = self.selection.take().and_then(|s| s.rotate(self, &region, lines as i32));
        self.grid.scroll_up(&region, lines);
        // Scroll vi mode cursor.
        let viewport_top = Line(-(self.grid.display_offset() as i32));
        // When the region starts at the top, the cursor may follow content into history.
        let top = if region.start == 0 { viewport_top } else { region.start };
        let line = &mut self.vi_mode_cursor.point.line;
        if (top <= *line) && region.end > *line {
            *line = cmp::max(*line - lines, top);
        }
        self.mark_fully_damaged();
    }
    /// Handle DECCOLM (132-column mode): reset the scroll region and clear the grid.
    fn deccolm(&mut self)
    where
        T: EventListener,
    {
        // Setting 132 column font makes no sense, but run the other side effects.
        // Clear scrolling region.
        self.set_scrolling_region(1, None);
        // Clear grid.
        self.grid.reset_region(..);
        self.mark_fully_damaged();
    }
    /// Signal an exit request to the event listener.
    #[inline]
    pub fn exit(&mut self)
    where
        T: EventListener,
    {
        self.event_proxy.send_event(Event::Exit);
    }
/// Toggle the vi mode.
#[inline]
pub fn toggle_vi_mode(&mut self)
where
T: EventListener,
{
self.mode ^= TermMode::VI;
if self.mode.contains(TermMode::VI) {
let display_offset = self.grid.display_offset() as i32;
if self.grid.cursor.point.line > self.bottommost_line() - display_offset {
// Move cursor to top-left if terminal cursor is not visible.
let point = Point::new(Line(-display_offset), Column(0));
self.vi_mode_cursor = ViModeCursor::new(point);
} else {
// Reset vi mode cursor position to match primary cursor.
self.vi_mode_cursor = ViModeCursor::new(self.grid.cursor.point);
}
}
// Update UI about cursor blinking state changes.
self.event_proxy.send_event(Event::CursorBlinkingChange);
}
    /// Move vi mode cursor.
    #[inline]
    pub fn vi_motion(&mut self, motion: ViMotion)
    where
        T: EventListener,
    {
        // Require vi mode to be active.
        if !self.mode.contains(TermMode::VI) {
            return;
        }
        // Move cursor.
        self.vi_mode_cursor = self.vi_mode_cursor.motion(self, motion);
        self.vi_mode_recompute_selection();
    }
    /// Move vi cursor to a point in the grid.
    #[inline]
    pub fn vi_goto_point(&mut self, point: Point)
    where
        T: EventListener,
    {
        // Move viewport to make point visible.
        self.scroll_to_point(point);
        // Move vi cursor to the point.
        self.vi_mode_cursor.point = point;
        self.vi_mode_recompute_selection();
    }
    /// Update the active selection to match the vi mode cursor position.
    #[inline]
    fn vi_mode_recompute_selection(&mut self) {
        // Require vi mode to be active.
        if !self.mode.contains(TermMode::VI) {
            return;
        }
        // Update only if non-empty selection is present.
        if let Some(selection) = self.selection.as_mut().filter(|s| !s.is_empty()) {
            selection.update(self.vi_mode_cursor.point, Side::Left);
            selection.include_all();
        }
    }
    /// Scroll display to point if it is outside of viewport.
    pub fn scroll_to_point(&mut self, point: Point)
    where
        T: EventListener,
    {
        let display_offset = self.grid.display_offset() as i32;
        let screen_lines = self.grid.screen_lines() as i32;
        if point.line < -display_offset {
            // Point is above the viewport: scroll up just far enough to show it.
            let lines = point.line + display_offset;
            self.scroll_display(Scroll::Delta(-lines.0));
        } else if point.line >= (screen_lines - display_offset) {
            // Point is below the viewport: scroll down just far enough to show it.
            let lines = point.line + display_offset - screen_lines + 1i32;
            self.scroll_display(Scroll::Delta(-lines.0));
        }
    }
    /// Jump to the end of a wide cell.
    ///
    /// Moving right from a leading spacer jumps onto the wrapped wide char on
    /// the next line; moving right from a wide char lands on its spacer.
    /// Moving left steps back over the wide char and any leading spacer.
    pub fn expand_wide(&self, mut point: Point, direction: Direction) -> Point {
        let flags = self.grid[point.line][point.column].flags;
        match direction {
            Direction::Right if flags.contains(Flags::LEADING_WIDE_CHAR_SPACER) => {
                point.column = Column(1);
                point.line += 1;
            },
            Direction::Right if flags.contains(Flags::WIDE_CHAR) => {
                point.column = cmp::min(point.column + 1, self.last_column());
            },
            Direction::Left if flags.intersects(Flags::WIDE_CHAR | Flags::WIDE_CHAR_SPACER) => {
                if flags.contains(Flags::WIDE_CHAR_SPACER) {
                    point.column -= 1;
                }
                // Step onto the previous line when the wide char wraps across lines.
                let prev = point.sub(self, Boundary::Grid, 1);
                if self.grid[prev].flags.contains(Flags::LEADING_WIDE_CHAR_SPACER) {
                    point = prev;
                }
            },
            _ => (),
        }
        point
    }
    /// Characters which terminate semantic selection, from the config.
    #[inline]
    pub fn semantic_escape_chars(&self) -> &str {
        &self.config.semantic_escape_chars
    }
    /// Override the semantic escape characters (test helper).
    #[cfg(test)]
    pub(crate) fn set_semantic_escape_chars(&mut self, semantic_escape_chars: &str) {
        self.config.semantic_escape_chars = semantic_escape_chars.into();
    }
/// Active terminal cursor style.
///
/// While vi mode is active, this will automatically return the vi mode cursor style.
#[inline]
pub fn cursor_style(&self) -> CursorStyle {
let cursor_style = self.cursor_style.unwrap_or(self.config.default_cursor_style);
if self.mode.contains(TermMode::VI) {
self.config.vi_mode_cursor_style.unwrap_or(cursor_style)
} else {
cursor_style
}
}
    /// Modified terminal colors.
    pub fn colors(&self) -> &Colors {
        &self.colors
    }
    /// Insert a linebreak at the current cursor position.
    ///
    /// No-op unless `LINE_WRAP` mode is set. Marks the current cell as
    /// soft-wrapped and moves the cursor to column zero of the next line,
    /// scrolling when the cursor is at the bottom of the scroll region.
    #[inline]
    fn wrapline(&mut self)
    where
        T: EventListener,
    {
        if !self.mode.contains(TermMode::LINE_WRAP) {
            return;
        }
        trace!("Wrapping input");
        self.grid.cursor_cell().flags.insert(Flags::WRAPLINE);
        if self.grid.cursor.point.line + 1 >= self.scroll_region.end {
            self.linefeed();
        } else {
            // Damage both the old and the new cursor lines.
            self.damage_cursor();
            self.grid.cursor.point.line += 1;
        }
        self.grid.cursor.point.column = Column(0);
        self.grid.cursor.input_needs_wrap = false;
        self.damage_cursor();
    }
    /// Write `c` to the cell at the cursor position.
    ///
    /// Applies the active charset mapping and the cursor's template attributes.
    /// When overwriting part of a fullwidth character, the sibling cells
    /// (spacer or wide char) are cleaned up first.
    #[inline(always)]
    fn write_at_cursor(&mut self, c: char) {
        let c = self.grid.cursor.charsets[self.active_charset].map(c);
        let fg = self.grid.cursor.template.fg;
        let bg = self.grid.cursor.template.bg;
        let flags = self.grid.cursor.template.flags;
        let extra = self.grid.cursor.template.extra.clone();
        let mut cursor_cell = self.grid.cursor_cell();
        // Clear all related cells when overwriting a fullwidth cell.
        if cursor_cell.flags.intersects(Flags::WIDE_CHAR | Flags::WIDE_CHAR_SPACER) {
            // Remove wide char and spacer.
            let wide = cursor_cell.flags.contains(Flags::WIDE_CHAR);
            let point = self.grid.cursor.point;
            if wide && point.column < self.last_column() {
                self.grid[point.line][point.column + 1].flags.remove(Flags::WIDE_CHAR_SPACER);
            } else if point.column > 0 {
                self.grid[point.line][point.column - 1].clear_wide();
            }
            // Remove leading spacers.
            if point.column <= 1 && point.line != self.topmost_line() {
                let column = self.last_column();
                self.grid[point.line - 1i32][column].flags.remove(Flags::LEADING_WIDE_CHAR_SPACER);
            }
            // Re-borrow the cursor cell after mutating sibling cells.
            cursor_cell = self.grid.cursor_cell();
        }
        cursor_cell.c = c;
        cursor_cell.fg = fg;
        cursor_cell.bg = bg;
        cursor_cell.flags = flags;
        cursor_cell.extra = extra;
    }
    /// Damage the cell currently occupied by the terminal cursor.
    #[inline]
    fn damage_cursor(&mut self) {
        // The normal cursor coordinates are always in viewport.
        let point =
            Point::new(self.grid.cursor.point.line.0 as usize, self.grid.cursor.point.column);
        self.damage.damage_point(point);
    }
    /// Replace the kitty keyboard protocol bits of `self.mode` according to `apply`.
    #[inline]
    fn set_keyboard_mode(&mut self, mode: TermMode, apply: KeyboardModesApplyBehavior) {
        let active_mode = self.mode & TermMode::KITTY_KEYBOARD_PROTOCOL;
        self.mode &= !TermMode::KITTY_KEYBOARD_PROTOCOL;
        let new_mode = match apply {
            KeyboardModesApplyBehavior::Replace => mode,
            KeyboardModesApplyBehavior::Union => active_mode.union(mode),
            KeyboardModesApplyBehavior::Difference => active_mode.difference(mode),
        };
        trace!("Setting keyboard mode to {new_mode:?}");
        self.mode |= new_mode;
    }
}
/// Expose the active grid's dimensions directly on the terminal.
impl<T> Dimensions for Term<T> {
    /// Number of columns in the active grid.
    #[inline]
    fn columns(&self) -> usize {
        self.grid.columns()
    }
    /// Number of visible lines in the active grid.
    #[inline]
    fn screen_lines(&self) -> usize {
        self.grid.screen_lines()
    }
    /// Total line count reported by the active grid (presumably viewport plus
    /// scrollback history — confirm in `Grid`).
    #[inline]
    fn total_lines(&self) -> usize {
        self.grid.total_lines()
    }
}
impl<T: EventListener> Handler for Term<T> {
    /// A character to be displayed.
    ///
    /// Handles zero-width combining characters, pending wraps, insert-mode
    /// shifting, and fullwidth glyphs (wide char + trailing spacer, plus a
    /// leading spacer when the glyph doesn't fit on the current line).
    #[inline(never)]
    fn input(&mut self, c: char) {
        // Number of cells the char will occupy.
        let width = match c.width() {
            Some(width) => width,
            None => return,
        };
        // Handle zero-width characters.
        if width == 0 {
            // Get previous column.
            let mut column = self.grid.cursor.point.column;
            if !self.grid.cursor.input_needs_wrap {
                column.0 = column.saturating_sub(1);
            }
            // Put zerowidth characters over first fullwidth character cell.
            let line = self.grid.cursor.point.line;
            if self.grid[line][column].flags.contains(Flags::WIDE_CHAR_SPACER) {
                column.0 = column.saturating_sub(1);
            }
            self.grid[line][column].push_zerowidth(c);
            return;
        }
        // Move cursor to next line.
        if self.grid.cursor.input_needs_wrap {
            self.wrapline();
        }
        // If in insert mode, first shift cells to the right.
        let columns = self.columns();
        if self.mode.contains(TermMode::INSERT) && self.grid.cursor.point.column + width < columns {
            let line = self.grid.cursor.point.line;
            let col = self.grid.cursor.point.column;
            let row = &mut self.grid[line][..];
            // Shift right-to-left so cells are not overwritten before being moved.
            for col in (col.0..(columns - width)).rev() {
                row.swap(col + width, col);
            }
        }
        if width == 1 {
            self.write_at_cursor(c);
        } else {
            if self.grid.cursor.point.column + 1 >= columns {
                if self.mode.contains(TermMode::LINE_WRAP) {
                    // Insert placeholder before wide char if glyph does not fit in this row.
                    self.grid.cursor.template.flags.insert(Flags::LEADING_WIDE_CHAR_SPACER);
                    self.write_at_cursor(' ');
                    self.grid.cursor.template.flags.remove(Flags::LEADING_WIDE_CHAR_SPACER);
                    self.wrapline();
                } else {
                    // Prevent out of bounds crash when linewrapping is disabled.
                    self.grid.cursor.input_needs_wrap = true;
                    return;
                }
            }
            // Write full width glyph to current cursor cell.
            self.grid.cursor.template.flags.insert(Flags::WIDE_CHAR);
            self.write_at_cursor(c);
            self.grid.cursor.template.flags.remove(Flags::WIDE_CHAR);
            // Write spacer to cell following the wide glyph.
            self.grid.cursor.point.column += 1;
            self.grid.cursor.template.flags.insert(Flags::WIDE_CHAR_SPACER);
            self.write_at_cursor(' ');
            self.grid.cursor.template.flags.remove(Flags::WIDE_CHAR_SPACER);
        }
        if self.grid.cursor.point.column + 1 < columns {
            self.grid.cursor.point.column += 1;
        } else {
            // At the last column: defer the wrap until the next printable char.
            self.grid.cursor.input_needs_wrap = true;
        }
    }
#[inline]
fn decaln(&mut self) {
trace!("Decalnning");
for line in (0..self.screen_lines()).map(Line::from) {
for column in 0..self.columns() {
let cell = &mut self.grid[line][Column(column)];
*cell = Cell::default();
cell.c = 'E';
}
}
self.mark_fully_damaged();
}
    /// Move the cursor to `line`/`col`, clamped to the allowed area.
    ///
    /// In origin mode the line is relative to (and clamped within) the scroll
    /// region; otherwise it is clamped to the full screen.
    #[inline]
    fn goto(&mut self, line: i32, col: usize) {
        let line = Line(line);
        let col = Column(col);
        trace!("Going to: line={}, col={}", line, col);
        let (y_offset, max_y) = if self.mode.contains(TermMode::ORIGIN) {
            (self.scroll_region.start, self.scroll_region.end - 1)
        } else {
            (Line(0), self.bottommost_line())
        };
        // Damage both the old and the new cursor positions.
        self.damage_cursor();
        self.grid.cursor.point.line = cmp::max(cmp::min(line + y_offset, max_y), Line(0));
        self.grid.cursor.point.column = cmp::min(col, self.last_column());
        self.damage_cursor();
        self.grid.cursor.input_needs_wrap = false;
    }
    /// Move the cursor to `line`, keeping the current column.
    #[inline]
    fn goto_line(&mut self, line: i32) {
        trace!("Going to line: {}", line);
        self.goto(line, self.grid.cursor.point.column.0)
    }
    /// Move the cursor to `col`, keeping the current line.
    #[inline]
    fn goto_col(&mut self, col: usize) {
        trace!("Going to column: {}", col);
        self.goto(self.grid.cursor.point.line.0, col)
    }
    /// Insert `count` blank cells at the cursor, shifting the rest of the line right.
    ///
    /// Cells shifted past the last column are dropped; the vacated span is
    /// filled with the template background color.
    #[inline]
    fn insert_blank(&mut self, count: usize) {
        let cursor = &self.grid.cursor;
        let bg = cursor.template.bg;
        // Ensure inserting within terminal bounds
        let count = cmp::min(count, self.columns() - cursor.point.column.0);
        let source = cursor.point.column;
        let destination = cursor.point.column.0 + count;
        let num_cells = self.columns() - destination;
        let line = cursor.point.line;
        self.damage.damage_line(line.0 as usize, 0, self.columns() - 1);
        let row = &mut self.grid[line][..];
        // Shift right-to-left so cells are not overwritten before being moved.
        for offset in (0..num_cells).rev() {
            row.swap(destination + offset, source.0 + offset);
        }
        // Cells were just moved out toward the end of the line;
        // fill in between source and dest with blanks.
        for cell in &mut row[source.0..destination] {
            *cell = bg.into();
        }
    }
#[inline]
fn move_up(&mut self, lines: usize) {
trace!("Moving up: {}", lines);
let line = self.grid.cursor.point.line - lines;
let column = self.grid.cursor.point.column;
self.goto(line.0, column.0)
}
#[inline]
fn move_down(&mut self, lines: usize) {
trace!("Moving down: {}", lines);
let line = self.grid.cursor.point.line + lines;
let column = self.grid.cursor.point.column;
self.goto(line.0, column.0)
}
#[inline]
fn move_forward(&mut self, cols: usize) {
trace!("Moving forward: {}", cols);
let last_column = cmp::min(self.grid.cursor.point.column + cols, self.last_column());
let cursor_line = self.grid.cursor.point.line.0 as usize;
self.damage.damage_line(cursor_line, self.grid.cursor.point.column.0, last_column.0);
self.grid.cursor.point.column = last_column;
self.grid.cursor.input_needs_wrap = false;
}
#[inline]
fn move_backward(&mut self, cols: usize) {
trace!("Moving backward: {}", cols);
let column = self.grid.cursor.point.column.saturating_sub(cols);
let cursor_line = self.grid.cursor.point.line.0 as usize;
self.damage.damage_line(cursor_line, column, self.grid.cursor.point.column.0);
self.grid.cursor.point.column = Column(column);
self.grid.cursor.input_needs_wrap = false;
}
    /// Respond to primary/secondary device attributes queries (DA1/DA2).
    #[inline]
    fn identify_terminal(&mut self, intermediate: Option<char>) {
        match intermediate {
            None => {
                trace!("Reporting primary device attributes");
                let text = String::from("\x1b[?6c");
                self.event_proxy.send_event(Event::PtyWrite(text));
            },
            Some('>') => {
                trace!("Reporting secondary device attributes");
                // Encode the crate version into the DA2 reply.
                let version = version_number(env!("CARGO_PKG_VERSION"));
                let text = format!("\x1b[>0;{version};1c");
                self.event_proxy.send_event(Event::PtyWrite(text));
            },
            _ => debug!("Unsupported device attributes intermediate"),
        }
    }
#[inline]
fn report_keyboard_mode(&mut self) {
if !self.config.kitty_keyboard {
return;
}
trace!("Reporting active keyboard mode");
let current_mode =
self.keyboard_mode_stack.last().unwrap_or(&KeyboardModes::NO_MODE).bits();
let text = format!("\x1b[?{current_mode}u");
self.event_proxy.send_event(Event::PtyWrite(text));
}
#[inline]
fn push_keyboard_mode(&mut self, mode: KeyboardModes) {
if !self.config.kitty_keyboard {
return;
}
trace!("Pushing `{mode:?}` keyboard mode into the stack");
if self.keyboard_mode_stack.len() >= KEYBOARD_MODE_STACK_MAX_DEPTH {
let removed = self.title_stack.remove(0);
trace!(
"Removing '{:?}' from bottom of keyboard mode stack that exceeds its maximum depth",
removed
);
}
self.keyboard_mode_stack.push(mode);
self.set_keyboard_mode(mode.into(), KeyboardModesApplyBehavior::Replace);
}
    /// Pop up to `to_pop` entries from the keyboard mode stack and reload the
    /// remaining top entry (or `NO_MODE`) as the active mode.
    #[inline]
    fn pop_keyboard_modes(&mut self, to_pop: u16) {
        // Ignore the request unless the protocol is enabled.
        if !self.config.kitty_keyboard {
            return;
        }
        trace!("Attempting to pop {to_pop} keyboard modes from the stack");
        let new_len = self.keyboard_mode_stack.len().saturating_sub(to_pop as usize);
        self.keyboard_mode_stack.truncate(new_len);
        // Reload active mode.
        let mode = self.keyboard_mode_stack.last().copied().unwrap_or(KeyboardModes::NO_MODE);
        self.set_keyboard_mode(mode.into(), KeyboardModesApplyBehavior::Replace);
    }
    /// Apply a keyboard protocol mode change, gated on protocol support.
    #[inline]
    fn set_keyboard_mode(&mut self, mode: KeyboardModes, apply: KeyboardModesApplyBehavior) {
        if !self.config.kitty_keyboard {
            return;
        }
        // Delegate to the inherent `Term::set_keyboard_mode`.
        self.set_keyboard_mode(mode.into(), apply);
    }
    /// Respond to device status reports: DSR 5 (status) and DSR 6 (cursor position).
    #[inline]
    fn device_status(&mut self, arg: usize) {
        trace!("Reporting device status: {}", arg);
        match arg {
            5 => {
                // "OK" status report.
                let text = String::from("\x1b[0n");
                self.event_proxy.send_event(Event::PtyWrite(text));
            },
            6 => {
                // Cursor position report, 1-indexed.
                let pos = self.grid.cursor.point;
                let text = format!("\x1b[{};{}R", pos.line + 1, pos.column + 1);
                self.event_proxy.send_event(Event::PtyWrite(text));
            },
            _ => debug!("unknown device status query: {}", arg),
        };
    }
#[inline]
fn move_down_and_cr(&mut self, lines: usize) {
trace!("Moving down and cr: {}", lines);
let line = self.grid.cursor.point.line + lines;
self.goto(line.0, 0)
}
#[inline]
fn move_up_and_cr(&mut self, lines: usize) {
trace!("Moving up and cr: {}", lines);
let line = self.grid.cursor.point.line - lines;
self.goto(line.0, 0)
}
    /// Insert tab at cursor position.
    ///
    /// Advances the cursor up to `count` tabstops, writing a `\t` into the
    /// first cell of each jump when it is blank so selections can recover tabs.
    #[inline]
    fn put_tab(&mut self, mut count: u16) {
        // A tab after the last column is the same as a linebreak.
        if self.grid.cursor.input_needs_wrap {
            self.wrapline();
            return;
        }
        while self.grid.cursor.point.column < self.columns() && count != 0 {
            count -= 1;
            let c = self.grid.cursor.charsets[self.active_charset].map('\t');
            let cell = self.grid.cursor_cell();
            if cell.c == ' ' {
                cell.c = c;
            }
            // Advance to the next tabstop, or stop at the last column.
            loop {
                if (self.grid.cursor.point.column + 1) == self.columns() {
                    break;
                }
                self.grid.cursor.point.column += 1;
                if self.tabs[self.grid.cursor.point.column] {
                    break;
                }
            }
        }
    }
/// Backspace.
#[inline]
fn backspace(&mut self) {
trace!("Backspace");
if self.grid.cursor.point.column > Column(0) {
let line = self.grid.cursor.point.line.0 as usize;
let column = self.grid.cursor.point.column.0;
self.grid.cursor.point.column -= 1;
self.grid.cursor.input_needs_wrap = false;
self.damage.damage_line(line, column - 1, column);
}
}
/// Carriage return.
#[inline]
fn carriage_return(&mut self) {
trace!("Carriage return");
let new_col = 0;
let line = self.grid.cursor.point.line.0 as usize;
self.damage.damage_line(line, new_col, self.grid.cursor.point.column.0);
self.grid.cursor.point.column = Column(new_col);
self.grid.cursor.input_needs_wrap = false;
}
/// Linefeed.
///
/// At the bottom of the scroll region this scrolls the region up by one line;
/// otherwise the cursor simply moves down, clamped to the screen.
#[inline]
fn linefeed(&mut self) {
    trace!("Linefeed");
    let next = self.grid.cursor.point.line + 1;
    if next == self.scroll_region.end {
        self.scroll_up(1);
    } else if next < self.screen_lines() {
        self.damage_cursor();
        self.grid.cursor.point.line += 1;
        self.damage_cursor();
    }
}
/// Ring the terminal bell by forwarding a bell event to the UI.
// NOTE(review): the previous doc comment ("Set current position as a tabstop")
// described `set_horizontal_tabstop`, not this function.
#[inline]
fn bell(&mut self) {
    trace!("Bell");
    self.event_proxy.send_event(Event::Bell);
}
/// SUB control function; intentionally a no-op beyond logging.
#[inline]
fn substitute(&mut self) {
    trace!("[unimplemented] Substitute");
}
/// Run LF/NL.
///
/// LF/NL mode has some interesting history. According to ECMA-48 4th
/// edition, in LINE FEED mode,
///
/// > The execution of the formatter functions LINE FEED (LF), FORM FEED
/// > (FF), LINE TABULATION (VT) cause only movement of the active position in
/// > the direction of the line progression.
///
/// In NEW LINE mode,
///
/// > The execution of the formatter functions LINE FEED (LF), FORM FEED
/// > (FF), LINE TABULATION (VT) cause movement to the line home position on
/// > the following line, the following form, etc. In the case of LF this is
/// > referred to as the New Line (NL) option.
///
/// Additionally, ECMA-48 4th edition says that this option is deprecated.
/// ECMA-48 5th edition only mentions this option (without explanation)
/// saying that it's been removed.
///
/// As an emulator, we need to support it since applications may still rely
/// on it.
#[inline]
fn newline(&mut self) {
    self.linefeed();

    // In NL mode a linefeed also implies a carriage return.
    if self.mode.contains(TermMode::LINE_FEED_NEW_LINE) {
        self.carriage_return();
    }
}
/// Set a tabstop at the cursor's current column (HTS).
#[inline]
fn set_horizontal_tabstop(&mut self) {
    trace!("Setting horizontal tabstop");
    let column = self.grid.cursor.point.column;
    self.tabs[column] = true;
}
/// Scroll the active scroll region up by `lines`.
#[inline]
fn scroll_up(&mut self, lines: usize) {
    self.scroll_up_relative(self.scroll_region.start, lines);
}
/// Scroll the active scroll region down by `lines`.
#[inline]
fn scroll_down(&mut self, lines: usize) {
    self.scroll_down_relative(self.scroll_region.start, lines);
}
/// Insert `lines` blank lines at the cursor (IL); only effective when the
/// cursor is inside the scroll region.
#[inline]
fn insert_blank_lines(&mut self, lines: usize) {
    trace!("Inserting blank {} lines", lines);

    let origin = self.grid.cursor.point.line;
    if self.scroll_region.contains(&origin) {
        self.scroll_down_relative(origin, lines);
    }
}
/// Delete `lines` lines at the cursor (DL); only effective inside the scroll
/// region.
#[inline]
fn delete_lines(&mut self, lines: usize) {
    let origin = self.grid.cursor.point.line;
    // Never delete more lines than remain below the cursor.
    let lines = cmp::min(self.screen_lines() - origin.0 as usize, lines);

    trace!("Deleting {} lines", lines);

    if lines > 0 && self.scroll_region.contains(&origin) {
        self.scroll_up_relative(origin, lines);
    }
}
/// Erase `count` cells at and right of the cursor (ECH), filling them with the
/// current background color; the rest of the line is not shifted.
#[inline]
fn erase_chars(&mut self, count: usize) {
    let point = self.grid.cursor.point;
    trace!("Erasing chars: count={}, col={}", count, point.column);

    let start = point.column;
    let end = cmp::min(start + count, Column(self.columns()));

    // Cleared cells take on the active background color.
    let bg = self.grid.cursor.template.bg;

    self.damage.damage_line(point.line.0 as usize, start.0, end.0);

    for cell in &mut self.grid[point.line][start..end] {
        *cell = bg.into();
    }
}
/// Delete `count` characters at the cursor (DCH): the remainder of the line
/// shifts left and the freed cells at the end are cleared to the background.
#[inline]
fn delete_chars(&mut self, count: usize) {
    let columns = self.columns();
    let cursor = &self.grid.cursor;
    let bg = cursor.template.bg;

    // Ensure deleting within terminal bounds.
    let count = cmp::min(count, columns);

    let start = cursor.point.column.0;
    let end = cmp::min(start + count, columns - 1);
    let num_cells = columns - end;

    let line = cursor.point.line;
    self.damage.damage_line(line.0 as usize, 0, self.columns() - 1);
    let row = &mut self.grid[line][..];

    // Shift the tail of the row left over the deleted cells.
    for offset in 0..num_cells {
        row.swap(start + offset, end + offset);
    }

    // Clear last `count` cells in the row. If deleting 1 char, need to delete
    // 1 cell.
    let end = columns - count;
    for cell in &mut row[end..] {
        *cell = bg.into();
    }
}
/// Move the cursor back by `count` tabstops (CBT), stopping at column 0.
#[inline]
fn move_backward_tabs(&mut self, count: u16) {
    trace!("Moving backward {} tabs", count);
    let old_col = self.grid.cursor.point.column.0;
    for _ in 0..count {
        let mut col = self.grid.cursor.point.column;

        if col == 0 {
            break;
        }

        // Scan left for the nearest tabstop before the current column.
        for i in (0..(col.0)).rev() {
            if self.tabs[index::Column(i)] {
                col = index::Column(i);
                break;
            }
        }
        self.grid.cursor.point.column = col;
    }

    let line = self.grid.cursor.point.line.0 as usize;
    self.damage.damage_line(line, self.grid.cursor.point.column.0, old_col);
}
/// Move the cursor forward by `count` tabstops (CHT), stopping at the last
/// column.
#[inline]
fn move_forward_tabs(&mut self, count: u16) {
    trace!("Moving forward {} tabs", count);
    let num_cols = self.columns();
    let old_col = self.grid.cursor.point.column.0;
    for _ in 0..count {
        let mut col = self.grid.cursor.point.column;

        if col == num_cols - 1 {
            break;
        }

        // Scan right for the next tabstop; falls back to the last column.
        for i in col.0 + 1..num_cols {
            col = index::Column(i);
            if self.tabs[col] {
                break;
            }
        }

        self.grid.cursor.point.column = col;
    }

    let line = self.grid.cursor.point.line.0 as usize;
    self.damage.damage_line(line, old_col, self.grid.cursor.point.column.0);
}
/// Save the full cursor state (DECSC) for a later restore.
#[inline]
fn save_cursor_position(&mut self) {
    trace!("Saving cursor position");
    self.grid.saved_cursor = self.grid.cursor.clone();
}
/// Restore the previously saved cursor state (DECRC).
#[inline]
fn restore_cursor_position(&mut self) {
    trace!("Restoring cursor position");

    // Damage both the old and the new cursor location.
    self.damage_cursor();
    self.grid.cursor = self.grid.saved_cursor.clone();
    self.damage_cursor();
}
/// Clear part or all of the cursor's line (EL), filling cleared cells with the
/// current background color.
#[inline]
fn clear_line(&mut self, mode: ansi::LineClearMode) {
    trace!("Clearing line: {:?}", mode);

    let cursor = &self.grid.cursor;
    let bg = cursor.template.bg;
    let point = cursor.point;

    // `right` is an exclusive bound.
    let (left, right) = match mode {
        ansi::LineClearMode::Right if cursor.input_needs_wrap => return,
        ansi::LineClearMode::Right => (point.column, Column(self.columns())),
        ansi::LineClearMode::Left => (Column(0), point.column + 1),
        ansi::LineClearMode::All => (Column(0), Column(self.columns())),
    };

    self.damage.damage_line(point.line.0 as usize, left.0, right.0 - 1);

    let row = &mut self.grid[point.line];
    for cell in &mut row[left..right] {
        *cell = bg.into();
    }

    // Drop any selection touching the cleared line.
    let range = self.grid.cursor.point.line..=self.grid.cursor.point.line;
    self.selection = self.selection.take().filter(|s| !s.intersects_range(range));
}
/// Set the indexed color value.
#[inline]
fn set_color(&mut self, index: usize, color: Rgb) {
    trace!("Setting color[{}] = {:?}", index, color);

    // Damage terminal if the color changed and it's not the cursor.
    if index != NamedColor::Cursor as usize && self.colors[index] != Some(color) {
        self.mark_fully_damaged();
    }

    self.colors[index] = Some(color);
}
/// Respond to a color query escape sequence.
///
/// The reply is produced lazily by the UI: the closure formats the resolved
/// color as an OSC response once the UI supplies the `Rgb` value.
#[inline]
fn dynamic_color_sequence(&mut self, prefix: String, index: usize, terminator: &str) {
    trace!("Requested write of escape sequence for color code {}: color[{}]", prefix, index);

    let terminator = terminator.to_owned();
    self.event_proxy.send_event(Event::ColorRequest(
        index,
        Arc::new(move |color| {
            // Each channel is doubled to the 16-bit "rgb:rrrr/gggg/bbbb" form.
            format!(
                "\x1b]{};rgb:{1:02x}{1:02x}/{2:02x}{2:02x}/{3:02x}{3:02x}{4}",
                prefix, color.r, color.g, color.b, terminator
            )
        }),
    ));
}
/// Reset the indexed color to original value.
#[inline]
fn reset_color(&mut self, index: usize) {
    trace!("Resetting color[{}]", index);

    // Damage terminal if the color changed and it's not the cursor.
    if index != NamedColor::Cursor as usize && self.colors[index].is_some() {
        self.mark_fully_damaged();
    }

    self.colors[index] = None;
}
/// Store data into clipboard.
///
/// Implements the write half of OSC 52; gated by the `osc52` config option.
/// Invalid base64 or non-UTF-8 payloads are silently ignored.
#[inline]
fn clipboard_store(&mut self, clipboard: u8, base64: &[u8]) {
    if !matches!(self.config.osc52, Osc52::OnlyCopy | Osc52::CopyPaste) {
        debug!("Denied osc52 store");
        return;
    }

    let clipboard_type = match clipboard {
        b'c' => ClipboardType::Clipboard,
        b'p' | b's' => ClipboardType::Selection,
        _ => return,
    };

    if let Ok(bytes) = Base64.decode(base64) {
        if let Ok(text) = String::from_utf8(bytes) {
            self.event_proxy.send_event(Event::ClipboardStore(clipboard_type, text));
        }
    }
}
/// Load data from clipboard.
///
/// Implements the read half of OSC 52; gated by the `osc52` config option.
/// The UI resolves the clipboard content and the closure encodes the reply.
#[inline]
fn clipboard_load(&mut self, clipboard: u8, terminator: &str) {
    if !matches!(self.config.osc52, Osc52::OnlyPaste | Osc52::CopyPaste) {
        debug!("Denied osc52 load");
        return;
    }

    let clipboard_type = match clipboard {
        b'c' => ClipboardType::Clipboard,
        b'p' | b's' => ClipboardType::Selection,
        _ => return,
    };

    let terminator = terminator.to_owned();

    self.event_proxy.send_event(Event::ClipboardLoad(
        clipboard_type,
        Arc::new(move |text| {
            let base64 = Base64.encode(text);
            format!("\x1b]52;{};{}{}", clipboard as char, base64, terminator)
        }),
    ));
}
/// Clear a region of the screen (ED) and drop any intersecting selection.
#[inline]
fn clear_screen(&mut self, mode: ansi::ClearMode) {
    trace!("Clearing screen: {:?}", mode);
    let bg = self.grid.cursor.template.bg;

    let screen_lines = self.screen_lines();

    match mode {
        ansi::ClearMode::Above => {
            let cursor = self.grid.cursor.point;

            // If clearing more than one line.
            if cursor.line > 1 {
                // Fully clear all lines before the current line.
                self.grid.reset_region(..cursor.line);
            }

            // Clear up to the current column in the current line.
            let end = cmp::min(cursor.column + 1, Column(self.columns()));
            for cell in &mut self.grid[cursor.line][..end] {
                *cell = bg.into();
            }

            let range = Line(0)..=cursor.line;
            self.selection = self.selection.take().filter(|s| !s.intersects_range(range));
        },
        ansi::ClearMode::Below => {
            let cursor = self.grid.cursor.point;

            // Clear the remainder of the cursor's line, then all lines below.
            for cell in &mut self.grid[cursor.line][cursor.column..] {
                *cell = bg.into();
            }

            if (cursor.line.0 as usize) < screen_lines - 1 {
                self.grid.reset_region((cursor.line + 1)..);
            }

            let range = cursor.line..Line(screen_lines as i32);
            self.selection = self.selection.take().filter(|s| !s.intersects_range(range));
        },
        ansi::ClearMode::All => {
            if self.mode.contains(TermMode::ALT_SCREEN) {
                self.grid.reset_region(..);
            } else {
                let old_offset = self.grid.display_offset();

                self.grid.clear_viewport();

                // Compute number of lines scrolled by clearing the viewport.
                let lines = self.grid.display_offset().saturating_sub(old_offset);

                self.vi_mode_cursor.point.line =
                    (self.vi_mode_cursor.point.line - lines).grid_clamp(self, Boundary::Grid);
            }

            self.selection = None;
        },
        ansi::ClearMode::Saved if self.history_size() > 0 => {
            self.grid.clear_history();

            self.vi_mode_cursor.point.line =
                self.vi_mode_cursor.point.line.grid_clamp(self, Boundary::Cursor);

            self.selection = self.selection.take().filter(|s| !s.intersects_range(..Line(0)));
        },
        // We have no history to clear.
        ansi::ClearMode::Saved => (),
    }

    self.mark_fully_damaged();
}
/// Clear the tabstop at the cursor, or all tabstops (TBC).
#[inline]
fn clear_tabs(&mut self, mode: ansi::TabulationClearMode) {
    trace!("Clearing tabs: {:?}", mode);
    match mode {
        ansi::TabulationClearMode::Current => {
            self.tabs[self.grid.cursor.point.column] = false;
        },
        ansi::TabulationClearMode::All => {
            self.tabs.clear_all();
        },
    }
}
/// Reset all important fields in the term struct.
///
/// Restores the primary screen, clears both grids, and resets modes to their
/// defaults — except vi mode, which deliberately survives the reset.
#[inline]
fn reset_state(&mut self) {
    // Make sure the primary grid is the active one before resetting.
    if self.mode.contains(TermMode::ALT_SCREEN) {
        mem::swap(&mut self.grid, &mut self.inactive_grid);
    }
    self.active_charset = Default::default();
    self.cursor_style = None;
    self.grid.reset();
    self.inactive_grid.reset();
    self.scroll_region = Line(0)..Line(self.screen_lines() as i32);
    self.tabs = TabStops::new(self.columns());
    self.title_stack = Vec::new();
    self.title = None;
    self.selection = None;
    self.vi_mode_cursor = Default::default();
    self.keyboard_mode_stack = Default::default();
    self.inactive_keyboard_mode_stack = Default::default();

    // Preserve vi mode across resets.
    self.mode &= TermMode::VI;
    self.mode.insert(TermMode::default());

    self.event_proxy.send_event(Event::CursorBlinkingChange);
    self.mark_fully_damaged();
}
/// Move the cursor up one line (RI), scrolling the region down when the
/// cursor is already at the top of the scroll region.
#[inline]
fn reverse_index(&mut self) {
    trace!("Reversing index");
    // If cursor is at the top.
    if self.grid.cursor.point.line == self.scroll_region.start {
        self.scroll_down(1);
    } else {
        self.damage_cursor();
        self.grid.cursor.point.line = cmp::max(self.grid.cursor.point.line - 1, Line(0));
        self.damage_cursor();
    }
}
/// Set or clear (via `None`) the hyperlink applied to subsequently written cells.
#[inline]
fn set_hyperlink(&mut self, hyperlink: Option<Hyperlink>) {
    trace!("Setting hyperlink: {:?}", hyperlink);
    self.grid.cursor.template.set_hyperlink(hyperlink.map(|e| e.into()));
}
/// Set a terminal attribute.
///
/// Applies an SGR attribute to the cursor's cell template, which seeds every
/// cell written afterwards. Underline variants are mutually exclusive.
#[inline]
fn terminal_attribute(&mut self, attr: Attr) {
    trace!("Setting attribute: {:?}", attr);
    let cursor = &mut self.grid.cursor;
    match attr {
        Attr::Foreground(color) => cursor.template.fg = color,
        Attr::Background(color) => cursor.template.bg = color,
        Attr::UnderlineColor(color) => cursor.template.set_underline_color(color),
        Attr::Reset => {
            cursor.template.fg = Color::Named(NamedColor::Foreground);
            cursor.template.bg = Color::Named(NamedColor::Background);
            cursor.template.flags = Flags::empty();
            cursor.template.set_underline_color(None);
        },
        Attr::Reverse => cursor.template.flags.insert(Flags::INVERSE),
        Attr::CancelReverse => cursor.template.flags.remove(Flags::INVERSE),
        Attr::Bold => cursor.template.flags.insert(Flags::BOLD),
        Attr::CancelBold => cursor.template.flags.remove(Flags::BOLD),
        Attr::Dim => cursor.template.flags.insert(Flags::DIM),
        Attr::CancelBoldDim => cursor.template.flags.remove(Flags::BOLD | Flags::DIM),
        Attr::Italic => cursor.template.flags.insert(Flags::ITALIC),
        Attr::CancelItalic => cursor.template.flags.remove(Flags::ITALIC),
        Attr::Underline => {
            cursor.template.flags.remove(Flags::ALL_UNDERLINES);
            cursor.template.flags.insert(Flags::UNDERLINE);
        },
        Attr::DoubleUnderline => {
            cursor.template.flags.remove(Flags::ALL_UNDERLINES);
            cursor.template.flags.insert(Flags::DOUBLE_UNDERLINE);
        },
        Attr::Undercurl => {
            cursor.template.flags.remove(Flags::ALL_UNDERLINES);
            cursor.template.flags.insert(Flags::UNDERCURL);
        },
        Attr::DottedUnderline => {
            cursor.template.flags.remove(Flags::ALL_UNDERLINES);
            cursor.template.flags.insert(Flags::DOTTED_UNDERLINE);
        },
        Attr::DashedUnderline => {
            cursor.template.flags.remove(Flags::ALL_UNDERLINES);
            cursor.template.flags.insert(Flags::DASHED_UNDERLINE);
        },
        Attr::CancelUnderline => cursor.template.flags.remove(Flags::ALL_UNDERLINES),
        Attr::Hidden => cursor.template.flags.insert(Flags::HIDDEN),
        Attr::CancelHidden => cursor.template.flags.remove(Flags::HIDDEN),
        Attr::Strike => cursor.template.flags.insert(Flags::STRIKEOUT),
        Attr::CancelStrike => cursor.template.flags.remove(Flags::STRIKEOUT),
        _ => {
            debug!("Term got unhandled attr: {:?}", attr);
        },
    }
}
/// Enable a DEC private mode (`CSI ? Pm h`); unknown modes are ignored.
#[inline]
fn set_private_mode(&mut self, mode: PrivateMode) {
    let mode = match mode {
        PrivateMode::Named(mode) => mode,
        PrivateMode::Unknown(mode) => {
            debug!("Ignoring unknown mode {} in set_private_mode", mode);
            return;
        },
    };

    trace!("Setting private mode: {:?}", mode);
    match mode {
        NamedPrivateMode::UrgencyHints => self.mode.insert(TermMode::URGENCY_HINTS),
        NamedPrivateMode::SwapScreenAndSetRestoreCursor => {
            if !self.mode.contains(TermMode::ALT_SCREEN) {
                self.swap_alt();
            }
        },
        NamedPrivateMode::ShowCursor => self.mode.insert(TermMode::SHOW_CURSOR),
        NamedPrivateMode::CursorKeys => self.mode.insert(TermMode::APP_CURSOR),
        // Mouse protocols are mutually exclusive.
        NamedPrivateMode::ReportMouseClicks => {
            self.mode.remove(TermMode::MOUSE_MODE);
            self.mode.insert(TermMode::MOUSE_REPORT_CLICK);
            self.event_proxy.send_event(Event::MouseCursorDirty);
        },
        NamedPrivateMode::ReportCellMouseMotion => {
            self.mode.remove(TermMode::MOUSE_MODE);
            self.mode.insert(TermMode::MOUSE_DRAG);
            self.event_proxy.send_event(Event::MouseCursorDirty);
        },
        NamedPrivateMode::ReportAllMouseMotion => {
            self.mode.remove(TermMode::MOUSE_MODE);
            self.mode.insert(TermMode::MOUSE_MOTION);
            self.event_proxy.send_event(Event::MouseCursorDirty);
        },
        NamedPrivateMode::ReportFocusInOut => self.mode.insert(TermMode::FOCUS_IN_OUT),
        NamedPrivateMode::BracketedPaste => self.mode.insert(TermMode::BRACKETED_PASTE),
        // Mouse encodings are mutually exclusive.
        NamedPrivateMode::SgrMouse => {
            self.mode.remove(TermMode::UTF8_MOUSE);
            self.mode.insert(TermMode::SGR_MOUSE);
        },
        NamedPrivateMode::Utf8Mouse => {
            self.mode.remove(TermMode::SGR_MOUSE);
            self.mode.insert(TermMode::UTF8_MOUSE);
        },
        NamedPrivateMode::AlternateScroll => self.mode.insert(TermMode::ALTERNATE_SCROLL),
        NamedPrivateMode::LineWrap => self.mode.insert(TermMode::LINE_WRAP),
        NamedPrivateMode::Origin => self.mode.insert(TermMode::ORIGIN),
        NamedPrivateMode::ColumnMode => self.deccolm(),
        NamedPrivateMode::BlinkingCursor => {
            let style = self.cursor_style.get_or_insert(self.config.default_cursor_style);
            style.blinking = true;
            self.event_proxy.send_event(Event::CursorBlinkingChange);
        },
        // Handled at the parser level to batch damage.
        NamedPrivateMode::SyncUpdate => (),
    }
}
/// Disable a DEC private mode (`CSI ? Pm l`); unknown modes are ignored.
#[inline]
fn unset_private_mode(&mut self, mode: PrivateMode) {
    let mode = match mode {
        PrivateMode::Named(mode) => mode,
        PrivateMode::Unknown(mode) => {
            debug!("Ignoring unknown mode {} in unset_private_mode", mode);
            return;
        },
    };

    trace!("Unsetting private mode: {:?}", mode);
    match mode {
        NamedPrivateMode::UrgencyHints => self.mode.remove(TermMode::URGENCY_HINTS),
        NamedPrivateMode::SwapScreenAndSetRestoreCursor => {
            if self.mode.contains(TermMode::ALT_SCREEN) {
                self.swap_alt();
            }
        },
        NamedPrivateMode::ShowCursor => self.mode.remove(TermMode::SHOW_CURSOR),
        NamedPrivateMode::CursorKeys => self.mode.remove(TermMode::APP_CURSOR),
        NamedPrivateMode::ReportMouseClicks => {
            self.mode.remove(TermMode::MOUSE_REPORT_CLICK);
            self.event_proxy.send_event(Event::MouseCursorDirty);
        },
        NamedPrivateMode::ReportCellMouseMotion => {
            self.mode.remove(TermMode::MOUSE_DRAG);
            self.event_proxy.send_event(Event::MouseCursorDirty);
        },
        NamedPrivateMode::ReportAllMouseMotion => {
            self.mode.remove(TermMode::MOUSE_MOTION);
            self.event_proxy.send_event(Event::MouseCursorDirty);
        },
        NamedPrivateMode::ReportFocusInOut => self.mode.remove(TermMode::FOCUS_IN_OUT),
        NamedPrivateMode::BracketedPaste => self.mode.remove(TermMode::BRACKETED_PASTE),
        NamedPrivateMode::SgrMouse => self.mode.remove(TermMode::SGR_MOUSE),
        NamedPrivateMode::Utf8Mouse => self.mode.remove(TermMode::UTF8_MOUSE),
        NamedPrivateMode::AlternateScroll => self.mode.remove(TermMode::ALTERNATE_SCROLL),
        NamedPrivateMode::LineWrap => self.mode.remove(TermMode::LINE_WRAP),
        NamedPrivateMode::Origin => self.mode.remove(TermMode::ORIGIN),
        NamedPrivateMode::ColumnMode => self.deccolm(),
        NamedPrivateMode::BlinkingCursor => {
            let style = self.cursor_style.get_or_insert(self.config.default_cursor_style);
            style.blinking = false;
            self.event_proxy.send_event(Event::CursorBlinkingChange);
        },
        NamedPrivateMode::SyncUpdate => (),
    }
}
/// Report a DEC private mode's state (DECRQM) back to the PTY.
#[inline]
fn report_private_mode(&mut self, mode: PrivateMode) {
    trace!("Reporting private mode {mode:?}");
    let state = match mode {
        PrivateMode::Named(mode) => match mode {
            NamedPrivateMode::CursorKeys => self.mode.contains(TermMode::APP_CURSOR).into(),
            NamedPrivateMode::Origin => self.mode.contains(TermMode::ORIGIN).into(),
            NamedPrivateMode::LineWrap => self.mode.contains(TermMode::LINE_WRAP).into(),
            NamedPrivateMode::BlinkingCursor => {
                let style = self.cursor_style.get_or_insert(self.config.default_cursor_style);
                style.blinking.into()
            },
            NamedPrivateMode::ShowCursor => self.mode.contains(TermMode::SHOW_CURSOR).into(),
            NamedPrivateMode::ReportMouseClicks => {
                self.mode.contains(TermMode::MOUSE_REPORT_CLICK).into()
            },
            NamedPrivateMode::ReportCellMouseMotion => {
                self.mode.contains(TermMode::MOUSE_DRAG).into()
            },
            NamedPrivateMode::ReportAllMouseMotion => {
                self.mode.contains(TermMode::MOUSE_MOTION).into()
            },
            NamedPrivateMode::ReportFocusInOut => {
                self.mode.contains(TermMode::FOCUS_IN_OUT).into()
            },
            NamedPrivateMode::Utf8Mouse => self.mode.contains(TermMode::UTF8_MOUSE).into(),
            NamedPrivateMode::SgrMouse => self.mode.contains(TermMode::SGR_MOUSE).into(),
            NamedPrivateMode::AlternateScroll => {
                self.mode.contains(TermMode::ALTERNATE_SCROLL).into()
            },
            NamedPrivateMode::UrgencyHints => {
                self.mode.contains(TermMode::URGENCY_HINTS).into()
            },
            NamedPrivateMode::SwapScreenAndSetRestoreCursor => {
                self.mode.contains(TermMode::ALT_SCREEN).into()
            },
            NamedPrivateMode::BracketedPaste => {
                self.mode.contains(TermMode::BRACKETED_PASTE).into()
            },
            NamedPrivateMode::SyncUpdate => ModeState::Reset,
            NamedPrivateMode::ColumnMode => ModeState::NotSupported,
        },
        PrivateMode::Unknown(_) => ModeState::NotSupported,
    };

    self.event_proxy.send_event(Event::PtyWrite(format!(
        "\x1b[?{};{}$y",
        mode.raw(),
        state as u8,
    )));
}
/// Enable a public ANSI mode (`CSI Pm h`); unknown modes are ignored.
#[inline]
fn set_mode(&mut self, mode: ansi::Mode) {
    let mode = match mode {
        ansi::Mode::Named(mode) => mode,
        ansi::Mode::Unknown(mode) => {
            debug!("Ignoring unknown mode {} in set_mode", mode);
            return;
        },
    };

    trace!("Setting public mode: {:?}", mode);
    match mode {
        NamedMode::Insert => self.mode.insert(TermMode::INSERT),
        NamedMode::LineFeedNewLine => self.mode.insert(TermMode::LINE_FEED_NEW_LINE),
    }
}
/// Disable a public ANSI mode (`CSI Pm l`); unknown modes are ignored.
#[inline]
fn unset_mode(&mut self, mode: ansi::Mode) {
    let mode = match mode {
        ansi::Mode::Named(mode) => mode,
        ansi::Mode::Unknown(mode) => {
            debug!("Ignoring unknown mode {} in unset_mode", mode);
            return;
        },
    };

    trace!("Setting public mode: {:?}", mode);
    match mode {
        NamedMode::Insert => {
            self.mode.remove(TermMode::INSERT);

            // Leaving insert mode invalidates shifted cells everywhere.
            self.mark_fully_damaged();
        },
        NamedMode::LineFeedNewLine => self.mode.remove(TermMode::LINE_FEED_NEW_LINE),
    }
}
/// Report a public ANSI mode's state back to the PTY.
#[inline]
fn report_mode(&mut self, mode: ansi::Mode) {
    trace!("Reporting mode {mode:?}");
    let state = match mode {
        ansi::Mode::Named(mode) => match mode {
            NamedMode::Insert => self.mode.contains(TermMode::INSERT).into(),
            NamedMode::LineFeedNewLine => {
                self.mode.contains(TermMode::LINE_FEED_NEW_LINE).into()
            },
        },
        ansi::Mode::Unknown(_) => ModeState::NotSupported,
    };

    self.event_proxy.send_event(Event::PtyWrite(format!(
        "\x1b[{};{}$y",
        mode.raw(),
        state as u8,
    )));
}
/// Set the scroll region (DECSTBM) from 1-based `top`/`bottom` and home the
/// cursor.
#[inline]
fn set_scrolling_region(&mut self, top: usize, bottom: Option<usize>) {
    // Fallback to the last line as default.
    let bottom = bottom.unwrap_or_else(|| self.screen_lines());

    if top >= bottom {
        debug!("Invalid scrolling region: ({};{})", top, bottom);
        return;
    }

    // Bottom should be included in the range, but range end is not
    // usually included. One option would be to use an inclusive
    // range, but instead we just let the open range end be 1
    // higher.
    let start = Line(top as i32 - 1);
    let end = Line(bottom as i32);

    trace!("Setting scrolling region: ({};{})", start, end);

    let screen_lines = Line(self.screen_lines() as i32);
    self.scroll_region.start = cmp::min(start, screen_lines);
    self.scroll_region.end = cmp::min(end, screen_lines);
    self.goto(0, 0);
}
/// Enable keypad application mode (DECKPAM).
#[inline]
fn set_keypad_application_mode(&mut self) {
    trace!("Setting keypad application mode");
    self.mode.insert(TermMode::APP_KEYPAD);
}
/// Disable keypad application mode (DECKPNM).
#[inline]
fn unset_keypad_application_mode(&mut self) {
    trace!("Unsetting keypad application mode");
    self.mode.remove(TermMode::APP_KEYPAD);
}
/// Assign a standard charset to one of the cursor's charset slots (SCS).
#[inline]
fn configure_charset(&mut self, index: CharsetIndex, charset: StandardCharset) {
    trace!("Configuring charset {:?} as {:?}", index, charset);
    self.grid.cursor.charsets[index] = charset;
}
/// Select which charset slot is active for subsequent input (SI/SO).
#[inline]
fn set_active_charset(&mut self, index: CharsetIndex) {
    trace!("Setting active charset {:?}", index);
    self.active_charset = index;
}
/// Set the cursor style; `None` falls back to the configured default.
#[inline]
fn set_cursor_style(&mut self, style: Option<CursorStyle>) {
    trace!("Setting cursor style {:?}", style);
    self.cursor_style = style;

    // Notify UI about blinking changes.
    self.event_proxy.send_event(Event::CursorBlinkingChange);
}
/// Set only the cursor shape, preserving the current blinking state.
#[inline]
fn set_cursor_shape(&mut self, shape: CursorShape) {
    trace!("Setting cursor shape {:?}", shape);

    let style = self.cursor_style.get_or_insert(self.config.default_cursor_style);
    style.shape = shape;
}
/// Set the window title, or reset it when `None`, and notify the UI.
#[inline]
fn set_title(&mut self, title: Option<String>) {
    trace!("Setting title to '{:?}'", title);

    self.title.clone_from(&title);

    // `None` resets the title back to the default.
    self.event_proxy.send_event(title.map_or(Event::ResetTitle, Event::Title));
}
/// Push the current title onto the title stack (XTWINOPS 22), evicting the
/// oldest entry when the stack is full.
#[inline]
fn push_title(&mut self) {
    trace!("Pushing '{:?}' onto title stack", self.title);

    if self.title_stack.len() >= TITLE_STACK_MAX_DEPTH {
        let removed = self.title_stack.remove(0);
        trace!(
            "Removing '{:?}' from bottom of title stack that exceeds its maximum depth",
            removed
        );
    }

    self.title_stack.push(self.title.clone());
}
/// Pop and apply the most recently pushed title (XTWINOPS 23); no-op when the
/// stack is empty.
#[inline]
fn pop_title(&mut self) {
    trace!("Attempting to pop title from stack...");

    if let Some(popped) = self.title_stack.pop() {
        trace!("Title '{:?}' popped from stack", popped);
        self.set_title(popped);
    }
}
/// Report the text area size in pixels (XTWINOPS 14); the UI supplies the
/// window metrics to the closure, which formats the reply.
#[inline]
fn text_area_size_pixels(&mut self) {
    self.event_proxy.send_event(Event::TextAreaSizeRequest(Arc::new(move |window_size| {
        let height = window_size.num_lines * window_size.cell_height;
        let width = window_size.num_cols * window_size.cell_width;
        format!("\x1b[4;{height};{width}t")
    })));
}
/// Report the text area size in character cells (XTWINOPS 18).
#[inline]
fn text_area_size_chars(&mut self) {
    let text = format!("\x1b[8;{};{}t", self.screen_lines(), self.columns());
    self.event_proxy.send_event(Event::PtyWrite(text));
}
}
/// The state of the [`Mode`] and [`PrivateMode`].
///
/// Discriminants match the DECRPM reply values and must not change.
#[repr(u8)]
#[derive(Debug, Clone, Copy)]
enum ModeState {
    /// The mode is not supported.
    NotSupported = 0,
    /// The mode is currently set.
    Set = 1,
    /// The mode is currently not set.
    Reset = 2,
}

impl From<bool> for ModeState {
    /// `true` maps to [`ModeState::Set`], `false` to [`ModeState::Reset`].
    fn from(value: bool) -> Self {
        match value {
            true => Self::Set,
            false => Self::Reset,
        }
    }
}
/// Terminal version for escape sequence reports.
///
/// This returns the current terminal version as a unique number based on alacritty_terminal's
/// semver version. The different versions are padded to ensure that a higher semver version will
/// always report a higher version number.
fn version_number(mut version: &str) -> usize {
    // Drop any pre-release suffix such as "-dev" or "-rc1".
    if let Some(separator) = version.rfind('-') {
        version = &version[..separator];
    }

    // Weight each component with two decimal digits:
    // patch + 100 * minor + 10_000 * major; unparsable parts count as zero.
    version
        .split('.')
        .rev()
        .enumerate()
        .map(|(i, part)| usize::pow(100, i as u32) * part.parse::<usize>().unwrap_or(0))
        .sum()
}
/// Target buffer for OSC 52 clipboard operations.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum ClipboardType {
    /// The regular system clipboard.
    Clipboard,
    /// The primary selection (X11-style).
    Selection,
}
struct TabStops {
tabs: Vec<bool>,
}
impl TabStops {
#[inline]
fn new(columns: usize) -> TabStops {
TabStops { tabs: (0..columns).map(|i| i % INITIAL_TABSTOPS == 0).collect() }
}
/// Remove all tabstops.
#[inline]
fn clear_all(&mut self) {
unsafe {
ptr::write_bytes(self.tabs.as_mut_ptr(), 0, self.tabs.len());
}
}
/// Increase tabstop capacity.
#[inline]
fn resize(&mut self, columns: usize) {
let mut index = self.tabs.len();
self.tabs.resize_with(columns, || {
let is_tabstop = index % INITIAL_TABSTOPS == 0;
index += 1;
is_tabstop
});
}
}
/// Read a tabstop flag by typed column index.
impl Index<Column> for TabStops {
    type Output = bool;

    fn index(&self, index: Column) -> &bool {
        &self.tabs[index.0]
    }
}
/// Write a tabstop flag by typed column index.
impl IndexMut<Column> for TabStops {
    fn index_mut(&mut self, index: Column) -> &mut bool {
        self.tabs.index_mut(index.0)
    }
}
/// Terminal cursor rendering information.
#[derive(Copy, Clone, PartialEq, Eq)]
pub struct RenderableCursor {
    // Shape to draw; `Hidden` when the cursor should not be rendered.
    pub shape: CursorShape,
    // Grid position the cursor is drawn at.
    pub point: Point,
}
impl RenderableCursor {
    /// Snapshot the cursor to render for `term`, honoring vi mode and the
    /// `SHOW_CURSOR` mode flag.
    fn new<T>(term: &Term<T>) -> Self {
        // Cursor position.
        let vi_mode = term.mode().contains(TermMode::VI);
        let mut point = if vi_mode { term.vi_mode_cursor.point } else { term.grid.cursor.point };
        // A wide-char spacer means the cursor sits on the trailing half of a
        // fullwidth glyph; draw it over the leading cell instead.
        if term.grid[point].flags.contains(Flags::WIDE_CHAR_SPACER) {
            point.column -= 1;
        }

        // Cursor shape.
        let shape = if !vi_mode && !term.mode().contains(TermMode::SHOW_CURSOR) {
            CursorShape::Hidden
        } else {
            term.cursor_style().shape
        };

        Self { shape, point }
    }
}
/// Visible terminal content.
///
/// This contains all content required to render the current terminal view.
pub struct RenderableContent<'a> {
    pub display_iter: GridIterator<'a, Cell>,
    pub selection: Option<SelectionRange>,
    pub cursor: RenderableCursor,
    pub display_offset: usize,
    pub colors: &'a color::Colors,
    pub mode: TermMode,
}
impl<'a> RenderableContent<'a> {
    /// Borrow everything the renderer needs from `term` for one frame.
    fn new<T>(term: &'a Term<T>) -> Self {
        Self {
            display_iter: term.grid().display_iter(),
            display_offset: term.grid().display_offset(),
            cursor: RenderableCursor::new(term),
            selection: term.selection.as_ref().and_then(|s| s.to_range(term)),
            colors: &term.colors,
            mode: *term.mode(),
        }
    }
}
/// Terminal test helpers.
pub mod test {
    use super::*;

    #[cfg(feature = "serde")]
    use serde::{Deserialize, Serialize};

    use crate::event::VoidListener;

    /// Fixed terminal dimensions for tests.
    #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
    pub struct TermSize {
        pub columns: usize,
        pub screen_lines: usize,
    }

    impl TermSize {
        pub fn new(columns: usize, screen_lines: usize) -> Self {
            Self { columns, screen_lines }
        }
    }

    impl Dimensions for TermSize {
        // Test terminals have no scrollback, so total == visible.
        fn total_lines(&self) -> usize {
            self.screen_lines()
        }

        fn screen_lines(&self) -> usize {
            self.screen_lines
        }

        fn columns(&self) -> usize {
            self.columns
        }
    }

    /// Construct a terminal from its content as string.
    ///
    /// A `\n` will break line and `\r\n` will break line without wrapping.
    ///
    /// # Examples
    ///
    /// ```rust
    /// use alacritty_terminal::term::test::mock_term;
    ///
    /// // Create a terminal with the following cells:
    /// //
    /// // [h][e][l][l][o] <- WRAPLINE flag set
    /// // [:][)][ ][ ][ ]
    /// // [t][e][s][t][ ]
    /// mock_term(
    ///     "\
    ///     hello\n:)\r\ntest",
    /// );
    /// ```
    pub fn mock_term(content: &str) -> Term<VoidListener> {
        let lines: Vec<&str> = content.split('\n').collect();
        // Width of the widest line, counting fullwidth chars as two columns.
        let num_cols = lines
            .iter()
            .map(|line| line.chars().filter(|c| *c != '\r').map(|c| c.width().unwrap()).sum())
            .max()
            .unwrap_or(0);

        // Create terminal with the appropriate dimensions.
        let size = TermSize::new(num_cols, lines.len());
        let mut term = Term::new(Config::default(), &size, VoidListener);

        // Fill terminal with content.
        for (line, text) in lines.iter().enumerate() {
            let line = Line(line as i32);
            // Lines not ending in `\r` wrap into the next one.
            if !text.ends_with('\r') && line + 1 != lines.len() {
                term.grid[line][Column(num_cols - 1)].flags.insert(Flags::WRAPLINE);
            }

            let mut index = 0;
            for c in text.chars().take_while(|c| *c != '\r') {
                term.grid[line][Column(index)].c = c;

                // Handle fullwidth characters.
                let width = c.width().unwrap();
                if width == 2 {
                    term.grid[line][Column(index)].flags.insert(Flags::WIDE_CHAR);
                    term.grid[line][Column(index + 1)].flags.insert(Flags::WIDE_CHAR_SPACER);
                }

                index += width;
            }
        }

        term
    }
}
#[cfg(test)]
mod tests {
use super::*;
use std::mem;
use crate::event::VoidListener;
use crate::grid::{Grid, Scroll};
use crate::index::{Column, Point, Side};
use crate::selection::{Selection, SelectionType};
use crate::term::cell::{Cell, Flags};
use crate::term::test::TermSize;
use crate::vte::ansi::{self, CharsetIndex, Handler, StandardCharset};
#[test]
fn scroll_display_page_up() {
let size = TermSize::new(5, 10);
let mut term = Term::new(Config::default(), &size, VoidListener);
// Create 11 lines of scrollback.
for _ in 0..20 {
term.newline();
}
// Scrollable amount to top is 11.
term.scroll_display(Scroll::PageUp);
assert_eq!(term.vi_mode_cursor.point, Point::new(Line(-1), Column(0)));
assert_eq!(term.grid.display_offset(), 10);
// Scrollable amount to top is 1.
term.scroll_display(Scroll::PageUp);
assert_eq!(term.vi_mode_cursor.point, Point::new(Line(-2), Column(0)));
assert_eq!(term.grid.display_offset(), 11);
// Scrollable amount to top is 0.
term.scroll_display(Scroll::PageUp);
assert_eq!(term.vi_mode_cursor.point, Point::new(Line(-2), Column(0)));
assert_eq!(term.grid.display_offset(), 11);
}
#[test]
fn scroll_display_page_down() {
let size = TermSize::new(5, 10);
let mut term = Term::new(Config::default(), &size, VoidListener);
// Create 11 lines of scrollback.
for _ in 0..20 {
term.newline();
}
// Change display_offset to topmost.
term.grid_mut().scroll_display(Scroll::Top);
term.vi_mode_cursor = ViModeCursor::new(Point::new(Line(-11), Column(0)));
// Scrollable amount to bottom is 11.
term.scroll_display(Scroll::PageDown);
assert_eq!(term.vi_mode_cursor.point, Point::new(Line(-1), Column(0)));
assert_eq!(term.grid.display_offset(), 1);
// Scrollable amount to bottom is 1.
term.scroll_display(Scroll::PageDown);
assert_eq!(term.vi_mode_cursor.point, Point::new(Line(0), Column(0)));
assert_eq!(term.grid.display_offset(), 0);
// Scrollable amount to bottom is 0.
term.scroll_display(Scroll::PageDown);
assert_eq!(term.vi_mode_cursor.point, Point::new(Line(0), Column(0)));
assert_eq!(term.grid.display_offset(), 0);
}
/// Simple (contiguous) selections should handle an empty line inside the
/// selection as well as a wrapped line.
#[test]
fn simple_selection_works() {
    let size = TermSize::new(5, 5);
    let mut term = Term::new(Config::default(), &size, VoidListener);
    let grid = term.grid_mut();
    // Fill rows 0, 2 and 3 with `"aaa"`; row 1 stays empty.
    for i in 0..4 {
        if i == 1 {
            continue;
        }
        grid[Line(i)][Column(0)].c = '"';
        for j in 1..4 {
            grid[Line(i)][Column(j)].c = 'a';
        }
        grid[Line(i)][Column(4)].c = '"';
    }
    // Turn row 2 into ` aaa ` wrapping onto row 3, which becomes ` aaa"`.
    grid[Line(2)][Column(0)].c = ' ';
    grid[Line(2)][Column(4)].c = ' ';
    grid[Line(2)][Column(4)].flags.insert(Flags::WRAPLINE);
    grid[Line(3)][Column(0)].c = ' ';

    // Multiple lines contain an empty line.
    term.selection = Some(Selection::new(
        SelectionType::Simple,
        Point { line: Line(0), column: Column(0) },
        Side::Left,
    ));
    if let Some(s) = term.selection.as_mut() {
        s.update(Point { line: Line(2), column: Column(4) }, Side::Right);
    }
    assert_eq!(term.selection_to_string(), Some(String::from("\"aaa\"\n\n aaa ")));

    // A wrapline.
    term.selection = Some(Selection::new(
        SelectionType::Simple,
        Point { line: Line(2), column: Column(0) },
        Side::Left,
    ));
    if let Some(s) = term.selection.as_mut() {
        s.update(Point { line: Line(3), column: Column(4) }, Side::Right);
    }
    assert_eq!(term.selection_to_string(), Some(String::from(" aaa aaa\"")));
}
/// Semantic selection should expand to the run of characters around the
/// origin point, stopping at the configured semantic escape characters and
/// following a wrapped line.
#[test]
fn semantic_selection_works() {
    let size = TermSize::new(5, 3);
    let mut term = Term::new(Config::default(), &size, VoidListener);
    let mut grid: Grid<Cell> = Grid::new(3, 5, 0);
    // Fill the first two rows with 'a', then punch in '"' separators.
    for i in 0..5 {
        for j in 0..2 {
            grid[Line(j)][Column(i)].c = 'a';
        }
    }
    grid[Line(0)][Column(0)].c = '"';
    grid[Line(0)][Column(3)].c = '"';
    grid[Line(1)][Column(2)].c = '"';
    // Row 0 wraps into row 1.
    grid[Line(0)][Column(4)].flags.insert(Flags::WRAPLINE);
    let mut escape_chars = String::from("\"");
    mem::swap(&mut term.grid, &mut grid);
    mem::swap(&mut term.config.semantic_escape_chars, &mut escape_chars);

    // Word between the quotes on line 0.
    {
        term.selection = Some(Selection::new(
            SelectionType::Semantic,
            Point { line: Line(0), column: Column(1) },
            Side::Left,
        ));
        assert_eq!(term.selection_to_string(), Some(String::from("aa")));
    }
    // Word starting on line 0 and continuing across the wrapped line.
    {
        term.selection = Some(Selection::new(
            SelectionType::Semantic,
            Point { line: Line(0), column: Column(4) },
            Side::Left,
        ));
        assert_eq!(term.selection_to_string(), Some(String::from("aaa")));
    }
    // Same wrapped word, selected from its continuation on line 1.
    {
        term.selection = Some(Selection::new(
            SelectionType::Semantic,
            Point { line: Line(1), column: Column(1) },
            Side::Left,
        ));
        assert_eq!(term.selection_to_string(), Some(String::from("aaa")));
    }
}
/// A line selection should capture the entire line, including a trailing
/// newline.
#[test]
fn line_selection_works() {
    let size = TermSize::new(5, 1);
    let mut term = Term::new(Config::default(), &size, VoidListener);

    // Build a single-row grid containing `"aa"a`.
    let mut grid: Grid<Cell> = Grid::new(1, 5, 0);
    for (col, ch) in "\"aa\"a".chars().enumerate() {
        grid[Line(0)][Column(col)].c = ch;
    }
    mem::swap(&mut term.grid, &mut grid);

    // Selecting anywhere on the line yields the whole line plus a newline.
    term.selection = Some(Selection::new(
        SelectionType::Lines,
        Point { line: Line(0), column: Column(3) },
        Side::Left,
    ));
    assert_eq!(term.selection_to_string(), Some(String::from("\"aa\"a\n")));
}
/// Rectangular (block) selections should clip every line to the selected
/// column range, covering the same-column, first-column and last-column
/// cases, including a wrapped line.
#[test]
fn block_selection_works() {
    let size = TermSize::new(5, 5);
    let mut term = Term::new(Config::default(), &size, VoidListener);
    let grid = term.grid_mut();
    // Fill rows 1..=3 with `"aaa"`.
    for i in 1..4 {
        grid[Line(i)][Column(0)].c = '"';
        for j in 1..4 {
            grid[Line(i)][Column(j)].c = 'a';
        }
        grid[Line(i)][Column(4)].c = '"';
    }
    // Blank out a cell in the middle, wrap row 2, and blank row 3's end.
    grid[Line(2)][Column(2)].c = ' ';
    grid[Line(2)][Column(4)].flags.insert(Flags::WRAPLINE);
    grid[Line(3)][Column(4)].c = ' ';

    term.selection = Some(Selection::new(
        SelectionType::Block,
        Point { line: Line(0), column: Column(3) },
        Side::Left,
    ));

    // The same column.
    if let Some(s) = term.selection.as_mut() {
        s.update(Point { line: Line(3), column: Column(3) }, Side::Right);
    }
    assert_eq!(term.selection_to_string(), Some(String::from("\na\na\na")));

    // The first column.
    if let Some(s) = term.selection.as_mut() {
        s.update(Point { line: Line(3), column: Column(0) }, Side::Left);
    }
    assert_eq!(term.selection_to_string(), Some(String::from("\n\"aa\n\"a\n\"aa")));

    // The last column.
    if let Some(s) = term.selection.as_mut() {
        s.update(Point { line: Line(3), column: Column(4) }, Side::Right);
    }
    assert_eq!(term.selection_to_string(), Some(String::from("\na\"\na\"\na")));
}
/// Check that the grid can be serialized back and forth losslessly.
///
/// This test is in the term module as opposed to the grid since we want to
/// test this property with a T=Cell.
#[test]
#[cfg(feature = "serde")]
fn grid_serde() {
    // Round-trip an empty 24x80 grid through JSON and compare.
    let original: Grid<Cell> = Grid::new(24, 80, 0);
    let json = serde_json::to_string(&original).expect("ser");
    let restored: Grid<Cell> = serde_json::from_str(&json).expect("de");
    assert_eq!(restored, original);
}
/// With the special-character/line-drawing charset mapped into G0, input
/// characters are translated before being written to the grid.
#[test]
fn input_line_drawing_character() {
    let size = TermSize::new(7, 17);
    let mut term = Term::new(Config::default(), &size, VoidListener);

    // Map the DEC special character and line drawing set into G0.
    term.configure_charset(CharsetIndex::G0, StandardCharset::SpecialCharacterAndLineDrawing);
    term.input('a');

    // The input 'a' lands at the origin translated to '▒'.
    let origin = Point::new(Line(0), Column(0));
    assert_eq!(term.grid()[origin].c, '▒');
}
/// Clearing the visible viewport must not move a scrolled-back display.
#[test]
fn clearing_viewport_keeps_history_position() {
    let size = TermSize::new(10, 20);
    let mut term = Term::new(Config::default(), &size, VoidListener);

    // Create 10 lines of scrollback.
    for _ in 0..29 {
        term.newline();
    }

    // Change the display area.
    term.scroll_display(Scroll::Top);
    assert_eq!(term.grid.display_offset(), 10);

    // Clear the viewport; the display offset should be unchanged.
    term.clear_screen(ansi::ClearMode::All);
    assert_eq!(term.grid.display_offset(), 10);
}
/// Clearing the viewport while vi mode is active must preserve both the
/// display offset and the vi cursor position.
#[test]
fn clearing_viewport_with_vi_mode_keeps_history_position() {
    let size = TermSize::new(10, 20);
    let mut term = Term::new(Config::default(), &size, VoidListener);

    // Create 10 lines of scrollback.
    for _ in 0..29 {
        term.newline();
    }

    // Enable vi mode.
    term.toggle_vi_mode();

    // Change the display area and the vi cursor position.
    term.scroll_display(Scroll::Top);
    term.vi_mode_cursor.point = Point::new(Line(-5), Column(3));
    assert_eq!(term.grid.display_offset(), 10);

    // Clear the viewport; offset and vi cursor should be unchanged.
    term.clear_screen(ansi::ClearMode::All);
    assert_eq!(term.grid.display_offset(), 10);
    assert_eq!(term.vi_mode_cursor.point, Point::new(Line(-5), Column(3)));
}
/// Clearing the saved lines (scrollback) must snap the display offset back
/// to the bottom of the buffer.
#[test]
fn clearing_scrollback_resets_display_offset() {
    let size = TermSize::new(10, 20);
    let mut term = Term::new(Config::default(), &size, VoidListener);

    // Create 10 lines of scrollback.
    for _ in 0..29 {
        term.newline();
    }

    // Change the display area.
    term.scroll_display(Scroll::Top);
    assert_eq!(term.grid.display_offset(), 10);

    // Clear the scrollback buffer; the offset has nothing left to point at.
    term.clear_screen(ansi::ClearMode::Saved);
    assert_eq!(term.grid.display_offset(), 0);
}
/// Clearing the scrollback while the vi cursor points into it must clamp the
/// vi cursor back into the remaining viewport (column is preserved).
#[test]
fn clearing_scrollback_sets_vi_cursor_into_viewport() {
    let size = TermSize::new(10, 20);
    let mut term = Term::new(Config::default(), &size, VoidListener);

    // Create 10 lines of scrollback.
    for _ in 0..29 {
        term.newline();
    }

    // Enable vi mode.
    term.toggle_vi_mode();

    // Change the display area and the vi cursor position.
    term.scroll_display(Scroll::Top);
    term.vi_mode_cursor.point = Point::new(Line(-5), Column(3));
    assert_eq!(term.grid.display_offset(), 10);

    // Clear the scrollback buffer; vi cursor line is clamped to the viewport.
    term.clear_screen(ansi::ClearMode::Saved);
    assert_eq!(term.grid.display_offset(), 0);
    assert_eq!(term.vi_mode_cursor.point, Point::new(Line(0), Column(3)));
}
/// After clearing the history, scrolling the display must not change what
/// the grid contains.
#[test]
fn clear_saved_lines() {
    let size = TermSize::new(7, 17);
    let mut term = Term::new(Config::default(), &size, VoidListener);

    // Add one line of scrollback.
    term.grid.scroll_up(&(Line(0)..Line(1)), 1);

    // Clear the history.
    term.clear_screen(ansi::ClearMode::Saved);

    // Make sure that scrolling does not change the grid.
    let mut scrolled_grid = term.grid.clone();
    scrolled_grid.scroll_display(Scroll::Top);

    // Truncate grids for comparison.
    scrolled_grid.truncate();
    term.grid.truncate();

    assert_eq!(term.grid, scrolled_grid);
}
/// A vi cursor positioned inside the scrollback buffer should follow its
/// content when a new line pushes the buffer further up.
#[test]
fn vi_cursor_keep_pos_on_scrollback_buffer() {
    let size = TermSize::new(5, 10);
    let mut term = Term::new(Config::default(), &size, VoidListener);

    // Create 11 lines of scrollback.
    for _ in 0..20 {
        term.newline();
    }

    // Enable vi mode and park the vi cursor at the top of the scrollback.
    term.toggle_vi_mode();
    term.scroll_display(Scroll::Top);
    term.vi_mode_cursor.point.line = Line(-11);

    // A linefeed pushes history up by one; the vi cursor tracks its line.
    term.linefeed();
    assert_eq!(term.vi_mode_cursor.point.line, Line(-12));
}
/// Growing the terminal height should pull lines back out of the scrollback
/// and move the active cursor down with the restored content.
#[test]
fn grow_lines_updates_active_cursor_pos() {
    let mut size = TermSize::new(100, 10);
    let mut term = Term::new(Config::default(), &size, VoidListener);

    // Create 10 lines of scrollback.
    for _ in 0..19 {
        term.newline();
    }
    assert_eq!(term.history_size(), 10);
    assert_eq!(term.grid.cursor.point, Point::new(Line(9), Column(0)));

    // Increase visible lines.
    size.screen_lines = 30;
    term.resize(size);

    // All history is absorbed into the viewport; cursor follows its line.
    assert_eq!(term.history_size(), 0);
    assert_eq!(term.grid.cursor.point, Point::new(Line(19), Column(0)));
}
/// Growing the terminal height while the alt screen is active should still
/// update the (inactive) primary grid's cursor position.
#[test]
fn grow_lines_updates_inactive_cursor_pos() {
    let mut size = TermSize::new(100, 10);
    let mut term = Term::new(Config::default(), &size, VoidListener);

    // Create 10 lines of scrollback.
    for _ in 0..19 {
        term.newline();
    }
    assert_eq!(term.history_size(), 10);
    assert_eq!(term.grid.cursor.point, Point::new(Line(9), Column(0)));

    // Enter alt screen.
    term.set_private_mode(NamedPrivateMode::SwapScreenAndSetRestoreCursor.into());

    // Increase visible lines.
    size.screen_lines = 30;
    term.resize(size);

    // Leave alt screen.
    term.unset_private_mode(NamedPrivateMode::SwapScreenAndSetRestoreCursor.into());

    assert_eq!(term.history_size(), 0);
    assert_eq!(term.grid.cursor.point, Point::new(Line(19), Column(0)));
}
/// Shrinking the terminal height should push lines into the scrollback and
/// move the active cursor up with the displaced content.
#[test]
fn shrink_lines_updates_active_cursor_pos() {
    let mut size = TermSize::new(100, 10);
    let mut term = Term::new(Config::default(), &size, VoidListener);

    // Create 10 lines of scrollback.
    for _ in 0..19 {
        term.newline();
    }
    assert_eq!(term.history_size(), 10);
    assert_eq!(term.grid.cursor.point, Point::new(Line(9), Column(0)));

    // Reduce visible lines.
    size.screen_lines = 5;
    term.resize(size);

    // Displaced lines join the history; cursor stays on its content line.
    assert_eq!(term.history_size(), 15);
    assert_eq!(term.grid.cursor.point, Point::new(Line(4), Column(0)));
}
/// Shrinking the terminal height while the alt screen is active should still
/// update the (inactive) primary grid's cursor position.
#[test]
fn shrink_lines_updates_inactive_cursor_pos() {
    let mut size = TermSize::new(100, 10);
    let mut term = Term::new(Config::default(), &size, VoidListener);

    // Create 10 lines of scrollback.
    for _ in 0..19 {
        term.newline();
    }
    assert_eq!(term.history_size(), 10);
    assert_eq!(term.grid.cursor.point, Point::new(Line(9), Column(0)));

    // Enter alt screen.
    term.set_private_mode(NamedPrivateMode::SwapScreenAndSetRestoreCursor.into());

    // Reduce visible lines.
    size.screen_lines = 5;
    term.resize(size);

    // Leave alt screen.
    term.unset_private_mode(NamedPrivateMode::SwapScreenAndSetRestoreCursor.into());

    assert_eq!(term.history_size(), 15);
    assert_eq!(term.grid.cursor.point, Point::new(Line(4), Column(0)));
}
/// Exercise the public damage API: partial damage from input, full damage
/// from scrolling, and partial line damage while scrolled into the viewport.
#[test]
fn damage_public_usage() {
    let size = TermSize::new(10, 10);
    let mut term = Term::new(Config::default(), &size, VoidListener);
    // Reset terminal for partial damage tests since it's initialized as fully damaged.
    term.reset_damage();

    // Test that we damage input from [`Term::input`].
    let left = term.grid.cursor.point.column.0;
    term.input('d');
    term.input('a');
    term.input('m');
    term.input('a');
    term.input('g');
    term.input('e');
    let right = term.grid.cursor.point.column.0;

    let mut damaged_lines = match term.damage() {
        TermDamage::Full => panic!("Expected partial damage, however got Full"),
        TermDamage::Partial(damaged_lines) => damaged_lines,
    };
    assert_eq!(damaged_lines.next(), Some(LineDamageBounds { line: 0, left, right }));
    assert_eq!(damaged_lines.next(), None);
    term.reset_damage();

    // Create scrollback.
    for _ in 0..20 {
        term.newline();
    }

    // Scrolling the content marks the whole terminal damaged.
    match term.damage() {
        TermDamage::Full => (),
        TermDamage::Partial(_) => panic!("Expected Full damage, however got Partial"),
    };
    term.reset_damage();

    term.scroll_display(Scroll::Delta(10));
    term.reset_damage();

    // No damage when scrolled into viewport.
    for idx in 0..term.columns() {
        term.goto(idx as i32, idx);
    }
    let mut damaged_lines = match term.damage() {
        TermDamage::Full => panic!("Expected partial damage, however got Full"),
        TermDamage::Partial(damaged_lines) => damaged_lines,
    };
    assert_eq!(damaged_lines.next(), None);

    // Scroll back into the viewport, so we have 2 visible lines which terminal can write
    // to.
    term.scroll_display(Scroll::Delta(-2));
    term.reset_damage();

    term.goto(0, 0);
    term.goto(1, 0);
    term.goto(2, 0);
    let display_offset = term.grid().display_offset();
    let mut damaged_lines = match term.damage() {
        TermDamage::Full => panic!("Expected partial damage, however got Full"),
        TermDamage::Partial(damaged_lines) => damaged_lines,
    };
    // Damage is reported in viewport coordinates, shifted by the offset.
    assert_eq!(
        damaged_lines.next(),
        Some(LineDamageBounds { line: display_offset, left: 0, right: 0 })
    );
    assert_eq!(
        damaged_lines.next(),
        Some(LineDamageBounds { line: display_offset + 1, left: 0, right: 0 })
    );
    assert_eq!(damaged_lines.next(), None);
}
/// Verify that individual cursor-movement and line-editing operations record
/// the expected per-line damage bounds in `term.damage.lines`.
#[test]
fn damage_cursor_movements() {
    let size = TermSize::new(10, 10);
    let mut term = Term::new(Config::default(), &size, VoidListener);
    let num_cols = term.columns();
    // Reset terminal for partial damage tests since it's initialized as fully damaged.
    term.reset_damage();

    term.goto(1, 1);

    // NOTE While we can use `[Term::damage]` to access terminal damage information, in the
    // following tests we will be accessing `term.damage.lines` directly to avoid adding extra
    // damage information (like cursor and Vi cursor), which we're not testing.
    assert_eq!(term.damage.lines[0], LineDamageBounds { line: 0, left: 0, right: 0 });
    assert_eq!(term.damage.lines[1], LineDamageBounds { line: 1, left: 1, right: 1 });
    term.damage.reset(num_cols);

    // Moving forward damages the span between old and new column.
    term.move_forward(3);
    assert_eq!(term.damage.lines[1], LineDamageBounds { line: 1, left: 1, right: 4 });
    term.damage.reset(num_cols);

    // Moving backward clamps at column 0.
    term.move_backward(8);
    assert_eq!(term.damage.lines[1], LineDamageBounds { line: 1, left: 0, right: 4 });
    term.goto(5, 5);
    term.damage.reset(num_cols);

    term.backspace();
    term.backspace();
    assert_eq!(term.damage.lines[5], LineDamageBounds { line: 5, left: 3, right: 5 });
    term.damage.reset(num_cols);

    // Vertical movement damages both the origin and destination lines.
    term.move_up(1);
    assert_eq!(term.damage.lines[5], LineDamageBounds { line: 5, left: 3, right: 3 });
    assert_eq!(term.damage.lines[4], LineDamageBounds { line: 4, left: 3, right: 3 });
    term.damage.reset(num_cols);

    term.move_down(1);
    term.move_down(1);
    assert_eq!(term.damage.lines[4], LineDamageBounds { line: 4, left: 3, right: 3 });
    assert_eq!(term.damage.lines[5], LineDamageBounds { line: 5, left: 3, right: 3 });
    assert_eq!(term.damage.lines[6], LineDamageBounds { line: 6, left: 3, right: 3 });
    term.damage.reset(num_cols);

    term.wrapline();
    assert_eq!(term.damage.lines[6], LineDamageBounds { line: 6, left: 3, right: 3 });
    assert_eq!(term.damage.lines[7], LineDamageBounds { line: 7, left: 0, right: 0 });
    term.move_forward(3);
    term.move_up(1);
    term.damage.reset(num_cols);

    term.linefeed();
    assert_eq!(term.damage.lines[6], LineDamageBounds { line: 6, left: 3, right: 3 });
    assert_eq!(term.damage.lines[7], LineDamageBounds { line: 7, left: 3, right: 3 });
    term.damage.reset(num_cols);

    term.carriage_return();
    assert_eq!(term.damage.lines[7], LineDamageBounds { line: 7, left: 0, right: 3 });
    term.damage.reset(num_cols);

    term.erase_chars(5);
    assert_eq!(term.damage.lines[7], LineDamageBounds { line: 7, left: 0, right: 5 });
    term.damage.reset(num_cols);

    // Deleting chars shifts the rest of the line, damaging to the last column.
    term.delete_chars(3);
    let right = term.columns() - 1;
    assert_eq!(term.damage.lines[7], LineDamageBounds { line: 7, left: 0, right });
    term.move_forward(term.columns());
    term.damage.reset(num_cols);

    term.move_backward_tabs(1);
    assert_eq!(term.damage.lines[7], LineDamageBounds { line: 7, left: 8, right });
    term.save_cursor_position();
    term.goto(1, 1);
    term.damage.reset(num_cols);

    // Restoring the cursor damages both the old and the restored position.
    term.restore_cursor_position();
    assert_eq!(term.damage.lines[1], LineDamageBounds { line: 1, left: 1, right: 1 });
    assert_eq!(term.damage.lines[7], LineDamageBounds { line: 7, left: 8, right: 8 });
    term.damage.reset(num_cols);

    term.clear_line(ansi::LineClearMode::All);
    assert_eq!(term.damage.lines[7], LineDamageBounds { line: 7, left: 0, right });
    term.damage.reset(num_cols);

    term.clear_line(ansi::LineClearMode::Left);
    assert_eq!(term.damage.lines[7], LineDamageBounds { line: 7, left: 0, right: 8 });
    term.damage.reset(num_cols);

    term.clear_line(ansi::LineClearMode::Right);
    assert_eq!(term.damage.lines[7], LineDamageBounds { line: 7, left: 8, right });
    term.damage.reset(num_cols);

    term.reverse_index();
    assert_eq!(term.damage.lines[7], LineDamageBounds { line: 7, left: 8, right: 8 });
    assert_eq!(term.damage.lines[6], LineDamageBounds { line: 6, left: 8, right: 8 });
}
/// Operations that invalidate the whole screen must mark the terminal as
/// fully damaged, while state-only changes must not.
#[test]
fn full_damage() {
    let size = TermSize::new(100, 10);
    let mut term = Term::new(Config::default(), &size, VoidListener);

    // A fresh terminal starts out fully damaged.
    assert!(term.damage.full);
    for _ in 0..20 {
        term.newline();
    }
    term.reset_damage();

    term.clear_screen(ansi::ClearMode::Above);
    assert!(term.damage.full);
    term.reset_damage();

    term.scroll_display(Scroll::Top);
    assert!(term.damage.full);
    term.reset_damage();

    // Sequential call to scroll display without doing anything shouldn't damage.
    term.scroll_display(Scroll::Top);
    assert!(!term.damage.full);
    term.reset_damage();

    term.set_options(Config::default());
    assert!(term.damage.full);
    term.reset_damage();

    term.scroll_down_relative(Line(5), 2);
    assert!(term.damage.full);
    term.reset_damage();

    term.scroll_up_relative(Line(3), 2);
    assert!(term.damage.full);
    term.reset_damage();

    term.deccolm();
    assert!(term.damage.full);
    term.reset_damage();

    term.decaln();
    assert!(term.damage.full);
    term.reset_damage();

    term.set_mode(NamedMode::Insert.into());
    // Just setting `Insert` mode shouldn't mark terminal as damaged.
    assert!(!term.damage.full);
    term.reset_damage();

    let color_index = 257;
    term.set_color(color_index, Rgb::default());
    assert!(term.damage.full);
    term.reset_damage();

    // Setting the same color once again shouldn't trigger full damage.
    term.set_color(color_index, Rgb::default());
    assert!(!term.damage.full);

    term.reset_color(color_index);
    assert!(term.damage.full);
    term.reset_damage();

    // We shouldn't trigger fully damage when cursor gets update.
    term.set_color(NamedColor::Cursor as usize, Rgb::default());
    assert!(!term.damage.full);

    // However requesting terminal damage should mark terminal as fully damaged in `Insert`
    // mode.
    let _ = term.damage();
    assert!(term.damage.full);
    term.reset_damage();

    term.unset_mode(NamedMode::Insert.into());
    assert!(term.damage.full);
    term.reset_damage();

    // Keep this as a last check, so we don't have to deal with restoring from alt-screen.
    term.swap_alt();
    assert!(term.damage.full);
    term.reset_damage();

    let size = TermSize::new(10, 10);
    term.resize(size);
    assert!(term.damage.full);
}
/// Exercise the window title API: setting, the title stack with its size
/// limit, and reset behavior.
#[test]
fn window_title() {
    let size = TermSize::new(7, 17);
    let mut term = Term::new(Config::default(), &size, VoidListener);

    // Title None by default.
    assert_eq!(term.title, None);

    // Title can be set.
    term.set_title(Some("Test".into()));
    assert_eq!(term.title, Some("Test".into()));

    // Title can be pushed onto stack.
    term.push_title();
    term.set_title(Some("Next".into()));
    assert_eq!(term.title, Some("Next".into()));
    assert_eq!(term.title_stack.first().unwrap(), &Some("Test".into()));

    // Title can be popped from stack and set as the window title.
    term.pop_title();
    assert_eq!(term.title, Some("Test".into()));
    assert!(term.title_stack.is_empty());

    // Title stack doesn't grow infinitely.
    for _ in 0..4097 {
        term.push_title();
    }
    assert_eq!(term.title_stack.len(), 4096);

    // Title and title stack reset when terminal state is reset.
    term.push_title();
    term.reset_state();
    assert_eq!(term.title, None);
    assert!(term.title_stack.is_empty());

    // Title stack pops back to default.
    term.title = None;
    term.push_title();
    term.set_title(Some("Test".into()));
    term.pop_title();
    assert_eq!(term.title, None);

    // Title can be reset to default.
    term.title = Some("Test".into());
    term.set_title(None);
    assert_eq!(term.title, None);
}
/// Version strings parse into comparable integers; pre-release suffixes are
/// ignored.
#[test]
fn parse_cargo_version() {
    // The crate's own version must be at least 0.10.1.
    assert!(version_number(env!("CARGO_PKG_VERSION")) >= 10_01);

    // Fixed version strings and their expected numeric encodings.
    let cases = [
        ("0.0.1-dev", 1),
        ("0.1.2-dev", 1_02),
        ("1.2.3-dev", 1_02_03),
        ("999.99.99", 9_99_99_99),
    ];
    for &(version, expected) in cases.iter() {
        assert_eq!(version_number(version), expected);
    }
}
}
| rust | {
"argument_definitions": [],
"end_line": 445,
"name": "new",
"signature": "pub fn new(config: Config, dimensions: &D, event_proxy: T) -> Term<T>",
"start_line": 410
} | {
"class_name": "impl<T> Term<T> {\n #[inline]\n pub fn scroll_display(&mut self, scroll: Scroll)\n where\n T: EventListener,\n {\n let old_display_offset = self.grid.display_offset();\n self.grid.scroll_display(scroll);\n self.event_proxy.send_event(Event::MouseCursorDirty);\n\n // Clamp vi mode cursor to the viewport.\n let viewport_start = -(self.grid.display_offset() as i32);\n let viewport_end = viewport_start + self.bottommost_line().0;\n let vi_cursor_line = &mut self.vi_mode_cursor.point.line.0;\n *vi_cursor_line = cmp::min(viewport_end, cmp::max(viewport_start, *vi_cursor_line));\n self.vi_mode_recompute_selection();\n\n // Damage everything if display offset changed.\n if old_display_offset != self.grid().display_offset() {\n self.mark_fully_damaged();\n }\n }\n\n pub fn new<D: Dimensions>(config: Config, dimensions: &D, event_proxy: T) -> Term<T> {\n let num_cols = dimensions.columns();\n let num_lines = dimensions.screen_lines();\n\n let history_size = config.scrolling_history;\n let grid = Grid::new(num_lines, num_cols, history_size);\n let inactive_grid = Grid::new(num_lines, num_cols, 0);\n\n let tabs = TabStops::new(grid.columns());\n\n let scroll_region = Line(0)..Line(grid.screen_lines() as i32);\n\n // Initialize terminal damage, covering the entire terminal upon launch.\n let damage = TermDamageState::new(num_cols, num_lines);\n\n Term {\n inactive_grid,\n scroll_region,\n event_proxy,\n damage,\n config,\n grid,\n tabs,\n inactive_keyboard_mode_stack: Default::default(),\n keyboard_mode_stack: Default::default(),\n active_charset: Default::default(),\n vi_mode_cursor: Default::default(),\n cursor_style: Default::default(),\n colors: color::Colors::default(),\n title_stack: Default::default(),\n is_focused: Default::default(),\n selection: Default::default(),\n title: Default::default(),\n mode: Default::default(),\n }\n }\n\n /// Collect the information about the changes in the lines, which\n /// could be used to minimize the amount of drawing 
operations.\n ///\n /// The user controlled elements, like `Vi` mode cursor and `Selection` are **not** part of the\n /// collected damage state. Those could easily be tracked by comparing their old and new\n /// value between adjacent frames.\n ///\n /// After reading damage [`reset_damage`] should be called.\n ///\n /// [`reset_damage`]: Self::reset_damage\n #[must_use]\n pub fn damage(&mut self) -> TermDamage<'_> {\n // Ensure the entire terminal is damaged after entering insert mode.\n // Leaving is handled in the ansi handler.\n if self.mode.contains(TermMode::INSERT) {\n self.mark_fully_damaged();\n }\n\n let previous_cursor = mem::replace(&mut self.damage.last_cursor, self.grid.cursor.point);\n\n if self.damage.full {\n return TermDamage::Full;\n }\n\n // Add information about old cursor position and new one if they are not the same, so we\n // cover everything that was produced by `Term::input`.\n if self.damage.last_cursor != previous_cursor {\n // Cursor coordinates are always inside viewport even if you have `display_offset`.\n let point = Point::new(previous_cursor.line.0 as usize, previous_cursor.column);\n self.damage.damage_point(point);\n }\n\n // Always damage current cursor.\n self.damage_cursor();\n\n // NOTE: damage which changes all the content when the display offset is non-zero (e.g.\n // scrolling) is handled via full damage.\n let display_offset = self.grid().display_offset();\n TermDamage::Partial(TermDamageIterator::new(&self.damage.lines, display_offset))\n }\n\n /// Resets the terminal damage information.\n pub fn reset_damage(&mut self) {\n self.damage.reset(self.columns());\n }\n\n #[inline]\n fn mark_fully_damaged(&mut self) {\n self.damage.full = true;\n }\n\n /// Set new options for the [`Term`].\n pub fn set_options(&mut self, options: Config)\n where\n T: EventListener,\n {\n let old_config = mem::replace(&mut self.config, options);\n\n let title_event = match &self.title {\n Some(title) => Event::Title(title.clone()),\n None => 
Event::ResetTitle,\n };\n\n self.event_proxy.send_event(title_event);\n\n if self.mode.contains(TermMode::ALT_SCREEN) {\n self.inactive_grid.update_history(self.config.scrolling_history);\n } else {\n self.grid.update_history(self.config.scrolling_history);\n }\n\n if self.config.kitty_keyboard != old_config.kitty_keyboard {\n self.keyboard_mode_stack = Vec::new();\n self.inactive_keyboard_mode_stack = Vec::new();\n self.mode.remove(TermMode::KITTY_KEYBOARD_PROTOCOL);\n }\n\n // Damage everything on config updates.\n self.mark_fully_damaged();\n }\n\n /// Convert the active selection to a String.\n pub fn selection_to_string(&self) -> Option<String> {\n let selection_range = self.selection.as_ref().and_then(|s| s.to_range(self))?;\n let SelectionRange { start, end, .. } = selection_range;\n\n let mut res = String::new();\n\n match self.selection.as_ref() {\n Some(Selection { ty: SelectionType::Block, .. }) => {\n for line in (start.line.0..end.line.0).map(Line::from) {\n res += self\n .line_to_string(line, start.column..end.column, start.column.0 != 0)\n .trim_end();\n res += \"\\n\";\n }\n\n res += self.line_to_string(end.line, start.column..end.column, true).trim_end();\n },\n Some(Selection { ty: SelectionType::Lines, .. 
}) => {\n res = self.bounds_to_string(start, end) + \"\\n\";\n },\n _ => {\n res = self.bounds_to_string(start, end);\n },\n }\n\n Some(res)\n }\n\n /// Convert range between two points to a String.\n pub fn bounds_to_string(&self, start: Point, end: Point) -> String {\n let mut res = String::new();\n\n for line in (start.line.0..=end.line.0).map(Line::from) {\n let start_col = if line == start.line { start.column } else { Column(0) };\n let end_col = if line == end.line { end.column } else { self.last_column() };\n\n res += &self.line_to_string(line, start_col..end_col, line == end.line);\n }\n\n res.strip_suffix('\\n').map(str::to_owned).unwrap_or(res)\n }\n\n /// Convert a single line in the grid to a String.\n fn line_to_string(\n &self,\n line: Line,\n mut cols: Range<Column>,\n include_wrapped_wide: bool,\n ) -> String {\n let mut text = String::new();\n\n let grid_line = &self.grid[line];\n let line_length = cmp::min(grid_line.line_length(), cols.end + 1);\n\n // Include wide char when trailing spacer is selected.\n if grid_line[cols.start].flags.contains(Flags::WIDE_CHAR_SPACER) {\n cols.start -= 1;\n }\n\n let mut tab_mode = false;\n for column in (cols.start.0..line_length.0).map(Column::from) {\n let cell = &grid_line[column];\n\n // Skip over cells until next tab-stop once a tab was found.\n if tab_mode {\n if self.tabs[column] || cell.c != ' ' {\n tab_mode = false;\n } else {\n continue;\n }\n }\n\n if cell.c == '\\t' {\n tab_mode = true;\n }\n\n if !cell.flags.intersects(Flags::WIDE_CHAR_SPACER | Flags::LEADING_WIDE_CHAR_SPACER) {\n // Push cells primary character.\n text.push(cell.c);\n\n // Push zero-width characters.\n for c in cell.zerowidth().into_iter().flatten() {\n text.push(*c);\n }\n }\n }\n\n if cols.end >= self.columns() - 1\n && (line_length.0 == 0\n || !self.grid[line][line_length - 1].flags.contains(Flags::WRAPLINE))\n {\n text.push('\\n');\n }\n\n // If wide char is not part of the selection, but leading spacer is, include it.\n if 
line_length == self.columns()\n && line_length.0 >= 2\n && grid_line[line_length - 1].flags.contains(Flags::LEADING_WIDE_CHAR_SPACER)\n && include_wrapped_wide\n {\n text.push(self.grid[line - 1i32][Column(0)].c);\n }\n\n text\n }\n\n /// Terminal content required for rendering.\n #[inline]\n pub fn renderable_content(&self) -> RenderableContent<'_>\n where\n T: EventListener,\n {\n RenderableContent::new(self)\n }\n\n /// Access to the raw grid data structure.\n pub fn grid(&self) -> &Grid<Cell> {\n &self.grid\n }\n\n /// Mutable access to the raw grid data structure.\n pub fn grid_mut(&mut self) -> &mut Grid<Cell> {\n &mut self.grid\n }\n\n /// Resize terminal to new dimensions.\n pub fn resize<S: Dimensions>(&mut self, size: S) {\n let old_cols = self.columns();\n let old_lines = self.screen_lines();\n\n let num_cols = size.columns();\n let num_lines = size.screen_lines();\n\n if old_cols == num_cols && old_lines == num_lines {\n debug!(\"Term::resize dimensions unchanged\");\n return;\n }\n\n debug!(\"New num_cols is {} and num_lines is {}\", num_cols, num_lines);\n\n // Move vi mode cursor with the content.\n let history_size = self.history_size();\n let mut delta = num_lines as i32 - old_lines as i32;\n let min_delta = cmp::min(0, num_lines as i32 - self.grid.cursor.point.line.0 - 1);\n delta = cmp::min(cmp::max(delta, min_delta), history_size as i32);\n self.vi_mode_cursor.point.line += delta;\n\n let is_alt = self.mode.contains(TermMode::ALT_SCREEN);\n self.grid.resize(!is_alt, num_lines, num_cols);\n self.inactive_grid.resize(is_alt, num_lines, num_cols);\n\n // Invalidate selection and tabs only when necessary.\n if old_cols != num_cols {\n self.selection = None;\n\n // Recreate tabs list.\n self.tabs.resize(num_cols);\n } else if let Some(selection) = self.selection.take() {\n let max_lines = cmp::max(num_lines, old_lines) as i32;\n let range = Line(0)..Line(max_lines);\n self.selection = selection.rotate(self, &range, -delta);\n }\n\n // Clamp vi cursor 
to viewport.\n let vi_point = self.vi_mode_cursor.point;\n let viewport_top = Line(-(self.grid.display_offset() as i32));\n let viewport_bottom = viewport_top + self.bottommost_line();\n self.vi_mode_cursor.point.line =\n cmp::max(cmp::min(vi_point.line, viewport_bottom), viewport_top);\n self.vi_mode_cursor.point.column = cmp::min(vi_point.column, self.last_column());\n\n // Reset scrolling region.\n self.scroll_region = Line(0)..Line(self.screen_lines() as i32);\n\n // Resize damage information.\n self.damage.resize(num_cols, num_lines);\n }\n\n /// Active terminal modes.\n #[inline]\n pub fn mode(&self) -> &TermMode {\n &self.mode\n }\n\n /// Swap primary and alternate screen buffer.\n pub fn swap_alt(&mut self) {\n if !self.mode.contains(TermMode::ALT_SCREEN) {\n // Set alt screen cursor to the current primary screen cursor.\n self.inactive_grid.cursor = self.grid.cursor.clone();\n\n // Drop information about the primary screens saved cursor.\n self.grid.saved_cursor = self.grid.cursor.clone();\n\n // Reset alternate screen contents.\n self.inactive_grid.reset_region(..);\n }\n\n mem::swap(&mut self.keyboard_mode_stack, &mut self.inactive_keyboard_mode_stack);\n let keyboard_mode =\n self.keyboard_mode_stack.last().copied().unwrap_or(KeyboardModes::NO_MODE).into();\n self.set_keyboard_mode(keyboard_mode, KeyboardModesApplyBehavior::Replace);\n\n mem::swap(&mut self.grid, &mut self.inactive_grid);\n self.mode ^= TermMode::ALT_SCREEN;\n self.selection = None;\n self.mark_fully_damaged();\n }\n\n /// Scroll screen down.\n ///\n /// Text moves down; clear at bottom\n /// Expects origin to be in scroll range.\n #[inline]\n fn scroll_down_relative(&mut self, origin: Line, mut lines: usize) {\n trace!(\"Scrolling down relative: origin={}, lines={}\", origin, lines);\n\n lines = cmp::min(lines, (self.scroll_region.end - self.scroll_region.start).0 as usize);\n lines = cmp::min(lines, (self.scroll_region.end - origin).0 as usize);\n\n let region = 
origin..self.scroll_region.end;\n\n // Scroll selection.\n self.selection =\n self.selection.take().and_then(|s| s.rotate(self, ®ion, -(lines as i32)));\n\n // Scroll vi mode cursor.\n let line = &mut self.vi_mode_cursor.point.line;\n if region.start <= *line && region.end > *line {\n *line = cmp::min(*line + lines, region.end - 1);\n }\n\n // Scroll between origin and bottom\n self.grid.scroll_down(®ion, lines);\n self.mark_fully_damaged();\n }\n\n /// Scroll screen up\n ///\n /// Text moves up; clear at top\n /// Expects origin to be in scroll range.\n #[inline]\n fn scroll_up_relative(&mut self, origin: Line, mut lines: usize) {\n trace!(\"Scrolling up relative: origin={}, lines={}\", origin, lines);\n\n lines = cmp::min(lines, (self.scroll_region.end - self.scroll_region.start).0 as usize);\n\n let region = origin..self.scroll_region.end;\n\n // Scroll selection.\n self.selection = self.selection.take().and_then(|s| s.rotate(self, ®ion, lines as i32));\n\n self.grid.scroll_up(®ion, lines);\n\n // Scroll vi mode cursor.\n let viewport_top = Line(-(self.grid.display_offset() as i32));\n let top = if region.start == 0 { viewport_top } else { region.start };\n let line = &mut self.vi_mode_cursor.point.line;\n if (top <= *line) && region.end > *line {\n *line = cmp::max(*line - lines, top);\n }\n self.mark_fully_damaged();\n }\n\n fn deccolm(&mut self)\n where\n T: EventListener,\n {\n // Setting 132 column font makes no sense, but run the other side effects.\n // Clear scrolling region.\n self.set_scrolling_region(1, None);\n\n // Clear grid.\n self.grid.reset_region(..);\n self.mark_fully_damaged();\n }\n\n #[inline]\n pub fn exit(&mut self)\n where\n T: EventListener,\n {\n self.event_proxy.send_event(Event::Exit);\n }\n\n /// Toggle the vi mode.\n #[inline]\n pub fn toggle_vi_mode(&mut self)\n where\n T: EventListener,\n {\n self.mode ^= TermMode::VI;\n\n if self.mode.contains(TermMode::VI) {\n let display_offset = self.grid.display_offset() as i32;\n if 
self.grid.cursor.point.line > self.bottommost_line() - display_offset {\n // Move cursor to top-left if terminal cursor is not visible.\n let point = Point::new(Line(-display_offset), Column(0));\n self.vi_mode_cursor = ViModeCursor::new(point);\n } else {\n // Reset vi mode cursor position to match primary cursor.\n self.vi_mode_cursor = ViModeCursor::new(self.grid.cursor.point);\n }\n }\n\n // Update UI about cursor blinking state changes.\n self.event_proxy.send_event(Event::CursorBlinkingChange);\n }\n\n /// Move vi mode cursor.\n #[inline]\n pub fn vi_motion(&mut self, motion: ViMotion)\n where\n T: EventListener,\n {\n // Require vi mode to be active.\n if !self.mode.contains(TermMode::VI) {\n return;\n }\n\n // Move cursor.\n self.vi_mode_cursor = self.vi_mode_cursor.motion(self, motion);\n self.vi_mode_recompute_selection();\n }\n\n /// Move vi cursor to a point in the grid.\n #[inline]\n pub fn vi_goto_point(&mut self, point: Point)\n where\n T: EventListener,\n {\n // Move viewport to make point visible.\n self.scroll_to_point(point);\n\n // Move vi cursor to the point.\n self.vi_mode_cursor.point = point;\n\n self.vi_mode_recompute_selection();\n }\n\n /// Update the active selection to match the vi mode cursor position.\n #[inline]\n fn vi_mode_recompute_selection(&mut self) {\n // Require vi mode to be active.\n if !self.mode.contains(TermMode::VI) {\n return;\n }\n\n // Update only if non-empty selection is present.\n if let Some(selection) = self.selection.as_mut().filter(|s| !s.is_empty()) {\n selection.update(self.vi_mode_cursor.point, Side::Left);\n selection.include_all();\n }\n }\n\n /// Scroll display to point if it is outside of viewport.\n pub fn scroll_to_point(&mut self, point: Point)\n where\n T: EventListener,\n {\n let display_offset = self.grid.display_offset() as i32;\n let screen_lines = self.grid.screen_lines() as i32;\n\n if point.line < -display_offset {\n let lines = point.line + display_offset;\n 
self.scroll_display(Scroll::Delta(-lines.0));\n } else if point.line >= (screen_lines - display_offset) {\n let lines = point.line + display_offset - screen_lines + 1i32;\n self.scroll_display(Scroll::Delta(-lines.0));\n }\n }\n\n /// Jump to the end of a wide cell.\n pub fn expand_wide(&self, mut point: Point, direction: Direction) -> Point {\n let flags = self.grid[point.line][point.column].flags;\n\n match direction {\n Direction::Right if flags.contains(Flags::LEADING_WIDE_CHAR_SPACER) => {\n point.column = Column(1);\n point.line += 1;\n },\n Direction::Right if flags.contains(Flags::WIDE_CHAR) => {\n point.column = cmp::min(point.column + 1, self.last_column());\n },\n Direction::Left if flags.intersects(Flags::WIDE_CHAR | Flags::WIDE_CHAR_SPACER) => {\n if flags.contains(Flags::WIDE_CHAR_SPACER) {\n point.column -= 1;\n }\n\n let prev = point.sub(self, Boundary::Grid, 1);\n if self.grid[prev].flags.contains(Flags::LEADING_WIDE_CHAR_SPACER) {\n point = prev;\n }\n },\n _ => (),\n }\n\n point\n }\n\n #[inline]\n pub fn semantic_escape_chars(&self) -> &str {\n &self.config.semantic_escape_chars\n }\n\n #[cfg(test)]\n pub(crate) fn set_semantic_escape_chars(&mut self, semantic_escape_chars: &str) {\n self.config.semantic_escape_chars = semantic_escape_chars.into();\n }\n\n /// Active terminal cursor style.\n ///\n /// While vi mode is active, this will automatically return the vi mode cursor style.\n #[inline]\n pub fn cursor_style(&self) -> CursorStyle {\n let cursor_style = self.cursor_style.unwrap_or(self.config.default_cursor_style);\n\n if self.mode.contains(TermMode::VI) {\n self.config.vi_mode_cursor_style.unwrap_or(cursor_style)\n } else {\n cursor_style\n }\n }\n\n pub fn colors(&self) -> &Colors {\n &self.colors\n }\n\n /// Insert a linebreak at the current cursor position.\n #[inline]\n fn wrapline(&mut self)\n where\n T: EventListener,\n {\n if !self.mode.contains(TermMode::LINE_WRAP) {\n return;\n }\n\n trace!(\"Wrapping input\");\n\n 
self.grid.cursor_cell().flags.insert(Flags::WRAPLINE);\n\n if self.grid.cursor.point.line + 1 >= self.scroll_region.end {\n self.linefeed();\n } else {\n self.damage_cursor();\n self.grid.cursor.point.line += 1;\n }\n\n self.grid.cursor.point.column = Column(0);\n self.grid.cursor.input_needs_wrap = false;\n self.damage_cursor();\n }\n\n /// Write `c` to the cell at the cursor position.\n #[inline(always)]\n fn write_at_cursor(&mut self, c: char) {\n let c = self.grid.cursor.charsets[self.active_charset].map(c);\n let fg = self.grid.cursor.template.fg;\n let bg = self.grid.cursor.template.bg;\n let flags = self.grid.cursor.template.flags;\n let extra = self.grid.cursor.template.extra.clone();\n\n let mut cursor_cell = self.grid.cursor_cell();\n\n // Clear all related cells when overwriting a fullwidth cell.\n if cursor_cell.flags.intersects(Flags::WIDE_CHAR | Flags::WIDE_CHAR_SPACER) {\n // Remove wide char and spacer.\n let wide = cursor_cell.flags.contains(Flags::WIDE_CHAR);\n let point = self.grid.cursor.point;\n if wide && point.column < self.last_column() {\n self.grid[point.line][point.column + 1].flags.remove(Flags::WIDE_CHAR_SPACER);\n } else if point.column > 0 {\n self.grid[point.line][point.column - 1].clear_wide();\n }\n\n // Remove leading spacers.\n if point.column <= 1 && point.line != self.topmost_line() {\n let column = self.last_column();\n self.grid[point.line - 1i32][column].flags.remove(Flags::LEADING_WIDE_CHAR_SPACER);\n }\n\n cursor_cell = self.grid.cursor_cell();\n }\n\n cursor_cell.c = c;\n cursor_cell.fg = fg;\n cursor_cell.bg = bg;\n cursor_cell.flags = flags;\n cursor_cell.extra = extra;\n }\n\n #[inline]\n fn damage_cursor(&mut self) {\n // The normal cursor coordinates are always in viewport.\n let point =\n Point::new(self.grid.cursor.point.line.0 as usize, self.grid.cursor.point.column);\n self.damage.damage_point(point);\n }\n\n #[inline]\n fn set_keyboard_mode(&mut self, mode: TermMode, apply: KeyboardModesApplyBehavior) {\n let 
active_mode = self.mode & TermMode::KITTY_KEYBOARD_PROTOCOL;\n self.mode &= !TermMode::KITTY_KEYBOARD_PROTOCOL;\n let new_mode = match apply {\n KeyboardModesApplyBehavior::Replace => mode,\n KeyboardModesApplyBehavior::Union => active_mode.union(mode),\n KeyboardModesApplyBehavior::Difference => active_mode.difference(mode),\n };\n trace!(\"Setting keyboard mode to {new_mode:?}\");\n self.mode |= new_mode;\n }\n}",
"class_signature": "impl<T> Term<T>"
} |
line_to_string | alacritty-master/alacritty_terminal/src/term/mod.rs | fn line_to_string(
&self,
line: Line,
mut cols: Range<Column>,
include_wrapped_wide: bool,
) -> String {
let mut text = String::new();
let grid_line = &self.grid[line];
let line_length = cmp::min(grid_line.line_length(), cols.end + 1);
// Include wide char when trailing spacer is selected.
if grid_line[cols.start].flags.contains(Flags::WIDE_CHAR_SPACER) {
cols.start -= 1;
}
let mut tab_mode = false;
for column in (cols.start.0..line_length.0).map(Column::from) {
let cell = &grid_line[column];
// Skip over cells until next tab-stop once a tab was found.
if tab_mode {
if self.tabs[column] || cell.c != ' ' {
tab_mode = false;
} else {
continue;
}
}
if cell.c == '\t' {
tab_mode = true;
}
if !cell.flags.intersects(Flags::WIDE_CHAR_SPACER | Flags::LEADING_WIDE_CHAR_SPACER) {
// Push cells primary character.
text.push(cell.c);
// Push zero-width characters.
for c in cell.zerowidth().into_iter().flatten() {
text.push(*c);
}
}
}
if cols.end >= self.columns() - 1
&& (line_length.0 == 0
|| !self.grid[line][line_length - 1].flags.contains(Flags::WRAPLINE))
{
text.push('\n');
}
// If wide char is not part of the selection, but leading spacer is, include it.
// The spacer at the end of this line means the wide char wrapped onto the NEXT line.
if line_length == self.columns()
&& line_length.0 >= 2
&& grid_line[line_length - 1].flags.contains(Flags::LEADING_WIDE_CHAR_SPACER)
&& include_wrapped_wide
{
text.push(self.grid[line + 1i32][Column(0)].c);
}
text
}
} | //! Exports the `Term` type which is a high-level API for the Grid.
use std::ops::{Index, IndexMut, Range};
use std::sync::Arc;
use std::{cmp, mem, ptr, slice, str};
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
use base64::engine::general_purpose::STANDARD as Base64;
use base64::Engine;
use bitflags::bitflags;
use log::{debug, trace};
use unicode_width::UnicodeWidthChar;
use crate::event::{Event, EventListener};
use crate::grid::{Dimensions, Grid, GridIterator, Scroll};
use crate::index::{self, Boundary, Column, Direction, Line, Point, Side};
use crate::selection::{Selection, SelectionRange, SelectionType};
use crate::term::cell::{Cell, Flags, LineLength};
use crate::term::color::Colors;
use crate::vi_mode::{ViModeCursor, ViMotion};
use crate::vte::ansi::{
self, Attr, CharsetIndex, Color, CursorShape, CursorStyle, Handler, Hyperlink, KeyboardModes,
KeyboardModesApplyBehavior, NamedColor, NamedMode, NamedPrivateMode, PrivateMode, Rgb,
StandardCharset,
};
pub mod cell;
pub mod color;
pub mod search;
/// Minimum number of columns.
///
/// A minimum of 2 is necessary to hold fullwidth unicode characters.
pub const MIN_COLUMNS: usize = 2;
/// Minimum number of visible lines.
pub const MIN_SCREEN_LINES: usize = 1;
/// Max size of the window title stack.
const TITLE_STACK_MAX_DEPTH: usize = 4096;
/// Default semantic escape characters.
pub const SEMANTIC_ESCAPE_CHARS: &str = ",│`|:\"' ()[]{}<>\t";
/// Max size of the keyboard modes.
const KEYBOARD_MODE_STACK_MAX_DEPTH: usize = TITLE_STACK_MAX_DEPTH;
/// Default tab interval, corresponding to terminfo `it` value.
const INITIAL_TABSTOPS: usize = 8;
bitflags! {
    /// Active terminal modes, toggled via escape sequences or user interaction.
    #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
    pub struct TermMode: u32 {
        const NONE = 0;
        const SHOW_CURSOR = 1;
        const APP_CURSOR = 1 << 1;
        const APP_KEYPAD = 1 << 2;
        const MOUSE_REPORT_CLICK = 1 << 3;
        const BRACKETED_PASTE = 1 << 4;
        const SGR_MOUSE = 1 << 5;
        const MOUSE_MOTION = 1 << 6;
        const LINE_WRAP = 1 << 7;
        const LINE_FEED_NEW_LINE = 1 << 8;
        const ORIGIN = 1 << 9;
        const INSERT = 1 << 10;
        const FOCUS_IN_OUT = 1 << 11;
        const ALT_SCREEN = 1 << 12;
        const MOUSE_DRAG = 1 << 13;
        const UTF8_MOUSE = 1 << 14;
        const ALTERNATE_SCROLL = 1 << 15;
        const VI = 1 << 16;
        const URGENCY_HINTS = 1 << 17;
        const DISAMBIGUATE_ESC_CODES = 1 << 18;
        const REPORT_EVENT_TYPES = 1 << 19;
        const REPORT_ALTERNATE_KEYS = 1 << 20;
        const REPORT_ALL_KEYS_AS_ESC = 1 << 21;
        const REPORT_ASSOCIATED_TEXT = 1 << 22;
        /// Any mouse reporting mode (click, motion, or drag).
        const MOUSE_MODE = Self::MOUSE_REPORT_CLICK.bits() | Self::MOUSE_MOTION.bits() | Self::MOUSE_DRAG.bits();
        /// All mode bits belonging to the kitty keyboard protocol.
        const KITTY_KEYBOARD_PROTOCOL = Self::DISAMBIGUATE_ESC_CODES.bits()
            | Self::REPORT_EVENT_TYPES.bits()
            | Self::REPORT_ALTERNATE_KEYS.bits()
            | Self::REPORT_ALL_KEYS_AS_ESC.bits()
            | Self::REPORT_ASSOCIATED_TEXT.bits();
        /// Mask matching every possible mode bit.
        const ANY = u32::MAX;
    }
}
impl From<KeyboardModes> for TermMode {
    /// Translate kitty keyboard protocol flags into their terminal mode bits.
    fn from(value: KeyboardModes) -> Self {
        // Each protocol flag maps onto exactly one terminal mode bit.
        let pairs = [
            (KeyboardModes::DISAMBIGUATE_ESC_CODES, TermMode::DISAMBIGUATE_ESC_CODES),
            (KeyboardModes::REPORT_EVENT_TYPES, TermMode::REPORT_EVENT_TYPES),
            (KeyboardModes::REPORT_ALTERNATE_KEYS, TermMode::REPORT_ALTERNATE_KEYS),
            (KeyboardModes::REPORT_ALL_KEYS_AS_ESC, TermMode::REPORT_ALL_KEYS_AS_ESC),
            (KeyboardModes::REPORT_ASSOCIATED_TEXT, TermMode::REPORT_ASSOCIATED_TEXT),
        ];

        let mut mode = Self::empty();
        for (keyboard_flag, term_flag) in pairs {
            mode.set(term_flag, value.contains(keyboard_flag));
        }
        mode
    }
}
impl Default for TermMode {
    /// Modes active on a freshly created terminal.
    fn default() -> TermMode {
        TermMode::SHOW_CURSOR
            .union(TermMode::LINE_WRAP)
            .union(TermMode::ALTERNATE_SCROLL)
            .union(TermMode::URGENCY_HINTS)
    }
}
/// Convert a terminal point to a viewport relative point.
///
/// Returns `None` when the resulting viewport line would be negative,
/// i.e. the point lies above the visible region.
#[inline]
pub fn point_to_viewport(display_offset: usize, point: Point) -> Option<Point<usize>> {
    let viewport_line = point.line.0 + display_offset as i32;

    match usize::try_from(viewport_line) {
        Ok(line) => Some(Point::new(line, point.column)),
        Err(_) => None,
    }
}
/// Convert a viewport relative point to a terminal point.
#[inline]
pub fn viewport_to_point(display_offset: usize, point: Point<usize>) -> Point {
    // Undo the display offset to recover absolute grid coordinates.
    let viewport_line = Line(point.line as i32);
    Point::new(viewport_line - display_offset, point.column)
}
/// Damaged column span on a single terminal line.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct LineDamageBounds {
    /// Damaged line number.
    pub line: usize,
    /// Leftmost damaged column.
    pub left: usize,
    /// Rightmost damaged column.
    pub right: usize,
}

impl LineDamageBounds {
    #[inline]
    pub fn new(line: usize, left: usize, right: usize) -> Self {
        Self { line, left, right }
    }

    /// Create bounds for a line without any damage.
    ///
    /// "No damage" is encoded as `left > right` (see [`Self::is_damaged`]), so
    /// the first `expand` call snaps both bounds onto the damaged column.
    #[inline]
    pub fn undamaged(line: usize, num_cols: usize) -> Self {
        Self { line, left: num_cols, right: 0 }
    }

    /// Clear all damage tracked for this line.
    #[inline]
    pub fn reset(&mut self, num_cols: usize) {
        *self = Self::undamaged(self.line, num_cols);
    }

    /// Grow the damaged span so it covers at least `left..=right`.
    #[inline]
    pub fn expand(&mut self, left: usize, right: usize) {
        self.left = cmp::min(self.left, left);
        self.right = cmp::max(self.right, right);
    }

    /// Whether any column on this line is damaged.
    #[inline]
    pub fn is_damaged(&self) -> bool {
        self.left <= self.right
    }
}
/// Terminal damage information collected since the last [`Term::reset_damage`] call.
#[derive(Debug)]
pub enum TermDamage<'a> {
    /// The entire terminal is damaged.
    Full,
    /// Iterator over damaged lines in the terminal.
    Partial(TermDamageIterator<'a>),
}

/// Iterator over the terminal's viewport damaged lines.
#[derive(Clone, Debug)]
pub struct TermDamageIterator<'a> {
    // Remaining per-line damage entries to inspect.
    line_damage: slice::Iter<'a, LineDamageBounds>,
    // Offset added to each line to translate it into viewport coordinates.
    display_offset: usize,
}

impl<'a> TermDamageIterator<'a> {
    pub fn new(line_damage: &'a [LineDamageBounds], display_offset: usize) -> Self {
        let num_lines = line_damage.len();
        // Filter out invisible damage.
        let line_damage = &line_damage[..num_lines.saturating_sub(display_offset)];
        Self { display_offset, line_damage: line_damage.iter() }
    }
}
impl Iterator for TermDamageIterator<'_> {
    type Item = LineDamageBounds;

    fn next(&mut self) -> Option<Self::Item> {
        // Walk the remaining entries, yielding the first damaged line
        // translated into viewport coordinates.
        loop {
            let bounds = self.line_damage.next()?;
            if bounds.is_damaged() {
                let viewport_line = bounds.line + self.display_offset;
                return Some(LineDamageBounds::new(viewport_line, bounds.left, bounds.right));
            }
        }
    }
}
/// State of the terminal damage.
///
/// Internal bookkeeping backing [`Term::damage`].
struct TermDamageState {
    /// Hint whether terminal should be damaged entirely regardless of the actual damage changes.
    full: bool,
    /// Information about damage on terminal lines.
    lines: Vec<LineDamageBounds>,
    /// Old terminal cursor point.
    last_cursor: Point,
}
impl TermDamageState {
fn new(num_cols: usize, num_lines: usize) -> Self {
let lines =
(0..num_lines).map(|line| LineDamageBounds::undamaged(line, num_cols)).collect();
Self { full: true, lines, last_cursor: Default::default() }
}
#[inline]
fn resize(&mut self, num_cols: usize, num_lines: usize) {
// Reset point, so old cursor won't end up outside of the viewport.
self.last_cursor = Default::default();
self.full = true;
self.lines.clear();
self.lines.reserve(num_lines);
for line in 0..num_lines {
self.lines.push(LineDamageBounds::undamaged(line, num_cols));
}
}
/// Damage point inside of the viewport.
#[inline]
fn damage_point(&mut self, point: Point<usize>) {
self.damage_line(point.line, point.column.0, point.column.0);
}
/// Expand `line`'s damage to span at least `left` to `right` column.
#[inline]
fn damage_line(&mut self, line: usize, left: usize, right: usize) {
self.lines[line].expand(left, right);
}
/// Reset information about terminal damage.
fn reset(&mut self, num_cols: usize) {
self.full = false;
self.lines.iter_mut().for_each(|line| line.reset(num_cols));
}
}
/// Terminal state, wrapping the active and inactive grids.
pub struct Term<T> {
    /// Terminal focus controlling the cursor shape.
    pub is_focused: bool,
    /// Cursor for keyboard selection.
    pub vi_mode_cursor: ViModeCursor,
    /// Currently active selection, if any.
    pub selection: Option<Selection>,
    /// Currently active grid.
    ///
    /// Tracks the screen buffer currently in use. While the alternate screen buffer is active,
    /// this will be the alternate grid. Otherwise it is the primary screen buffer.
    grid: Grid<Cell>,
    /// Currently inactive grid.
    ///
    /// Opposite of the active grid. While the alternate screen buffer is active, this will be the
    /// primary grid. Otherwise it is the alternate screen buffer.
    inactive_grid: Grid<Cell>,
    /// Index into `charsets`, pointing to what ASCII is currently being mapped to.
    active_charset: CharsetIndex,
    /// Tabstops.
    tabs: TabStops,
    /// Mode flags.
    mode: TermMode,
    /// Scroll region.
    ///
    /// Range going from top to bottom of the terminal, indexed from the top of the viewport.
    scroll_region: Range<Line>,
    /// Modified terminal colors.
    colors: Colors,
    /// Current style of the cursor.
    cursor_style: Option<CursorStyle>,
    /// Proxy for sending events to the event loop.
    event_proxy: T,
    /// Current title of the window.
    title: Option<String>,
    /// Stack of saved window titles. When a title is popped from this stack, the `title` for the
    /// term is set.
    title_stack: Vec<Option<String>>,
    /// The stack for the keyboard modes.
    keyboard_mode_stack: Vec<KeyboardModes>,
    /// Currently inactive keyboard mode stack.
    inactive_keyboard_mode_stack: Vec<KeyboardModes>,
    /// Information about damaged cells.
    damage: TermDamageState,
    /// Config directly for the terminal.
    config: Config,
}
/// Configuration options for the [`Term`].
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct Config {
    /// The maximum amount of scrolling history.
    pub scrolling_history: usize,
    /// Default cursor style to reset the cursor to.
    pub default_cursor_style: CursorStyle,
    /// Cursor style for Vi mode.
    pub vi_mode_cursor_style: Option<CursorStyle>,
    /// The characters which terminate semantic selection.
    ///
    /// The default value is [`SEMANTIC_ESCAPE_CHARS`].
    pub semantic_escape_chars: String,
    /// Whether to enable kitty keyboard protocol.
    pub kitty_keyboard: bool,
    /// OSC52 support mode.
    pub osc52: Osc52,
}

impl Default for Config {
    fn default() -> Self {
        Self {
            // Keep 10k lines of scrollback by default.
            scrolling_history: 10000,
            semantic_escape_chars: SEMANTIC_ESCAPE_CHARS.to_owned(),
            default_cursor_style: Default::default(),
            vi_mode_cursor_style: Default::default(),
            kitty_keyboard: Default::default(),
            osc52: Default::default(),
        }
    }
}
/// OSC 52 behavior.
///
/// Controls how the OSC 52 clipboard escape sequence is handled.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize), serde(rename_all = "lowercase"))]
pub enum Osc52 {
    /// The handling of the escape sequence is disabled.
    Disabled,
    /// Only copy sequence is accepted.
    ///
    /// This option is the default as a compromise between entirely
    /// disabling it (the most secure) and allowing `paste` (the less secure).
    #[default]
    OnlyCopy,
    /// Only paste sequence is accepted.
    OnlyPaste,
    /// Both are accepted.
    CopyPaste,
}
impl<T> Term<T> {
/// Scroll the visible display region, keeping the vi cursor inside the viewport.
#[inline]
pub fn scroll_display(&mut self, scroll: Scroll)
where
    T: EventListener,
{
    let old_display_offset = self.grid.display_offset();
    self.grid.scroll_display(scroll);

    // Notify that the content below the mouse cursor may have changed.
    self.event_proxy.send_event(Event::MouseCursorDirty);

    // Clamp vi mode cursor to the viewport.
    let viewport_start = -(self.grid.display_offset() as i32);
    let viewport_end = viewport_start + self.bottommost_line().0;
    let vi_cursor_line = &mut self.vi_mode_cursor.point.line.0;
    *vi_cursor_line = cmp::min(viewport_end, cmp::max(viewport_start, *vi_cursor_line));
    self.vi_mode_recompute_selection();

    // Damage everything if display offset changed.
    if old_display_offset != self.grid().display_offset() {
        self.mark_fully_damaged();
    }
}
/// Create a new terminal with the given dimensions and configuration.
pub fn new<D: Dimensions>(config: Config, dimensions: &D, event_proxy: T) -> Term<T> {
    let num_cols = dimensions.columns();
    let num_lines = dimensions.screen_lines();

    let history_size = config.scrolling_history;

    // The alternate (initially inactive) grid gets no scrollback history.
    let grid = Grid::new(num_lines, num_cols, history_size);
    let inactive_grid = Grid::new(num_lines, num_cols, 0);

    let tabs = TabStops::new(grid.columns());

    let scroll_region = Line(0)..Line(grid.screen_lines() as i32);

    // Initialize terminal damage, covering the entire terminal upon launch.
    let damage = TermDamageState::new(num_cols, num_lines);

    Term {
        inactive_grid,
        scroll_region,
        event_proxy,
        damage,
        config,
        grid,
        tabs,
        inactive_keyboard_mode_stack: Default::default(),
        keyboard_mode_stack: Default::default(),
        active_charset: Default::default(),
        vi_mode_cursor: Default::default(),
        cursor_style: Default::default(),
        colors: color::Colors::default(),
        title_stack: Default::default(),
        is_focused: Default::default(),
        selection: Default::default(),
        title: Default::default(),
        mode: Default::default(),
    }
}
/// Collect the information about the changes in the lines, which
/// could be used to minimize the amount of drawing operations.
///
/// The user controlled elements, like `Vi` mode cursor and `Selection` are **not** part of the
/// collected damage state. Those could easily be tracked by comparing their old and new
/// value between adjacent frames.
///
/// After reading damage [`reset_damage`] should be called.
///
/// [`reset_damage`]: Self::reset_damage
#[must_use]
pub fn damage(&mut self) -> TermDamage<'_> {
    // Ensure the entire terminal is damaged after entering insert mode.
    // Leaving is handled in the ansi handler.
    if self.mode.contains(TermMode::INSERT) {
        self.mark_fully_damaged();
    }

    // Store the current cursor position, keeping the one from the last frame around.
    let previous_cursor = mem::replace(&mut self.damage.last_cursor, self.grid.cursor.point);

    if self.damage.full {
        return TermDamage::Full;
    }

    // Add information about old cursor position and new one if they are not the same, so we
    // cover everything that was produced by `Term::input`.
    if self.damage.last_cursor != previous_cursor {
        // Cursor coordinates are always inside viewport even if you have `display_offset`.
        let point = Point::new(previous_cursor.line.0 as usize, previous_cursor.column);
        self.damage.damage_point(point);
    }

    // Always damage current cursor.
    self.damage_cursor();

    // NOTE: damage which changes all the content when the display offset is non-zero (e.g.
    // scrolling) is handled via full damage.
    let display_offset = self.grid().display_offset();
    TermDamage::Partial(TermDamageIterator::new(&self.damage.lines, display_offset))
}
/// Resets the terminal damage information.
///
/// Should be called after the damage reported by [`Self::damage`] has been processed.
pub fn reset_damage(&mut self) {
    self.damage.reset(self.columns());
}

/// Mark the entire terminal as damaged, forcing [`Self::damage`] to report `Full`.
#[inline]
fn mark_fully_damaged(&mut self) {
    self.damage.full = true;
}
/// Set new options for the [`Term`].
pub fn set_options(&mut self, options: Config)
where
    T: EventListener,
{
    let old_config = mem::replace(&mut self.config, options);

    // Re-announce the current title, since the UI may need to refresh it.
    let title_event = match &self.title {
        Some(title) => Event::Title(title.clone()),
        None => Event::ResetTitle,
    };
    self.event_proxy.send_event(title_event);

    // Apply the new scrollback limit to the primary screen's grid.
    if self.mode.contains(TermMode::ALT_SCREEN) {
        self.inactive_grid.update_history(self.config.scrolling_history);
    } else {
        self.grid.update_history(self.config.scrolling_history);
    }

    // Toggling kitty keyboard support invalidates any pushed keyboard modes.
    if self.config.kitty_keyboard != old_config.kitty_keyboard {
        self.keyboard_mode_stack = Vec::new();
        self.inactive_keyboard_mode_stack = Vec::new();
        self.mode.remove(TermMode::KITTY_KEYBOARD_PROTOCOL);
    }

    // Damage everything on config updates.
    self.mark_fully_damaged();
}
/// Convert the active selection to a String.
///
/// Returns `None` when no selection is active or it cannot be resolved to a range.
pub fn selection_to_string(&self) -> Option<String> {
    let selection_range = self.selection.as_ref().and_then(|s| s.to_range(self))?;
    let SelectionRange { start, end, .. } = selection_range;

    let mut res = String::new();

    match self.selection.as_ref() {
        // Block selections copy the same column range from every line.
        Some(Selection { ty: SelectionType::Block, .. }) => {
            for line in (start.line.0..end.line.0).map(Line::from) {
                res += self
                    .line_to_string(line, start.column..end.column, start.column.0 != 0)
                    .trim_end();
                res += "\n";
            }

            // Handle the last line separately to avoid a trailing newline.
            res += self.line_to_string(end.line, start.column..end.column, true).trim_end();
        },
        // Line selections are terminated by a newline.
        Some(Selection { ty: SelectionType::Lines, .. }) => {
            res = self.bounds_to_string(start, end) + "\n";
        },
        _ => {
            res = self.bounds_to_string(start, end);
        },
    }

    Some(res)
}
/// Convert range between two points to a String.
pub fn bounds_to_string(&self, start: Point, end: Point) -> String {
    let mut text = String::new();

    for line_idx in start.line.0..=end.line.0 {
        let line = Line::from(line_idx);
        let is_last = line == end.line;

        // Only the first line starts at `start.column`; only the last stops early.
        let first_col = if line == start.line { start.column } else { Column(0) };
        let last_col = if is_last { end.column } else { self.last_column() };

        text += &self.line_to_string(line, first_col..last_col, is_last);
    }

    // Drop a single trailing newline, if present.
    match text.strip_suffix('\n') {
        Some(stripped) => stripped.to_owned(),
        None => text,
    }
}
/// Convert a single line in the grid to a String.
///
/// `cols` restricts the included columns. `include_wrapped_wide` decides
/// whether a fullwidth character which wrapped onto the next line is
/// appended when this line ends with its leading spacer.
fn line_to_string(
    &self,
    line: Line,
    mut cols: Range<Column>,
    include_wrapped_wide: bool,
) -> String {
    let mut text = String::new();

    let grid_line = &self.grid[line];
    let line_length = cmp::min(grid_line.line_length(), cols.end + 1);

    // Include wide char when trailing spacer is selected.
    if grid_line[cols.start].flags.contains(Flags::WIDE_CHAR_SPACER) {
        cols.start -= 1;
    }

    let mut tab_mode = false;
    for column in (cols.start.0..line_length.0).map(Column::from) {
        let cell = &grid_line[column];

        // Skip over cells until next tab-stop once a tab was found.
        if tab_mode {
            if self.tabs[column] || cell.c != ' ' {
                tab_mode = false;
            } else {
                continue;
            }
        }

        if cell.c == '\t' {
            tab_mode = true;
        }

        if !cell.flags.intersects(Flags::WIDE_CHAR_SPACER | Flags::LEADING_WIDE_CHAR_SPACER) {
            // Push cells primary character.
            text.push(cell.c);

            // Push zero-width characters.
            for c in cell.zerowidth().into_iter().flatten() {
                text.push(*c);
            }
        }
    }

    // Append a newline when the line was not soft-wrapped.
    if cols.end >= self.columns() - 1
        && (line_length.0 == 0
            || !self.grid[line][line_length - 1].flags.contains(Flags::WRAPLINE))
    {
        text.push('\n');
    }

    // If wide char is not part of the selection, but leading spacer is, include it.
    //
    // A leading spacer at the end of this line means the fullwidth character
    // itself was wrapped to the first column of the line BELOW (see
    // `expand_wide`/`write_at_cursor`), so it must be read from `line + 1`,
    // not `line - 1`.
    if line_length == self.columns()
        && line_length.0 >= 2
        && grid_line[line_length - 1].flags.contains(Flags::LEADING_WIDE_CHAR_SPACER)
        && include_wrapped_wide
    {
        text.push(self.grid[line + 1i32][Column(0)].c);
    }

    text
}
/// Terminal content required for rendering.
#[inline]
pub fn renderable_content(&self) -> RenderableContent<'_>
where
    T: EventListener,
{
    RenderableContent::new(self)
}

/// Access to the raw grid data structure.
///
/// This is the grid of the currently active screen buffer.
pub fn grid(&self) -> &Grid<Cell> {
    &self.grid
}

/// Mutable access to the raw grid data structure.
///
/// This is the grid of the currently active screen buffer.
pub fn grid_mut(&mut self) -> &mut Grid<Cell> {
    &mut self.grid
}
/// Resize terminal to new dimensions.
pub fn resize<S: Dimensions>(&mut self, size: S) {
    let old_cols = self.columns();
    let old_lines = self.screen_lines();

    let num_cols = size.columns();
    let num_lines = size.screen_lines();

    if old_cols == num_cols && old_lines == num_lines {
        debug!("Term::resize dimensions unchanged");
        return;
    }

    debug!("New num_cols is {} and num_lines is {}", num_cols, num_lines);

    // Move vi mode cursor with the content.
    let history_size = self.history_size();
    let mut delta = num_lines as i32 - old_lines as i32;
    let min_delta = cmp::min(0, num_lines as i32 - self.grid.cursor.point.line.0 - 1);
    delta = cmp::min(cmp::max(delta, min_delta), history_size as i32);
    self.vi_mode_cursor.point.line += delta;

    // Resize both grids; the boolean flag differs for primary vs. alternate
    // screen (see `Grid::resize` for its exact semantics).
    let is_alt = self.mode.contains(TermMode::ALT_SCREEN);
    self.grid.resize(!is_alt, num_lines, num_cols);
    self.inactive_grid.resize(is_alt, num_lines, num_cols);

    // Invalidate selection and tabs only when necessary.
    if old_cols != num_cols {
        self.selection = None;

        // Recreate tabs list.
        self.tabs.resize(num_cols);
    } else if let Some(selection) = self.selection.take() {
        let max_lines = cmp::max(num_lines, old_lines) as i32;
        let range = Line(0)..Line(max_lines);
        self.selection = selection.rotate(self, &range, -delta);
    }

    // Clamp vi cursor to viewport.
    let vi_point = self.vi_mode_cursor.point;
    let viewport_top = Line(-(self.grid.display_offset() as i32));
    let viewport_bottom = viewport_top + self.bottommost_line();
    self.vi_mode_cursor.point.line =
        cmp::max(cmp::min(vi_point.line, viewport_bottom), viewport_top);
    self.vi_mode_cursor.point.column = cmp::min(vi_point.column, self.last_column());

    // Reset scrolling region.
    self.scroll_region = Line(0)..Line(self.screen_lines() as i32);

    // Resize damage information.
    self.damage.resize(num_cols, num_lines);
}
/// Active terminal modes.
#[inline]
pub fn mode(&self) -> &TermMode {
    &self.mode
}

/// Swap primary and alternate screen buffer.
pub fn swap_alt(&mut self) {
    if !self.mode.contains(TermMode::ALT_SCREEN) {
        // Set alt screen cursor to the current primary screen cursor.
        self.inactive_grid.cursor = self.grid.cursor.clone();

        // Drop information about the primary screens saved cursor.
        self.grid.saved_cursor = self.grid.cursor.clone();

        // Reset alternate screen contents.
        self.inactive_grid.reset_region(..);
    }

    // Each screen buffer keeps its own keyboard mode stack; swap and re-apply
    // the topmost mode of the now-active stack.
    mem::swap(&mut self.keyboard_mode_stack, &mut self.inactive_keyboard_mode_stack);
    let keyboard_mode =
        self.keyboard_mode_stack.last().copied().unwrap_or(KeyboardModes::NO_MODE).into();
    self.set_keyboard_mode(keyboard_mode, KeyboardModesApplyBehavior::Replace);

    mem::swap(&mut self.grid, &mut self.inactive_grid);
    self.mode ^= TermMode::ALT_SCREEN;
    self.selection = None;
    self.mark_fully_damaged();
}
/// Scroll screen down.
///
/// Text moves down; clear at bottom
/// Expects origin to be in scroll range.
#[inline]
fn scroll_down_relative(&mut self, origin: Line, mut lines: usize) {
    trace!("Scrolling down relative: origin={}, lines={}", origin, lines);

    // Never scroll further than the scroll region and the origin permit.
    lines = cmp::min(lines, (self.scroll_region.end - self.scroll_region.start).0 as usize);
    lines = cmp::min(lines, (self.scroll_region.end - origin).0 as usize);

    let region = origin..self.scroll_region.end;

    // Scroll selection.
    self.selection =
        self.selection.take().and_then(|s| s.rotate(self, &region, -(lines as i32)));

    // Scroll vi mode cursor.
    let line = &mut self.vi_mode_cursor.point.line;
    if region.start <= *line && region.end > *line {
        *line = cmp::min(*line + lines, region.end - 1);
    }

    // Scroll between origin and bottom
    self.grid.scroll_down(&region, lines);
    self.mark_fully_damaged();
}
/// Scroll screen up
///
/// Text moves up; clear at top
/// Expects origin to be in scroll range.
#[inline]
fn scroll_up_relative(&mut self, origin: Line, mut lines: usize) {
    trace!("Scrolling up relative: origin={}, lines={}", origin, lines);

    // Never scroll further than the scroll region height.
    lines = cmp::min(lines, (self.scroll_region.end - self.scroll_region.start).0 as usize);

    let region = origin..self.scroll_region.end;

    // Scroll selection.
    self.selection = self.selection.take().and_then(|s| s.rotate(self, &region, lines as i32));

    self.grid.scroll_up(&region, lines);

    // Scroll vi mode cursor.
    let viewport_top = Line(-(self.grid.display_offset() as i32));
    let top = if region.start == 0 { viewport_top } else { region.start };
    let line = &mut self.vi_mode_cursor.point.line;
    if (top <= *line) && region.end > *line {
        *line = cmp::max(*line - lines, top);
    }
    self.mark_fully_damaged();
}
/// DECCOLM (column mode change) side effects.
fn deccolm(&mut self)
where
    T: EventListener,
{
    // Setting 132 column font makes no sense, but run the other side effects.
    // Clear scrolling region.
    self.set_scrolling_region(1, None);

    // Clear grid.
    self.grid.reset_region(..);
    self.mark_fully_damaged();
}

/// Notify the event loop that the terminal should exit.
#[inline]
pub fn exit(&mut self)
where
    T: EventListener,
{
    self.event_proxy.send_event(Event::Exit);
}
/// Toggle the vi mode.
///
/// When entering vi mode, the vi cursor is placed on the terminal cursor if it
/// is visible, otherwise at the top-left of the viewport.
#[inline]
pub fn toggle_vi_mode(&mut self)
where
    T: EventListener,
{
    self.mode ^= TermMode::VI;

    if self.mode.contains(TermMode::VI) {
        let display_offset = self.grid.display_offset() as i32;
        if self.grid.cursor.point.line > self.bottommost_line() - display_offset {
            // Move cursor to top-left if terminal cursor is not visible.
            let point = Point::new(Line(-display_offset), Column(0));
            self.vi_mode_cursor = ViModeCursor::new(point);
        } else {
            // Reset vi mode cursor position to match primary cursor.
            self.vi_mode_cursor = ViModeCursor::new(self.grid.cursor.point);
        }
    }

    // Update UI about cursor blinking state changes.
    self.event_proxy.send_event(Event::CursorBlinkingChange);
}
/// Move vi mode cursor.
///
/// No-op unless [`TermMode::VI`] is active.
#[inline]
pub fn vi_motion(&mut self, motion: ViMotion)
where
    T: EventListener,
{
    // Require vi mode to be active.
    if !self.mode.contains(TermMode::VI) {
        return;
    }

    // Move cursor.
    self.vi_mode_cursor = self.vi_mode_cursor.motion(self, motion);
    self.vi_mode_recompute_selection();
}

/// Move vi cursor to a point in the grid.
///
/// Scrolls the viewport if necessary so the point becomes visible.
#[inline]
pub fn vi_goto_point(&mut self, point: Point)
where
    T: EventListener,
{
    // Move viewport to make point visible.
    self.scroll_to_point(point);

    // Move vi cursor to the point.
    self.vi_mode_cursor.point = point;

    self.vi_mode_recompute_selection();
}
/// Update the active selection to match the vi mode cursor position.
#[inline]
fn vi_mode_recompute_selection(&mut self) {
    // Without vi mode there is no vi cursor to follow.
    if self.mode.contains(TermMode::VI) {
        let cursor_point = self.vi_mode_cursor.point;

        // Update only if non-empty selection is present.
        match self.selection.as_mut() {
            Some(selection) if !selection.is_empty() => {
                selection.update(cursor_point, Side::Left);
                selection.include_all();
            },
            _ => (),
        }
    }
}
/// Scroll display to point if it is outside of viewport.
///
/// Scrolls just far enough for `point` to become the first/last visible
/// line; points already inside the viewport leave the display untouched.
pub fn scroll_to_point(&mut self, point: Point)
where
    T: EventListener,
{
    let display_offset = self.grid.display_offset() as i32;
    let screen_lines = self.grid.screen_lines() as i32;
    if point.line < -display_offset {
        // Point is above the viewport: scroll up into history.
        let lines = point.line + display_offset;
        self.scroll_display(Scroll::Delta(-lines.0));
    } else if point.line >= (screen_lines - display_offset) {
        // Point is below the viewport: scroll back down.
        let lines = point.line + display_offset - screen_lines + 1i32;
        self.scroll_display(Scroll::Delta(-lines.0));
    }
}
/// Jump to the end of a wide cell.
///
/// Moving right lands on the cell following a wide character (handling
/// spacers at a row break); moving left lands on the wide character itself.
/// Points not touching a wide cell are returned unchanged.
pub fn expand_wide(&self, mut point: Point, direction: Direction) -> Point {
    let flags = self.grid[point.line][point.column].flags;
    match direction {
        // Wide char is wrapped onto the next line; skip past the leading spacer.
        Direction::Right if flags.contains(Flags::LEADING_WIDE_CHAR_SPACER) => {
            point.column = Column(1);
            point.line += 1;
        },
        // Step over the wide char onto its trailing spacer cell.
        Direction::Right if flags.contains(Flags::WIDE_CHAR) => {
            point.column = cmp::min(point.column + 1, self.last_column());
        },
        Direction::Left if flags.intersects(Flags::WIDE_CHAR | Flags::WIDE_CHAR_SPACER) => {
            if flags.contains(Flags::WIDE_CHAR_SPACER) {
                point.column -= 1;
            }
            // If the preceding cell is a leading spacer, the wide char starts
            // on the previous line.
            let prev = point.sub(self, Boundary::Grid, 1);
            if self.grid[prev].flags.contains(Flags::LEADING_WIDE_CHAR_SPACER) {
                point = prev;
            }
        },
        _ => (),
    }
    point
}
/// Configured characters that delimit semantic selections.
#[inline]
pub fn semantic_escape_chars(&self) -> &str {
    &self.config.semantic_escape_chars
}
/// Override the semantic escape characters; test-only helper.
#[cfg(test)]
pub(crate) fn set_semantic_escape_chars(&mut self, semantic_escape_chars: &str) {
    self.config.semantic_escape_chars = semantic_escape_chars.into();
}
/// Active terminal cursor style.
///
/// While vi mode is active, this will automatically return the vi mode cursor style.
#[inline]
pub fn cursor_style(&self) -> CursorStyle {
    // Runtime-set style wins over the configured default.
    let base_style = self.cursor_style.unwrap_or(self.config.default_cursor_style);
    if !self.mode.contains(TermMode::VI) {
        return base_style;
    }
    // Vi mode may override the style with its own configured one.
    self.config.vi_mode_cursor_style.unwrap_or(base_style)
}
/// Terminal color palette.
pub fn colors(&self) -> &Colors {
    &self.colors
}
/// Insert a linebreak at the current cursor position.
///
/// Marks the current cell with `WRAPLINE`, advances the cursor to column 0
/// of the next line, and scrolls when at the bottom of the scroll region.
/// No-op unless line wrapping is enabled.
#[inline]
fn wrapline(&mut self)
where
    T: EventListener,
{
    if !self.mode.contains(TermMode::LINE_WRAP) {
        return;
    }
    trace!("Wrapping input");
    self.grid.cursor_cell().flags.insert(Flags::WRAPLINE);
    if self.grid.cursor.point.line + 1 >= self.scroll_region.end {
        // At the bottom of the scroll region; scroll instead of moving down.
        self.linefeed();
    } else {
        self.damage_cursor();
        self.grid.cursor.point.line += 1;
    }
    self.grid.cursor.point.column = Column(0);
    self.grid.cursor.input_needs_wrap = false;
    self.damage_cursor();
}
/// Write `c` to the cell at the cursor position.
///
/// Applies the active charset mapping and stamps the cursor template's
/// colors and flags onto the target cell. If the target currently holds a
/// fullwidth character (or its spacer), the related cells are cleaned up
/// first so no orphaned spacers remain.
#[inline(always)]
fn write_at_cursor(&mut self, c: char) {
    let c = self.grid.cursor.charsets[self.active_charset].map(c);
    let fg = self.grid.cursor.template.fg;
    let bg = self.grid.cursor.template.bg;
    let flags = self.grid.cursor.template.flags;
    let extra = self.grid.cursor.template.extra.clone();
    let mut cursor_cell = self.grid.cursor_cell();
    // Clear all related cells when overwriting a fullwidth cell.
    if cursor_cell.flags.intersects(Flags::WIDE_CHAR | Flags::WIDE_CHAR_SPACER) {
        // Remove wide char and spacer.
        let wide = cursor_cell.flags.contains(Flags::WIDE_CHAR);
        let point = self.grid.cursor.point;
        if wide && point.column < self.last_column() {
            self.grid[point.line][point.column + 1].flags.remove(Flags::WIDE_CHAR_SPACER);
        } else if point.column > 0 {
            self.grid[point.line][point.column - 1].clear_wide();
        }
        // Remove leading spacers.
        if point.column <= 1 && point.line != self.topmost_line() {
            let column = self.last_column();
            self.grid[point.line - 1i32][column].flags.remove(Flags::LEADING_WIDE_CHAR_SPACER);
        }
        // Re-borrow the cell; the cleanup above required mutable grid access.
        cursor_cell = self.grid.cursor_cell();
    }
    cursor_cell.c = c;
    cursor_cell.fg = fg;
    cursor_cell.bg = bg;
    cursor_cell.flags = flags;
    cursor_cell.extra = extra;
}
/// Mark the cell under the primary cursor as damaged.
#[inline]
fn damage_cursor(&mut self) {
    // The normal cursor coordinates are always in viewport.
    let point =
        Point::new(self.grid.cursor.point.line.0 as usize, self.grid.cursor.point.column);
    self.damage.damage_point(point);
}
/// Replace the active kitty keyboard protocol bits with `mode`, combined
/// with the previously active bits according to `apply`.
#[inline]
fn set_keyboard_mode(&mut self, mode: TermMode, apply: KeyboardModesApplyBehavior) {
    // Snapshot the currently active protocol bits, then clear them all.
    let active_mode = self.mode & TermMode::KITTY_KEYBOARD_PROTOCOL;
    self.mode &= !TermMode::KITTY_KEYBOARD_PROTOCOL;
    let new_mode = match apply {
        KeyboardModesApplyBehavior::Replace => mode,
        KeyboardModesApplyBehavior::Union => active_mode.union(mode),
        KeyboardModesApplyBehavior::Difference => active_mode.difference(mode),
    };
    trace!("Setting keyboard mode to {new_mode:?}");
    self.mode |= new_mode;
}
}
/// Dimensions are delegated directly to the active grid.
impl<T> Dimensions for Term<T> {
    #[inline]
    fn columns(&self) -> usize {
        self.grid.columns()
    }
    #[inline]
    fn screen_lines(&self) -> usize {
        self.grid.screen_lines()
    }
    #[inline]
    fn total_lines(&self) -> usize {
        self.grid.total_lines()
    }
}
impl<T: EventListener> Handler for Term<T> {
/// A character to be displayed.
///
/// Handles zero-width combining characters, pending-wrap state, insert
/// mode shifting, and fullwidth glyphs (which occupy two cells: the glyph
/// plus a spacer). Finally advances the cursor or flags a pending wrap.
#[inline(never)]
fn input(&mut self, c: char) {
    // Number of cells the char will occupy.
    let width = match c.width() {
        Some(width) => width,
        // Unprintable characters are dropped.
        None => return,
    };
    // Handle zero-width characters.
    if width == 0 {
        // Get previous column.
        let mut column = self.grid.cursor.point.column;
        if !self.grid.cursor.input_needs_wrap {
            column.0 = column.saturating_sub(1);
        }
        // Put zerowidth characters over first fullwidth character cell.
        let line = self.grid.cursor.point.line;
        if self.grid[line][column].flags.contains(Flags::WIDE_CHAR_SPACER) {
            column.0 = column.saturating_sub(1);
        }
        self.grid[line][column].push_zerowidth(c);
        return;
    }
    // Move cursor to next line.
    if self.grid.cursor.input_needs_wrap {
        self.wrapline();
    }
    // If in insert mode, first shift cells to the right.
    let columns = self.columns();
    if self.mode.contains(TermMode::INSERT) && self.grid.cursor.point.column + width < columns {
        let line = self.grid.cursor.point.line;
        let col = self.grid.cursor.point.column;
        let row = &mut self.grid[line][..];
        for col in (col.0..(columns - width)).rev() {
            row.swap(col + width, col);
        }
    }
    if width == 1 {
        self.write_at_cursor(c);
    } else {
        if self.grid.cursor.point.column + 1 >= columns {
            if self.mode.contains(TermMode::LINE_WRAP) {
                // Insert placeholder before wide char if glyph does not fit in this row.
                self.grid.cursor.template.flags.insert(Flags::LEADING_WIDE_CHAR_SPACER);
                self.write_at_cursor(' ');
                self.grid.cursor.template.flags.remove(Flags::LEADING_WIDE_CHAR_SPACER);
                self.wrapline();
            } else {
                // Prevent out of bounds crash when linewrapping is disabled.
                self.grid.cursor.input_needs_wrap = true;
                return;
            }
        }
        // Write full width glyph to current cursor cell.
        self.grid.cursor.template.flags.insert(Flags::WIDE_CHAR);
        self.write_at_cursor(c);
        self.grid.cursor.template.flags.remove(Flags::WIDE_CHAR);
        // Write spacer to cell following the wide glyph.
        self.grid.cursor.point.column += 1;
        self.grid.cursor.template.flags.insert(Flags::WIDE_CHAR_SPACER);
        self.write_at_cursor(' ');
        self.grid.cursor.template.flags.remove(Flags::WIDE_CHAR_SPACER);
    }
    // Advance the cursor, or flag a pending wrap at the last column.
    if self.grid.cursor.point.column + 1 < columns {
        self.grid.cursor.point.column += 1;
    } else {
        self.grid.cursor.input_needs_wrap = true;
    }
}
#[inline]
fn decaln(&mut self) {
trace!("Decalnning");
for line in (0..self.screen_lines()).map(Line::from) {
for column in 0..self.columns() {
let cell = &mut self.grid[line][Column(column)];
*cell = Cell::default();
cell.c = 'E';
}
}
self.mark_fully_damaged();
}
/// Move the cursor to an absolute position, clamped to the screen.
///
/// In origin mode the line is relative to (and limited by) the scroll
/// region; otherwise it is relative to the top of the screen. Resets any
/// pending wrap state.
#[inline]
fn goto(&mut self, line: i32, col: usize) {
    let line = Line(line);
    let col = Column(col);
    trace!("Going to: line={}, col={}", line, col);
    let (y_offset, max_y) = if self.mode.contains(TermMode::ORIGIN) {
        (self.scroll_region.start, self.scroll_region.end - 1)
    } else {
        (Line(0), self.bottommost_line())
    };
    self.damage_cursor();
    self.grid.cursor.point.line = cmp::max(cmp::min(line + y_offset, max_y), Line(0));
    self.grid.cursor.point.column = cmp::min(col, self.last_column());
    self.damage_cursor();
    self.grid.cursor.input_needs_wrap = false;
}
/// Move the cursor to `line`, keeping the current column.
#[inline]
fn goto_line(&mut self, line: i32) {
    trace!("Going to line: {}", line);
    let column = self.grid.cursor.point.column.0;
    self.goto(line, column)
}
/// Move the cursor to `col`, keeping the current line.
#[inline]
fn goto_col(&mut self, col: usize) {
    trace!("Going to column: {}", col);
    let line = self.grid.cursor.point.line.0;
    self.goto(line, col)
}
/// Insert `count` blank cells at the cursor, shifting the rest of the line right.
///
/// Cells shifted past the end of the line are lost; the vacated cells are
/// filled with the current background color.
#[inline]
fn insert_blank(&mut self, count: usize) {
    let cursor = &self.grid.cursor;
    let bg = cursor.template.bg;
    // Ensure inserting within terminal bounds
    let count = cmp::min(count, self.columns() - cursor.point.column.0);
    let source = cursor.point.column;
    let destination = cursor.point.column.0 + count;
    let num_cells = self.columns() - destination;
    let line = cursor.point.line;
    self.damage.damage_line(line.0 as usize, 0, self.columns() - 1);
    let row = &mut self.grid[line][..];
    // Shift cells right by `count`, back-to-front so nothing is clobbered.
    for offset in (0..num_cells).rev() {
        row.swap(destination + offset, source.0 + offset);
    }
    // Cells were just moved out toward the end of the line;
    // fill in between source and dest with blanks.
    for cell in &mut row[source.0..destination] {
        *cell = bg.into();
    }
}
/// Move the cursor up `lines` rows, keeping the current column.
#[inline]
fn move_up(&mut self, lines: usize) {
    trace!("Moving up: {}", lines);
    // Delegate to `goto` so clamping and damage tracking stay consistent.
    let target = self.grid.cursor.point.line - lines;
    self.goto(target.0, self.grid.cursor.point.column.0)
}
/// Move the cursor down `lines` rows, keeping the current column.
#[inline]
fn move_down(&mut self, lines: usize) {
    trace!("Moving down: {}", lines);
    // Delegate to `goto` so clamping and damage tracking stay consistent.
    let target = self.grid.cursor.point.line + lines;
    self.goto(target.0, self.grid.cursor.point.column.0)
}
/// Move the cursor forward `cols` columns, clamped to the last column.
#[inline]
fn move_forward(&mut self, cols: usize) {
    trace!("Moving forward: {}", cols);
    let last_column = cmp::min(self.grid.cursor.point.column + cols, self.last_column());
    let cursor_line = self.grid.cursor.point.line.0 as usize;
    self.damage.damage_line(cursor_line, self.grid.cursor.point.column.0, last_column.0);
    self.grid.cursor.point.column = last_column;
    self.grid.cursor.input_needs_wrap = false;
}
/// Move the cursor backward `cols` columns, clamped to column 0.
#[inline]
fn move_backward(&mut self, cols: usize) {
    trace!("Moving backward: {}", cols);
    let column = self.grid.cursor.point.column.saturating_sub(cols);
    let cursor_line = self.grid.cursor.point.line.0 as usize;
    self.damage.damage_line(cursor_line, column, self.grid.cursor.point.column.0);
    self.grid.cursor.point.column = Column(column);
    self.grid.cursor.input_needs_wrap = false;
}
/// Respond to a Device Attributes (DA) query on the pty.
#[inline]
fn identify_terminal(&mut self, intermediate: Option<char>) {
    match intermediate {
        // DA1: primary device attributes.
        None => {
            trace!("Reporting primary device attributes");
            let text = String::from("\x1b[?6c");
            self.event_proxy.send_event(Event::PtyWrite(text));
        },
        // DA2: secondary device attributes, including the crate version.
        Some('>') => {
            trace!("Reporting secondary device attributes");
            let version = version_number(env!("CARGO_PKG_VERSION"));
            let text = format!("\x1b[>0;{version};1c");
            self.event_proxy.send_event(Event::PtyWrite(text));
        },
        _ => debug!("Unsupported device attributes intermediate"),
    }
}
/// Report the active kitty keyboard protocol mode on the pty.
#[inline]
fn report_keyboard_mode(&mut self) {
    // Only respond when kitty keyboard support is enabled in the config.
    if !self.config.kitty_keyboard {
        return;
    }
    trace!("Reporting active keyboard mode");
    // Top of the stack is the active mode; an empty stack means no mode.
    let current_mode =
        self.keyboard_mode_stack.last().unwrap_or(&KeyboardModes::NO_MODE).bits();
    let text = format!("\x1b[?{current_mode}u");
    self.event_proxy.send_event(Event::PtyWrite(text));
}
/// Push `mode` onto the kitty keyboard mode stack and make it the active mode.
///
/// When the stack is already at `KEYBOARD_MODE_STACK_MAX_DEPTH`, the oldest
/// entry is evicted from the bottom so the stack cannot grow unbounded.
#[inline]
fn push_keyboard_mode(&mut self, mode: KeyboardModes) {
    // Only honor the request when kitty keyboard support is enabled.
    if !self.config.kitty_keyboard {
        return;
    }
    trace!("Pushing `{mode:?}` keyboard mode into the stack");
    if self.keyboard_mode_stack.len() >= KEYBOARD_MODE_STACK_MAX_DEPTH {
        // BUGFIX: evict from the keyboard mode stack itself; the original
        // code removed from `title_stack`, corrupting saved window titles
        // while letting the keyboard mode stack grow without bound.
        let removed = self.keyboard_mode_stack.remove(0);
        trace!(
            "Removing '{:?}' from bottom of keyboard mode stack that exceeds its maximum depth",
            removed
        );
    }
    self.keyboard_mode_stack.push(mode);
    self.set_keyboard_mode(mode.into(), KeyboardModesApplyBehavior::Replace);
}
/// Pop up to `to_pop` kitty keyboard modes from the stack and re-activate
/// whichever mode remains on top (or `NO_MODE` if the stack empties).
#[inline]
fn pop_keyboard_modes(&mut self, to_pop: u16) {
    // Only honor the request when kitty keyboard support is enabled.
    if !self.config.kitty_keyboard {
        return;
    }
    trace!("Attempting to pop {to_pop} keyboard modes from the stack");
    // Saturating: popping more entries than exist just clears the stack.
    let new_len = self.keyboard_mode_stack.len().saturating_sub(to_pop as usize);
    self.keyboard_mode_stack.truncate(new_len);
    // Reload active mode.
    let mode = self.keyboard_mode_stack.last().copied().unwrap_or(KeyboardModes::NO_MODE);
    self.set_keyboard_mode(mode.into(), KeyboardModesApplyBehavior::Replace);
}
/// Handler entry point for setting the kitty keyboard mode; gated on the
/// config flag and forwarded to the inherent `set_keyboard_mode`.
#[inline]
fn set_keyboard_mode(&mut self, mode: KeyboardModes, apply: KeyboardModesApplyBehavior) {
    if !self.config.kitty_keyboard {
        return;
    }
    self.set_keyboard_mode(mode.into(), apply);
}
/// Respond to a Device Status Report (DSR) query on the pty.
#[inline]
fn device_status(&mut self, arg: usize) {
    trace!("Reporting device status: {}", arg);
    match arg {
        // DSR 5: operating status -> "OK".
        5 => {
            let text = String::from("\x1b[0n");
            self.event_proxy.send_event(Event::PtyWrite(text));
        },
        // DSR 6: cursor position report (1-based coordinates).
        6 => {
            let pos = self.grid.cursor.point;
            let text = format!("\x1b[{};{}R", pos.line + 1, pos.column + 1);
            self.event_proxy.send_event(Event::PtyWrite(text));
        },
        _ => debug!("unknown device status query: {}", arg),
    };
}
/// Move the cursor down `lines` rows and to column 0.
#[inline]
fn move_down_and_cr(&mut self, lines: usize) {
    trace!("Moving down and cr: {}", lines);
    let target = self.grid.cursor.point.line + lines;
    self.goto(target.0, 0)
}
/// Move the cursor up `lines` rows and to column 0.
#[inline]
fn move_up_and_cr(&mut self, lines: usize) {
    trace!("Moving up and cr: {}", lines);
    let target = self.grid.cursor.point.line - lines;
    self.goto(target.0, 0)
}
/// Insert tab at cursor position.
///
/// Advances the cursor to the next tabstop up to `count` times; a tab
/// issued while a wrap is pending acts as a linebreak instead.
#[inline]
fn put_tab(&mut self, mut count: u16) {
    // A tab after the last column is the same as a linebreak.
    if self.grid.cursor.input_needs_wrap {
        self.wrapline();
        return;
    }
    while self.grid.cursor.point.column < self.columns() && count != 0 {
        count -= 1;
        // Only stamp the tab character into cells that are still blank.
        let c = self.grid.cursor.charsets[self.active_charset].map('\t');
        let cell = self.grid.cursor_cell();
        if cell.c == ' ' {
            cell.c = c;
        }
        // Walk right until the next tabstop or the end of the line.
        loop {
            if (self.grid.cursor.point.column + 1) == self.columns() {
                break;
            }
            self.grid.cursor.point.column += 1;
            if self.tabs[self.grid.cursor.point.column] {
                break;
            }
        }
    }
}
/// Backspace.
///
/// Moves the cursor one column left (never past column 0) and clears any
/// pending wrap state.
#[inline]
fn backspace(&mut self) {
    trace!("Backspace");
    if self.grid.cursor.point.column > Column(0) {
        let line = self.grid.cursor.point.line.0 as usize;
        let column = self.grid.cursor.point.column.0;
        self.grid.cursor.point.column -= 1;
        self.grid.cursor.input_needs_wrap = false;
        self.damage.damage_line(line, column - 1, column);
    }
}
/// Carriage return.
///
/// Moves the cursor to column 0 and clears any pending wrap state.
#[inline]
fn carriage_return(&mut self) {
    trace!("Carriage return");
    let new_col = 0;
    let line = self.grid.cursor.point.line.0 as usize;
    self.damage.damage_line(line, new_col, self.grid.cursor.point.column.0);
    self.grid.cursor.point.column = Column(new_col);
    self.grid.cursor.input_needs_wrap = false;
}
/// Linefeed.
///
/// Moves the cursor down one line, scrolling the scroll region when the
/// cursor sits on its last line. The column is left unchanged.
#[inline]
fn linefeed(&mut self) {
    trace!("Linefeed");
    let next = self.grid.cursor.point.line + 1;
    if next == self.scroll_region.end {
        // At the bottom of the scroll region: scroll instead of moving.
        self.scroll_up(1);
    } else if next < self.screen_lines() {
        self.damage_cursor();
        self.grid.cursor.point.line += 1;
        self.damage_cursor();
    }
}
/// Ring the bell by forwarding a bell event to the listener.
///
/// (The previous doc comment, "Set current position as a tabstop.",
/// described `set_horizontal_tabstop`, not this method.)
#[inline]
fn bell(&mut self) {
    trace!("Bell");
    self.event_proxy.send_event(Event::Bell);
}
/// SUB control function; intentionally unimplemented.
#[inline]
fn substitute(&mut self) {
    trace!("[unimplemented] Substitute");
}
/// Run LF/NL.
///
/// LF/NL mode has some interesting history. According to ECMA-48 4th
/// edition, in LINE FEED mode,
///
/// > The execution of the formatter functions LINE FEED (LF), FORM FEED
/// > (FF), LINE TABULATION (VT) cause only movement of the active position in
/// > the direction of the line progression.
///
/// In NEW LINE mode,
///
/// > The execution of the formatter functions LINE FEED (LF), FORM FEED
/// > (FF), LINE TABULATION (VT) cause movement to the line home position on
/// > the following line, the following form, etc. In the case of LF this is
/// > referred to as the New Line (NL) option.
///
/// Additionally, ECMA-48 4th edition says that this option is deprecated.
/// ECMA-48 5th edition only mentions this option (without explanation)
/// saying that it's been removed.
///
/// As an emulator, we need to support it since applications may still rely
/// on it.
#[inline]
fn newline(&mut self) {
    self.linefeed();
    // In NEW LINE mode a linefeed also implies a carriage return.
    if self.mode.contains(TermMode::LINE_FEED_NEW_LINE) {
        self.carriage_return();
    }
}
/// Set current position as a tabstop.
#[inline]
fn set_horizontal_tabstop(&mut self) {
    trace!("Setting horizontal tabstop");
    self.tabs[self.grid.cursor.point.column] = true;
}
/// Scroll the scroll region up by `lines`, anchored at its top.
#[inline]
fn scroll_up(&mut self, lines: usize) {
    let origin = self.scroll_region.start;
    self.scroll_up_relative(origin, lines);
}
/// Scroll the scroll region down by `lines`, anchored at its top.
#[inline]
fn scroll_down(&mut self, lines: usize) {
    let origin = self.scroll_region.start;
    self.scroll_down_relative(origin, lines);
}
/// Insert `lines` blank lines at the cursor by scrolling down from there;
/// no-op when the cursor is outside the scroll region.
#[inline]
fn insert_blank_lines(&mut self, lines: usize) {
    trace!("Inserting blank {} lines", lines);
    let origin = self.grid.cursor.point.line;
    if self.scroll_region.contains(&origin) {
        self.scroll_down_relative(origin, lines);
    }
}
/// Delete `lines` lines at the cursor by scrolling up from there;
/// no-op when the cursor is outside the scroll region.
#[inline]
fn delete_lines(&mut self, lines: usize) {
    let origin = self.grid.cursor.point.line;
    // Never delete more lines than remain below the cursor.
    let lines = cmp::min(self.screen_lines() - origin.0 as usize, lines);
    trace!("Deleting {} lines", lines);
    if lines > 0 && self.scroll_region.contains(&origin) {
        self.scroll_up_relative(origin, lines);
    }
}
/// Erase `count` cells starting at the cursor, without shifting the line.
#[inline]
fn erase_chars(&mut self, count: usize) {
    let cursor = &self.grid.cursor;
    trace!("Erasing chars: count={}, col={}", count, cursor.point.column);
    let start = cursor.point.column;
    let end = cmp::min(start + count, Column(self.columns()));
    // Cleared cells have current background color set.
    let bg = self.grid.cursor.template.bg;
    let line = cursor.point.line;
    self.damage.damage_line(line.0 as usize, start.0, end.0);
    let row = &mut self.grid[line];
    for cell in &mut row[start..end] {
        *cell = bg.into();
    }
}
/// Delete `count` cells at the cursor, shifting the rest of the line left
/// and filling the freed cells at the end with the background color.
#[inline]
fn delete_chars(&mut self, count: usize) {
    let columns = self.columns();
    let cursor = &self.grid.cursor;
    let bg = cursor.template.bg;
    // Ensure deleting within terminal bounds.
    let count = cmp::min(count, columns);
    let start = cursor.point.column.0;
    let end = cmp::min(start + count, columns - 1);
    let num_cells = columns - end;
    let line = cursor.point.line;
    self.damage.damage_line(line.0 as usize, 0, self.columns() - 1);
    let row = &mut self.grid[line][..];
    // Shift the tail of the row left over the deleted cells.
    for offset in 0..num_cells {
        row.swap(start + offset, end + offset);
    }
    // Clear last `count` cells in the row. If deleting 1 char, need to delete
    // 1 cell.
    let end = columns - count;
    for cell in &mut row[end..] {
        *cell = bg.into();
    }
}
/// Move the cursor back `count` tabstops, stopping at column 0.
#[inline]
fn move_backward_tabs(&mut self, count: u16) {
    trace!("Moving backward {} tabs", count);
    let old_col = self.grid.cursor.point.column.0;
    for _ in 0..count {
        let mut col = self.grid.cursor.point.column;
        if col == 0 {
            break;
        }
        // Scan left for the nearest preceding tabstop.
        for i in (0..(col.0)).rev() {
            if self.tabs[index::Column(i)] {
                col = index::Column(i);
                break;
            }
        }
        self.grid.cursor.point.column = col;
    }
    let line = self.grid.cursor.point.line.0 as usize;
    self.damage.damage_line(line, self.grid.cursor.point.column.0, old_col);
}
/// Move the cursor forward `count` tabstops, stopping at the last column.
#[inline]
fn move_forward_tabs(&mut self, count: u16) {
    trace!("Moving forward {} tabs", count);
    let num_cols = self.columns();
    let old_col = self.grid.cursor.point.column.0;
    for _ in 0..count {
        let mut col = self.grid.cursor.point.column;
        if col == num_cols - 1 {
            break;
        }
        // Scan right for the next tabstop (or settle at the last column).
        for i in col.0 + 1..num_cols {
            col = index::Column(i);
            if self.tabs[col] {
                break;
            }
        }
        self.grid.cursor.point.column = col;
    }
    let line = self.grid.cursor.point.line.0 as usize;
    self.damage.damage_line(line, old_col, self.grid.cursor.point.column.0);
}
/// Save the full cursor state (DECSC) for a later restore.
#[inline]
fn save_cursor_position(&mut self) {
    trace!("Saving cursor position");
    self.grid.saved_cursor = self.grid.cursor.clone();
}
/// Restore the cursor state saved by `save_cursor_position` (DECRC).
#[inline]
fn restore_cursor_position(&mut self) {
    trace!("Restoring cursor position");
    // Damage both the old and the new cursor cells.
    self.damage_cursor();
    self.grid.cursor = self.grid.saved_cursor.clone();
    self.damage_cursor();
}
/// Clear part or all of the cursor's line (EL), filling with the current
/// background color, and drop any selection intersecting that line.
#[inline]
fn clear_line(&mut self, mode: ansi::LineClearMode) {
    trace!("Clearing line: {:?}", mode);
    let cursor = &self.grid.cursor;
    let bg = cursor.template.bg;
    let point = cursor.point;
    let (left, right) = match mode {
        // With a wrap pending the cursor is logically past the line end.
        ansi::LineClearMode::Right if cursor.input_needs_wrap => return,
        ansi::LineClearMode::Right => (point.column, Column(self.columns())),
        ansi::LineClearMode::Left => (Column(0), point.column + 1),
        ansi::LineClearMode::All => (Column(0), Column(self.columns())),
    };
    self.damage.damage_line(point.line.0 as usize, left.0, right.0 - 1);
    let row = &mut self.grid[point.line];
    for cell in &mut row[left..right] {
        *cell = bg.into();
    }
    // Invalidate any selection touching the cleared line.
    let range = self.grid.cursor.point.line..=self.grid.cursor.point.line;
    self.selection = self.selection.take().filter(|s| !s.intersects_range(range));
}
/// Set the indexed color value.
#[inline]
fn set_color(&mut self, index: usize, color: Rgb) {
    trace!("Setting color[{}] = {:?}", index, color);
    // Damage terminal if the color changed and it's not the cursor.
    if index != NamedColor::Cursor as usize && self.colors[index] != Some(color) {
        self.mark_fully_damaged();
    }
    self.colors[index] = Some(color);
}
/// Respond to a color query escape sequence.
///
/// The reply is produced asynchronously: a formatter closure is handed to
/// the event listener together with the queried color index.
#[inline]
fn dynamic_color_sequence(&mut self, prefix: String, index: usize, terminator: &str) {
    trace!("Requested write of escape sequence for color code {}: color[{}]", prefix, index);
    let terminator = terminator.to_owned();
    self.event_proxy.send_event(Event::ColorRequest(
        index,
        // Positional args: {0}=prefix, {1}=r, {2}=g, {3}=b, {4}=terminator;
        // each channel is doubled to produce 16-bit rgb:rrrr/gggg/bbbb form.
        Arc::new(move |color| {
            format!(
                "\x1b]{};rgb:{1:02x}{1:02x}/{2:02x}{2:02x}/{3:02x}{3:02x}{4}",
                prefix, color.r, color.g, color.b, terminator
            )
        }),
    ));
}
/// Reset the indexed color to original value.
#[inline]
fn reset_color(&mut self, index: usize) {
    trace!("Resetting color[{}]", index);
    // Damage terminal if the color changed and it's not the cursor.
    if index != NamedColor::Cursor as usize && self.colors[index].is_some() {
        self.mark_fully_damaged();
    }
    self.colors[index] = None;
}
/// Store data into clipboard.
///
/// OSC 52 write; honored only when the config permits copying. The payload
/// must be valid base64-encoded UTF-8 or it is silently dropped.
#[inline]
fn clipboard_store(&mut self, clipboard: u8, base64: &[u8]) {
    if !matches!(self.config.osc52, Osc52::OnlyCopy | Osc52::CopyPaste) {
        debug!("Denied osc52 store");
        return;
    }
    let clipboard_type = match clipboard {
        b'c' => ClipboardType::Clipboard,
        b'p' | b's' => ClipboardType::Selection,
        // Unknown clipboard selector: ignore the request.
        _ => return,
    };
    if let Ok(bytes) = Base64.decode(base64) {
        if let Ok(text) = String::from_utf8(bytes) {
            self.event_proxy.send_event(Event::ClipboardStore(clipboard_type, text));
        }
    }
}
/// Load data from clipboard.
///
/// OSC 52 read; honored only when the config permits pasting. The reply is
/// produced asynchronously by a formatter closure that base64-encodes the
/// clipboard contents.
#[inline]
fn clipboard_load(&mut self, clipboard: u8, terminator: &str) {
    if !matches!(self.config.osc52, Osc52::OnlyPaste | Osc52::CopyPaste) {
        debug!("Denied osc52 load");
        return;
    }
    let clipboard_type = match clipboard {
        b'c' => ClipboardType::Clipboard,
        b'p' | b's' => ClipboardType::Selection,
        // Unknown clipboard selector: ignore the request.
        _ => return,
    };
    let terminator = terminator.to_owned();
    self.event_proxy.send_event(Event::ClipboardLoad(
        clipboard_type,
        Arc::new(move |text| {
            let base64 = Base64.encode(text);
            format!("\x1b]52;{};{}{}", clipboard as char, base64, terminator)
        }),
    ));
}
/// Clear part or all of the screen (ED), dropping intersecting selections.
///
/// `All` on the primary screen moves content into history instead of
/// destroying it; `Saved` clears the scrollback history.
#[inline]
fn clear_screen(&mut self, mode: ansi::ClearMode) {
    trace!("Clearing screen: {:?}", mode);
    let bg = self.grid.cursor.template.bg;
    let screen_lines = self.screen_lines();
    match mode {
        ansi::ClearMode::Above => {
            let cursor = self.grid.cursor.point;
            // If clearing more than one line.
            // NOTE(review): with `> 1`, line 0 is not fully reset when the
            // cursor sits on line 1 — confirm this is the intended bound.
            if cursor.line > 1 {
                // Fully clear all lines before the current line.
                self.grid.reset_region(..cursor.line);
            }
            // Clear up to the current column in the current line.
            let end = cmp::min(cursor.column + 1, Column(self.columns()));
            for cell in &mut self.grid[cursor.line][..end] {
                *cell = bg.into();
            }
            let range = Line(0)..=cursor.line;
            self.selection = self.selection.take().filter(|s| !s.intersects_range(range));
        },
        ansi::ClearMode::Below => {
            let cursor = self.grid.cursor.point;
            // Clear from the cursor to the end of its line.
            for cell in &mut self.grid[cursor.line][cursor.column..] {
                *cell = bg.into();
            }
            // Fully clear all lines after the current one.
            if (cursor.line.0 as usize) < screen_lines - 1 {
                self.grid.reset_region((cursor.line + 1)..);
            }
            let range = cursor.line..Line(screen_lines as i32);
            self.selection = self.selection.take().filter(|s| !s.intersects_range(range));
        },
        ansi::ClearMode::All => {
            if self.mode.contains(TermMode::ALT_SCREEN) {
                self.grid.reset_region(..);
            } else {
                // On the primary screen, push the viewport into history
                // instead of discarding it.
                let old_offset = self.grid.display_offset();
                self.grid.clear_viewport();
                // Compute number of lines scrolled by clearing the viewport.
                let lines = self.grid.display_offset().saturating_sub(old_offset);
                self.vi_mode_cursor.point.line =
                    (self.vi_mode_cursor.point.line - lines).grid_clamp(self, Boundary::Grid);
            }
            self.selection = None;
        },
        ansi::ClearMode::Saved if self.history_size() > 0 => {
            self.grid.clear_history();
            // Keep the vi cursor inside the now-smaller grid.
            self.vi_mode_cursor.point.line =
                self.vi_mode_cursor.point.line.grid_clamp(self, Boundary::Cursor);
            self.selection = self.selection.take().filter(|s| !s.intersects_range(..Line(0)));
        },
        // We have no history to clear.
        ansi::ClearMode::Saved => (),
    }
    self.mark_fully_damaged();
}
/// Clear the tabstop under the cursor, or all tabstops (TBC).
#[inline]
fn clear_tabs(&mut self, mode: ansi::TabulationClearMode) {
    trace!("Clearing tabs: {:?}", mode);
    match mode {
        ansi::TabulationClearMode::Current => {
            self.tabs[self.grid.cursor.point.column] = false;
        },
        ansi::TabulationClearMode::All => {
            self.tabs.clear_all();
        },
    }
}
/// Reset all important fields in the term struct.
///
/// Switches back to the primary screen if necessary, resets both grids,
/// the scroll region, tabstops, title/selection/keyboard state and the
/// mode flags — preserving only the vi-mode bit.
#[inline]
fn reset_state(&mut self) {
    // Make sure the primary grid is active before resetting.
    if self.mode.contains(TermMode::ALT_SCREEN) {
        mem::swap(&mut self.grid, &mut self.inactive_grid);
    }
    self.active_charset = Default::default();
    self.cursor_style = None;
    self.grid.reset();
    self.inactive_grid.reset();
    self.scroll_region = Line(0)..Line(self.screen_lines() as i32);
    self.tabs = TabStops::new(self.columns());
    self.title_stack = Vec::new();
    self.title = None;
    self.selection = None;
    self.vi_mode_cursor = Default::default();
    self.keyboard_mode_stack = Default::default();
    self.inactive_keyboard_mode_stack = Default::default();
    // Preserve vi mode across resets.
    self.mode &= TermMode::VI;
    self.mode.insert(TermMode::default());
    self.event_proxy.send_event(Event::CursorBlinkingChange);
    self.mark_fully_damaged();
}
/// Reverse index (RI): move the cursor up one line, scrolling the region
/// down when the cursor is already on its first line.
#[inline]
fn reverse_index(&mut self) {
    trace!("Reversing index");
    // If cursor is at the top.
    if self.grid.cursor.point.line == self.scroll_region.start {
        self.scroll_down(1);
    } else {
        self.damage_cursor();
        self.grid.cursor.point.line = cmp::max(self.grid.cursor.point.line - 1, Line(0));
        self.damage_cursor();
    }
}
/// Set (or clear, with `None`) the hyperlink on the cursor template so
/// subsequently written cells carry it.
#[inline]
fn set_hyperlink(&mut self, hyperlink: Option<Hyperlink>) {
    trace!("Setting hyperlink: {:?}", hyperlink);
    self.grid.cursor.template.set_hyperlink(hyperlink.map(|e| e.into()));
}
/// Set a terminal attribute.
///
/// SGR handling: mutates the cursor template so the attribute applies to
/// all subsequently written cells. Underline variants are mutually
/// exclusive, so the full underline set is cleared before inserting one.
#[inline]
fn terminal_attribute(&mut self, attr: Attr) {
    trace!("Setting attribute: {:?}", attr);
    let cursor = &mut self.grid.cursor;
    match attr {
        Attr::Foreground(color) => cursor.template.fg = color,
        Attr::Background(color) => cursor.template.bg = color,
        Attr::UnderlineColor(color) => cursor.template.set_underline_color(color),
        Attr::Reset => {
            cursor.template.fg = Color::Named(NamedColor::Foreground);
            cursor.template.bg = Color::Named(NamedColor::Background);
            cursor.template.flags = Flags::empty();
            cursor.template.set_underline_color(None);
        },
        Attr::Reverse => cursor.template.flags.insert(Flags::INVERSE),
        Attr::CancelReverse => cursor.template.flags.remove(Flags::INVERSE),
        Attr::Bold => cursor.template.flags.insert(Flags::BOLD),
        Attr::CancelBold => cursor.template.flags.remove(Flags::BOLD),
        Attr::Dim => cursor.template.flags.insert(Flags::DIM),
        Attr::CancelBoldDim => cursor.template.flags.remove(Flags::BOLD | Flags::DIM),
        Attr::Italic => cursor.template.flags.insert(Flags::ITALIC),
        Attr::CancelItalic => cursor.template.flags.remove(Flags::ITALIC),
        Attr::Underline => {
            cursor.template.flags.remove(Flags::ALL_UNDERLINES);
            cursor.template.flags.insert(Flags::UNDERLINE);
        },
        Attr::DoubleUnderline => {
            cursor.template.flags.remove(Flags::ALL_UNDERLINES);
            cursor.template.flags.insert(Flags::DOUBLE_UNDERLINE);
        },
        Attr::Undercurl => {
            cursor.template.flags.remove(Flags::ALL_UNDERLINES);
            cursor.template.flags.insert(Flags::UNDERCURL);
        },
        Attr::DottedUnderline => {
            cursor.template.flags.remove(Flags::ALL_UNDERLINES);
            cursor.template.flags.insert(Flags::DOTTED_UNDERLINE);
        },
        Attr::DashedUnderline => {
            cursor.template.flags.remove(Flags::ALL_UNDERLINES);
            cursor.template.flags.insert(Flags::DASHED_UNDERLINE);
        },
        Attr::CancelUnderline => cursor.template.flags.remove(Flags::ALL_UNDERLINES),
        Attr::Hidden => cursor.template.flags.insert(Flags::HIDDEN),
        Attr::CancelHidden => cursor.template.flags.remove(Flags::HIDDEN),
        Attr::Strike => cursor.template.flags.insert(Flags::STRIKEOUT),
        Attr::CancelStrike => cursor.template.flags.remove(Flags::STRIKEOUT),
        _ => {
            debug!("Term got unhandled attr: {:?}", attr);
        },
    }
}
/// Enable a DEC private mode (DECSET).
#[inline]
fn set_private_mode(&mut self, mode: PrivateMode) {
    let mode = match mode {
        PrivateMode::Named(mode) => mode,
        PrivateMode::Unknown(mode) => {
            debug!("Ignoring unknown mode {} in set_private_mode", mode);
            return;
        },
    };
    trace!("Setting private mode: {:?}", mode);
    match mode {
        NamedPrivateMode::UrgencyHints => self.mode.insert(TermMode::URGENCY_HINTS),
        NamedPrivateMode::SwapScreenAndSetRestoreCursor => {
            // Only switch when not already on the alternate screen.
            if !self.mode.contains(TermMode::ALT_SCREEN) {
                self.swap_alt();
            }
        },
        NamedPrivateMode::ShowCursor => self.mode.insert(TermMode::SHOW_CURSOR),
        NamedPrivateMode::CursorKeys => self.mode.insert(TermMode::APP_CURSOR),
        // Mouse protocols are mutually exclusive.
        NamedPrivateMode::ReportMouseClicks => {
            self.mode.remove(TermMode::MOUSE_MODE);
            self.mode.insert(TermMode::MOUSE_REPORT_CLICK);
            self.event_proxy.send_event(Event::MouseCursorDirty);
        },
        NamedPrivateMode::ReportCellMouseMotion => {
            self.mode.remove(TermMode::MOUSE_MODE);
            self.mode.insert(TermMode::MOUSE_DRAG);
            self.event_proxy.send_event(Event::MouseCursorDirty);
        },
        NamedPrivateMode::ReportAllMouseMotion => {
            self.mode.remove(TermMode::MOUSE_MODE);
            self.mode.insert(TermMode::MOUSE_MOTION);
            self.event_proxy.send_event(Event::MouseCursorDirty);
        },
        NamedPrivateMode::ReportFocusInOut => self.mode.insert(TermMode::FOCUS_IN_OUT),
        NamedPrivateMode::BracketedPaste => self.mode.insert(TermMode::BRACKETED_PASTE),
        // Mouse encodings are mutually exclusive.
        NamedPrivateMode::SgrMouse => {
            self.mode.remove(TermMode::UTF8_MOUSE);
            self.mode.insert(TermMode::SGR_MOUSE);
        },
        NamedPrivateMode::Utf8Mouse => {
            self.mode.remove(TermMode::SGR_MOUSE);
            self.mode.insert(TermMode::UTF8_MOUSE);
        },
        NamedPrivateMode::AlternateScroll => self.mode.insert(TermMode::ALTERNATE_SCROLL),
        NamedPrivateMode::LineWrap => self.mode.insert(TermMode::LINE_WRAP),
        NamedPrivateMode::Origin => self.mode.insert(TermMode::ORIGIN),
        NamedPrivateMode::ColumnMode => self.deccolm(),
        NamedPrivateMode::BlinkingCursor => {
            let style = self.cursor_style.get_or_insert(self.config.default_cursor_style);
            style.blinking = true;
            self.event_proxy.send_event(Event::CursorBlinkingChange);
        },
        NamedPrivateMode::SyncUpdate => (),
    }
}
/// Disable a DEC private mode (DECRST).
#[inline]
fn unset_private_mode(&mut self, mode: PrivateMode) {
    let mode = match mode {
        PrivateMode::Named(mode) => mode,
        PrivateMode::Unknown(mode) => {
            debug!("Ignoring unknown mode {} in unset_private_mode", mode);
            return;
        },
    };
    trace!("Unsetting private mode: {:?}", mode);
    match mode {
        NamedPrivateMode::UrgencyHints => self.mode.remove(TermMode::URGENCY_HINTS),
        NamedPrivateMode::SwapScreenAndSetRestoreCursor => {
            // Only switch when currently on the alternate screen.
            if self.mode.contains(TermMode::ALT_SCREEN) {
                self.swap_alt();
            }
        },
        NamedPrivateMode::ShowCursor => self.mode.remove(TermMode::SHOW_CURSOR),
        NamedPrivateMode::CursorKeys => self.mode.remove(TermMode::APP_CURSOR),
        NamedPrivateMode::ReportMouseClicks => {
            self.mode.remove(TermMode::MOUSE_REPORT_CLICK);
            self.event_proxy.send_event(Event::MouseCursorDirty);
        },
        NamedPrivateMode::ReportCellMouseMotion => {
            self.mode.remove(TermMode::MOUSE_DRAG);
            self.event_proxy.send_event(Event::MouseCursorDirty);
        },
        NamedPrivateMode::ReportAllMouseMotion => {
            self.mode.remove(TermMode::MOUSE_MOTION);
            self.event_proxy.send_event(Event::MouseCursorDirty);
        },
        NamedPrivateMode::ReportFocusInOut => self.mode.remove(TermMode::FOCUS_IN_OUT),
        NamedPrivateMode::BracketedPaste => self.mode.remove(TermMode::BRACKETED_PASTE),
        NamedPrivateMode::SgrMouse => self.mode.remove(TermMode::SGR_MOUSE),
        NamedPrivateMode::Utf8Mouse => self.mode.remove(TermMode::UTF8_MOUSE),
        NamedPrivateMode::AlternateScroll => self.mode.remove(TermMode::ALTERNATE_SCROLL),
        NamedPrivateMode::LineWrap => self.mode.remove(TermMode::LINE_WRAP),
        NamedPrivateMode::Origin => self.mode.remove(TermMode::ORIGIN),
        NamedPrivateMode::ColumnMode => self.deccolm(),
        NamedPrivateMode::BlinkingCursor => {
            let style = self.cursor_style.get_or_insert(self.config.default_cursor_style);
            style.blinking = false;
            self.event_proxy.send_event(Event::CursorBlinkingChange);
        },
        NamedPrivateMode::SyncUpdate => (),
    }
}
#[inline]
fn report_private_mode(&mut self, mode: PrivateMode) {
    trace!("Reporting private mode {mode:?}");
    // Map the requested mode onto its current state; modes without a
    // corresponding `TermMode` flag report a fixed state or `NotSupported`.
    let state = match mode {
        PrivateMode::Named(mode) => match mode {
            NamedPrivateMode::CursorKeys => self.mode.contains(TermMode::APP_CURSOR).into(),
            NamedPrivateMode::Origin => self.mode.contains(TermMode::ORIGIN).into(),
            NamedPrivateMode::LineWrap => self.mode.contains(TermMode::LINE_WRAP).into(),
            NamedPrivateMode::BlinkingCursor => {
                // Materializes the default cursor style if none is set so the
                // blinking flag can be read.
                let style = self.cursor_style.get_or_insert(self.config.default_cursor_style);
                style.blinking.into()
            },
            NamedPrivateMode::ShowCursor => self.mode.contains(TermMode::SHOW_CURSOR).into(),
            NamedPrivateMode::ReportMouseClicks => {
                self.mode.contains(TermMode::MOUSE_REPORT_CLICK).into()
            },
            NamedPrivateMode::ReportCellMouseMotion => {
                self.mode.contains(TermMode::MOUSE_DRAG).into()
            },
            NamedPrivateMode::ReportAllMouseMotion => {
                self.mode.contains(TermMode::MOUSE_MOTION).into()
            },
            NamedPrivateMode::ReportFocusInOut => {
                self.mode.contains(TermMode::FOCUS_IN_OUT).into()
            },
            NamedPrivateMode::Utf8Mouse => self.mode.contains(TermMode::UTF8_MOUSE).into(),
            NamedPrivateMode::SgrMouse => self.mode.contains(TermMode::SGR_MOUSE).into(),
            NamedPrivateMode::AlternateScroll => {
                self.mode.contains(TermMode::ALTERNATE_SCROLL).into()
            },
            NamedPrivateMode::UrgencyHints => {
                self.mode.contains(TermMode::URGENCY_HINTS).into()
            },
            NamedPrivateMode::SwapScreenAndSetRestoreCursor => {
                self.mode.contains(TermMode::ALT_SCREEN).into()
            },
            NamedPrivateMode::BracketedPaste => {
                self.mode.contains(TermMode::BRACKETED_PASTE).into()
            },
            // Sync updates are never latched, so they always report reset.
            NamedPrivateMode::SyncUpdate => ModeState::Reset,
            NamedPrivateMode::ColumnMode => ModeState::NotSupported,
        },
        PrivateMode::Unknown(_) => ModeState::NotSupported,
    };

    // Reply with `CSI ? <mode> ; <state> $ y` (private-mode report).
    self.event_proxy.send_event(Event::PtyWrite(format!(
        "\x1b[?{};{}$y",
        mode.raw(),
        state as u8,
    )));
}
/// Enable a public (non-private) terminal mode.
#[inline]
fn set_mode(&mut self, mode: ansi::Mode) {
    // Unknown mode numbers are logged and dropped; only named modes are handled.
    let named = match mode {
        ansi::Mode::Named(named) => named,
        ansi::Mode::Unknown(raw) => {
            debug!("Ignoring unknown mode {} in set_mode", raw);
            return;
        },
    };

    trace!("Setting public mode: {:?}", named);

    // Translate the named mode into its `TermMode` flag, then set it.
    let flag = match named {
        NamedMode::Insert => TermMode::INSERT,
        NamedMode::LineFeedNewLine => TermMode::LINE_FEED_NEW_LINE,
    };
    self.mode.insert(flag);
}
/// Disable a public (non-private) terminal mode.
#[inline]
fn unset_mode(&mut self, mode: ansi::Mode) {
    // Unknown mode numbers are logged and dropped; only named modes are handled.
    let mode = match mode {
        ansi::Mode::Named(mode) => mode,
        ansi::Mode::Unknown(mode) => {
            debug!("Ignoring unknown mode {} in unset_mode", mode);
            return;
        },
    };

    // Fixed log message: this previously said "Setting public mode",
    // copy-pasted from `set_mode`; the trace now matches the operation.
    trace!("Unsetting public mode: {:?}", mode);
    match mode {
        NamedMode::Insert => {
            self.mode.remove(TermMode::INSERT);
            // Conservative full repaint when leaving insert mode (see
            // `mark_fully_damaged`).
            self.mark_fully_damaged();
        },
        NamedMode::LineFeedNewLine => self.mode.remove(TermMode::LINE_FEED_NEW_LINE),
    }
}
/// Report the current state of a public mode to the PTY.
#[inline]
fn report_mode(&mut self, mode: ansi::Mode) {
    trace!("Reporting mode {mode:?}");

    // Determine the reported state; unknown modes are not supported.
    let state = match mode {
        ansi::Mode::Named(NamedMode::Insert) => self.mode.contains(TermMode::INSERT).into(),
        ansi::Mode::Named(NamedMode::LineFeedNewLine) => {
            self.mode.contains(TermMode::LINE_FEED_NEW_LINE).into()
        },
        ansi::Mode::Unknown(_) => ModeState::NotSupported,
    };

    // Reply with `CSI <mode> ; <state> $ y`.
    let reply = format!("\x1b[{};{}$y", mode.raw(), state as u8);
    self.event_proxy.send_event(Event::PtyWrite(reply));
}
/// Set the active scrolling region from 1-indexed `top`/`bottom` rows.
#[inline]
fn set_scrolling_region(&mut self, top: usize, bottom: Option<usize>) {
    // Fallback to the last line as default.
    let bottom = bottom.unwrap_or_else(|| self.screen_lines());

    // A region must contain at least one line below its top row; otherwise
    // the request is ignored.
    if top >= bottom {
        debug!("Invalid scrolling region: ({};{})", top, bottom);
        return;
    }

    // Bottom should be included in the range, but range end is not
    // usually included. One option would be to use an inclusive
    // range, but instead we just let the open range end be 1
    // higher.
    // Note: `top` is 1-indexed, hence the `- 1` when converting to `Line`.
    let start = Line(top as i32 - 1);
    let end = Line(bottom as i32);

    trace!("Setting scrolling region: ({};{})", start, end);

    // Clamp the stored region to the visible screen.
    let screen_lines = Line(self.screen_lines() as i32);
    self.scroll_region.start = cmp::min(start, screen_lines);
    self.scroll_region.end = cmp::min(end, screen_lines);

    // Changing the region homes the cursor.
    self.goto(0, 0);
}
/// Enable application keypad mode.
#[inline]
fn set_keypad_application_mode(&mut self) {
    trace!("Setting keypad application mode");
    self.mode.insert(TermMode::APP_KEYPAD);
}
/// Disable application keypad mode.
#[inline]
fn unset_keypad_application_mode(&mut self) {
    trace!("Unsetting keypad application mode");
    self.mode.remove(TermMode::APP_KEYPAD);
}
/// Assign `charset` to the charset slot selected by `index` for the cursor.
#[inline]
fn configure_charset(&mut self, index: CharsetIndex, charset: StandardCharset) {
    trace!("Configuring charset {:?} as {:?}", index, charset);
    self.grid.cursor.charsets[index] = charset;
}
/// Select which configured charset slot is active for subsequent input.
#[inline]
fn set_active_charset(&mut self, index: CharsetIndex) {
    trace!("Setting active charset {:?}", index);
    self.active_charset = index;
}
/// Override the cursor style; `None` clears the override.
#[inline]
fn set_cursor_style(&mut self, style: Option<CursorStyle>) {
    trace!("Setting cursor style {:?}", style);
    self.cursor_style = style;

    // Notify UI about blinking changes.
    self.event_proxy.send_event(Event::CursorBlinkingChange);
}
/// Change only the cursor's shape, preserving its blinking setting.
#[inline]
fn set_cursor_shape(&mut self, shape: CursorShape) {
    trace!("Setting cursor shape {:?}", shape);

    // Materialize the default style if no override is set, then patch the shape.
    let style = self.cursor_style.get_or_insert(self.config.default_cursor_style);
    style.shape = shape;
}
/// Update the stored window title and notify the UI; `None` resets the title.
#[inline]
fn set_title(&mut self, title: Option<String>) {
    trace!("Setting title to '{:?}'", title);

    // Keep our own copy before handing the title to the event.
    self.title.clone_from(&title);

    // `None` maps to a title reset, `Some` to an explicit title change.
    let event = title.map_or(Event::ResetTitle, Event::Title);
    self.event_proxy.send_event(event);
}
/// Push the current title onto the title stack, bounding the stack's depth.
#[inline]
fn push_title(&mut self) {
    trace!("Pushing '{:?}' onto title stack", self.title);

    // Drop the oldest entry instead of growing beyond the maximum depth.
    if self.title_stack.len() >= TITLE_STACK_MAX_DEPTH {
        let removed = self.title_stack.remove(0);
        trace!(
            "Removing '{:?}' from bottom of title stack that exceeds its maximum depth",
            removed
        );
    }

    self.title_stack.push(self.title.clone());
}
/// Pop the most recent title from the stack and apply it; no-op when empty.
#[inline]
fn pop_title(&mut self) {
    trace!("Attempting to pop title from stack...");

    if let Some(popped) = self.title_stack.pop() {
        trace!("Title '{:?}' popped from stack", popped);
        self.set_title(popped);
    }
}
/// Report the text area size in pixels (`CSI 4 ; height ; width t`).
#[inline]
fn text_area_size_pixels(&mut self) {
    // Pixel metrics are only known to the UI, so ask it to format the reply
    // from the current window size.
    self.event_proxy.send_event(Event::TextAreaSizeRequest(Arc::new(move |window_size| {
        let height = window_size.num_lines * window_size.cell_height;
        let width = window_size.num_cols * window_size.cell_width;
        format!("\x1b[4;{height};{width}t")
    })));
}
/// Report the text area size in characters (`CSI 8 ; lines ; columns t`).
#[inline]
fn text_area_size_chars(&mut self) {
    let text = format!("\x1b[8;{};{}t", self.screen_lines(), self.columns());
    self.event_proxy.send_event(Event::PtyWrite(text));
}
}
/// The state of the [`Mode`] and [`PrivateMode`].
///
/// The numeric discriminants are written verbatim into the `$y` mode reports,
/// so they must not be changed.
#[repr(u8)]
#[derive(Debug, Clone, Copy)]
enum ModeState {
    /// The mode is not supported.
    NotSupported = 0,
    /// The mode is currently set.
    Set = 1,
    /// The mode is currently not set.
    Reset = 2,
}
impl From<bool> for ModeState {
    /// Map a mode flag to its report state: `true` → `Set`, `false` → `Reset`.
    fn from(value: bool) -> Self {
        match value {
            true => Self::Set,
            false => Self::Reset,
        }
    }
}
/// Terminal version for escape sequence reports.
///
/// This returns the current terminal version as a unique number based on alacritty_terminal's
/// semver version. The different versions are padded to ensure that a higher semver version will
/// always report a higher version number.
fn version_number(mut version: &str) -> usize {
    // Strip the pre-release identifier, which starts at the *first* `-`.
    // This previously used `rfind`, which breaks for pre-release tags that
    // themselves contain hyphens (e.g. "1.2.3-rc-1" would keep "-rc" attached
    // to the patch component and parse it as 0).
    if let Some(separator) = version.find('-') {
        version = &version[..separator];
    }

    // Fold the dotted components into one number, allotting two decimal
    // digits per component: patch + 100 * minor + 10_000 * major.
    let mut version_number = 0;

    let semver_versions = version.split('.');
    for (i, semver_version) in semver_versions.rev().enumerate() {
        // Non-numeric components degrade to 0 rather than failing the report.
        let semver_number = semver_version.parse::<usize>().unwrap_or(0);
        version_number += usize::pow(100, i as u32) * semver_number;
    }

    version_number
}
/// Clipboard buffer a copy/paste operation targets.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum ClipboardType {
    /// The regular system clipboard.
    Clipboard,
    /// The (primary) selection buffer.
    Selection,
}
/// Tabstop configuration, one flag per column.
struct TabStops {
    // `tabs[i]` is true when column `i` is a tabstop.
    tabs: Vec<bool>,
}
impl TabStops {
    /// Create tabstops at every `INITIAL_TABSTOPS` column.
    #[inline]
    fn new(columns: usize) -> TabStops {
        TabStops { tabs: (0..columns).map(|i| i % INITIAL_TABSTOPS == 0).collect() }
    }

    /// Remove all tabstops.
    #[inline]
    fn clear_all(&mut self) {
        // `slice::fill` compiles down to a memset for `bool`, so there is no
        // need for the `unsafe ptr::write_bytes` this previously used.
        self.tabs.fill(false);
    }

    /// Increase tabstop capacity.
    #[inline]
    fn resize(&mut self, columns: usize) {
        // Newly added columns continue the regular tabstop pattern; existing
        // (possibly user-modified) stops are left untouched.
        let mut index = self.tabs.len();
        self.tabs.resize_with(columns, || {
            let is_tabstop = index % INITIAL_TABSTOPS == 0;
            index += 1;
            is_tabstop
        });
    }
}
impl Index<Column> for TabStops {
    type Output = bool;

    /// Whether the given column is a tabstop.
    fn index(&self, index: Column) -> &bool {
        &self.tabs[index.0]
    }
}
impl IndexMut<Column> for TabStops {
    /// Mutable access to the tabstop flag for the given column.
    fn index_mut(&mut self, index: Column) -> &mut bool {
        self.tabs.index_mut(index.0)
    }
}
/// Terminal cursor rendering information.
#[derive(Copy, Clone, PartialEq, Eq)]
pub struct RenderableCursor {
    /// Shape to draw; `Hidden` when the cursor should not be rendered.
    pub shape: CursorShape,
    /// Grid position of the cursor.
    pub point: Point,
}
impl RenderableCursor {
    /// Compute the cursor to render for the current terminal state.
    fn new<T>(term: &Term<T>) -> Self {
        // Cursor position: in vi mode the vi cursor is rendered instead of
        // the terminal's input cursor.
        let vi_mode = term.mode().contains(TermMode::VI);
        let mut point = if vi_mode { term.vi_mode_cursor.point } else { term.grid.cursor.point };
        // If the cursor sits on the spacer half of a fullwidth character,
        // draw it over the character's leading cell instead.
        if term.grid[point].flags.contains(Flags::WIDE_CHAR_SPACER) {
            point.column -= 1;
        }

        // Cursor shape: the input cursor is hidden while SHOW_CURSOR is
        // unset; the vi cursor is always visible.
        let shape = if !vi_mode && !term.mode().contains(TermMode::SHOW_CURSOR) {
            CursorShape::Hidden
        } else {
            term.cursor_style().shape
        };
        Self { shape, point }
    }
}
/// Visible terminal content.
///
/// This contains all content required to render the current terminal view.
pub struct RenderableContent<'a> {
    /// Iterator over the visible grid cells.
    pub display_iter: GridIterator<'a, Cell>,
    /// Active selection range, if any.
    pub selection: Option<SelectionRange>,
    /// Cursor to render.
    pub cursor: RenderableCursor,
    /// Number of lines the viewport is scrolled back.
    pub display_offset: usize,
    /// Terminal color palette.
    pub colors: &'a color::Colors,
    /// Active terminal mode flags.
    pub mode: TermMode,
}
impl<'a> RenderableContent<'a> {
    /// Collect a renderable snapshot of the terminal's visible state.
    fn new<T>(term: &'a Term<T>) -> Self {
        Self {
            display_iter: term.grid().display_iter(),
            display_offset: term.grid().display_offset(),
            cursor: RenderableCursor::new(term),
            // Resolve the selection into a concrete cell range, if possible.
            selection: term.selection.as_ref().and_then(|s| s.to_range(term)),
            colors: &term.colors,
            mode: *term.mode(),
        }
    }
}
/// Terminal test helpers.
pub mod test {
    use super::*;

    #[cfg(feature = "serde")]
    use serde::{Deserialize, Serialize};

    use crate::event::VoidListener;

    /// Simple terminal dimensions used by tests.
    #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
    pub struct TermSize {
        // Number of columns.
        pub columns: usize,
        // Number of visible lines.
        pub screen_lines: usize,
    }

    impl TermSize {
        /// Create a size with the given columns and visible lines.
        pub fn new(columns: usize, screen_lines: usize) -> Self {
            Self { columns, screen_lines }
        }
    }

    impl Dimensions for TermSize {
        // No scrollback: the total equals the visible screen.
        fn total_lines(&self) -> usize {
            self.screen_lines()
        }

        fn screen_lines(&self) -> usize {
            self.screen_lines
        }

        fn columns(&self) -> usize {
            self.columns
        }
    }

    /// Construct a terminal from its content as string.
    ///
    /// A `\n` will break line and `\r\n` will break line without wrapping.
    ///
    /// # Examples
    ///
    /// ```rust
    /// use alacritty_terminal::term::test::mock_term;
    ///
    /// // Create a terminal with the following cells:
    /// //
    /// // [h][e][l][l][o] <- WRAPLINE flag set
    /// // [:][)][ ][ ][ ]
    /// // [t][e][s][t][ ]
    /// mock_term(
    ///     "\
    ///     hello\n:)\r\ntest",
    /// );
    /// ```
    pub fn mock_term(content: &str) -> Term<VoidListener> {
        let lines: Vec<&str> = content.split('\n').collect();
        // Widest line (in display cells, honoring fullwidth characters)
        // determines the terminal's column count.
        let num_cols = lines
            .iter()
            .map(|line| line.chars().filter(|c| *c != '\r').map(|c| c.width().unwrap()).sum())
            .max()
            .unwrap_or(0);

        // Create terminal with the appropriate dimensions.
        let size = TermSize::new(num_cols, lines.len());
        let mut term = Term::new(Config::default(), &size, VoidListener);

        // Fill terminal with content.
        for (line, text) in lines.iter().enumerate() {
            let line = Line(line as i32);
            // A line broken by plain `\n` (no `\r`) is marked as wrapped,
            // except for the very last line.
            if !text.ends_with('\r') && line + 1 != lines.len() {
                term.grid[line][Column(num_cols - 1)].flags.insert(Flags::WRAPLINE);
            }

            let mut index = 0;
            for c in text.chars().take_while(|c| *c != '\r') {
                term.grid[line][Column(index)].c = c;

                // Handle fullwidth characters.
                let width = c.width().unwrap();
                if width == 2 {
                    term.grid[line][Column(index)].flags.insert(Flags::WIDE_CHAR);
                    term.grid[line][Column(index + 1)].flags.insert(Flags::WIDE_CHAR_SPACER);
                }

                index += width;
            }
        }

        term
    }
}
#[cfg(test)]
mod tests {
use super::*;
use std::mem;
use crate::event::VoidListener;
use crate::grid::{Grid, Scroll};
use crate::index::{Column, Point, Side};
use crate::selection::{Selection, SelectionType};
use crate::term::cell::{Cell, Flags};
use crate::term::test::TermSize;
use crate::vte::ansi::{self, CharsetIndex, Handler, StandardCharset};
#[test]
fn scroll_display_page_up() {
let size = TermSize::new(5, 10);
let mut term = Term::new(Config::default(), &size, VoidListener);
// Create 11 lines of scrollback.
for _ in 0..20 {
term.newline();
}
// Scrollable amount to top is 11.
term.scroll_display(Scroll::PageUp);
assert_eq!(term.vi_mode_cursor.point, Point::new(Line(-1), Column(0)));
assert_eq!(term.grid.display_offset(), 10);
// Scrollable amount to top is 1.
term.scroll_display(Scroll::PageUp);
assert_eq!(term.vi_mode_cursor.point, Point::new(Line(-2), Column(0)));
assert_eq!(term.grid.display_offset(), 11);
// Scrollable amount to top is 0.
term.scroll_display(Scroll::PageUp);
assert_eq!(term.vi_mode_cursor.point, Point::new(Line(-2), Column(0)));
assert_eq!(term.grid.display_offset(), 11);
}
#[test]
fn scroll_display_page_down() {
let size = TermSize::new(5, 10);
let mut term = Term::new(Config::default(), &size, VoidListener);
// Create 11 lines of scrollback.
for _ in 0..20 {
term.newline();
}
// Change display_offset to topmost.
term.grid_mut().scroll_display(Scroll::Top);
term.vi_mode_cursor = ViModeCursor::new(Point::new(Line(-11), Column(0)));
// Scrollable amount to bottom is 11.
term.scroll_display(Scroll::PageDown);
assert_eq!(term.vi_mode_cursor.point, Point::new(Line(-1), Column(0)));
assert_eq!(term.grid.display_offset(), 1);
// Scrollable amount to bottom is 1.
term.scroll_display(Scroll::PageDown);
assert_eq!(term.vi_mode_cursor.point, Point::new(Line(0), Column(0)));
assert_eq!(term.grid.display_offset(), 0);
// Scrollable amount to bottom is 0.
term.scroll_display(Scroll::PageDown);
assert_eq!(term.vi_mode_cursor.point, Point::new(Line(0), Column(0)));
assert_eq!(term.grid.display_offset(), 0);
}
#[test]
fn simple_selection_works() {
let size = TermSize::new(5, 5);
let mut term = Term::new(Config::default(), &size, VoidListener);
let grid = term.grid_mut();
for i in 0..4 {
if i == 1 {
continue;
}
grid[Line(i)][Column(0)].c = '"';
for j in 1..4 {
grid[Line(i)][Column(j)].c = 'a';
}
grid[Line(i)][Column(4)].c = '"';
}
grid[Line(2)][Column(0)].c = ' ';
grid[Line(2)][Column(4)].c = ' ';
grid[Line(2)][Column(4)].flags.insert(Flags::WRAPLINE);
grid[Line(3)][Column(0)].c = ' ';
// Multiple lines contain an empty line.
term.selection = Some(Selection::new(
SelectionType::Simple,
Point { line: Line(0), column: Column(0) },
Side::Left,
));
if let Some(s) = term.selection.as_mut() {
s.update(Point { line: Line(2), column: Column(4) }, Side::Right);
}
assert_eq!(term.selection_to_string(), Some(String::from("\"aaa\"\n\n aaa ")));
// A wrapline.
term.selection = Some(Selection::new(
SelectionType::Simple,
Point { line: Line(2), column: Column(0) },
Side::Left,
));
if let Some(s) = term.selection.as_mut() {
s.update(Point { line: Line(3), column: Column(4) }, Side::Right);
}
assert_eq!(term.selection_to_string(), Some(String::from(" aaa aaa\"")));
}
#[test]
fn semantic_selection_works() {
let size = TermSize::new(5, 3);
let mut term = Term::new(Config::default(), &size, VoidListener);
let mut grid: Grid<Cell> = Grid::new(3, 5, 0);
for i in 0..5 {
for j in 0..2 {
grid[Line(j)][Column(i)].c = 'a';
}
}
grid[Line(0)][Column(0)].c = '"';
grid[Line(0)][Column(3)].c = '"';
grid[Line(1)][Column(2)].c = '"';
grid[Line(0)][Column(4)].flags.insert(Flags::WRAPLINE);
let mut escape_chars = String::from("\"");
mem::swap(&mut term.grid, &mut grid);
mem::swap(&mut term.config.semantic_escape_chars, &mut escape_chars);
{
term.selection = Some(Selection::new(
SelectionType::Semantic,
Point { line: Line(0), column: Column(1) },
Side::Left,
));
assert_eq!(term.selection_to_string(), Some(String::from("aa")));
}
{
term.selection = Some(Selection::new(
SelectionType::Semantic,
Point { line: Line(0), column: Column(4) },
Side::Left,
));
assert_eq!(term.selection_to_string(), Some(String::from("aaa")));
}
{
term.selection = Some(Selection::new(
SelectionType::Semantic,
Point { line: Line(1), column: Column(1) },
Side::Left,
));
assert_eq!(term.selection_to_string(), Some(String::from("aaa")));
}
}
#[test]
fn line_selection_works() {
let size = TermSize::new(5, 1);
let mut term = Term::new(Config::default(), &size, VoidListener);
let mut grid: Grid<Cell> = Grid::new(1, 5, 0);
for i in 0..5 {
grid[Line(0)][Column(i)].c = 'a';
}
grid[Line(0)][Column(0)].c = '"';
grid[Line(0)][Column(3)].c = '"';
mem::swap(&mut term.grid, &mut grid);
term.selection = Some(Selection::new(
SelectionType::Lines,
Point { line: Line(0), column: Column(3) },
Side::Left,
));
assert_eq!(term.selection_to_string(), Some(String::from("\"aa\"a\n")));
}
#[test]
fn block_selection_works() {
let size = TermSize::new(5, 5);
let mut term = Term::new(Config::default(), &size, VoidListener);
let grid = term.grid_mut();
for i in 1..4 {
grid[Line(i)][Column(0)].c = '"';
for j in 1..4 {
grid[Line(i)][Column(j)].c = 'a';
}
grid[Line(i)][Column(4)].c = '"';
}
grid[Line(2)][Column(2)].c = ' ';
grid[Line(2)][Column(4)].flags.insert(Flags::WRAPLINE);
grid[Line(3)][Column(4)].c = ' ';
term.selection = Some(Selection::new(
SelectionType::Block,
Point { line: Line(0), column: Column(3) },
Side::Left,
));
// The same column.
if let Some(s) = term.selection.as_mut() {
s.update(Point { line: Line(3), column: Column(3) }, Side::Right);
}
assert_eq!(term.selection_to_string(), Some(String::from("\na\na\na")));
// The first column.
if let Some(s) = term.selection.as_mut() {
s.update(Point { line: Line(3), column: Column(0) }, Side::Left);
}
assert_eq!(term.selection_to_string(), Some(String::from("\n\"aa\n\"a\n\"aa")));
// The last column.
if let Some(s) = term.selection.as_mut() {
s.update(Point { line: Line(3), column: Column(4) }, Side::Right);
}
assert_eq!(term.selection_to_string(), Some(String::from("\na\"\na\"\na")));
}
/// Check that the grid can be serialized back and forth losslessly.
///
/// This test is in the term module as opposed to the grid since we want to
/// test this property with a T=Cell.
#[test]
#[cfg(feature = "serde")]
fn grid_serde() {
let grid: Grid<Cell> = Grid::new(24, 80, 0);
let serialized = serde_json::to_string(&grid).expect("ser");
let deserialized = serde_json::from_str::<Grid<Cell>>(&serialized).expect("de");
assert_eq!(deserialized, grid);
}
#[test]
fn input_line_drawing_character() {
let size = TermSize::new(7, 17);
let mut term = Term::new(Config::default(), &size, VoidListener);
let cursor = Point::new(Line(0), Column(0));
term.configure_charset(CharsetIndex::G0, StandardCharset::SpecialCharacterAndLineDrawing);
term.input('a');
assert_eq!(term.grid()[cursor].c, '▒');
}
#[test]
fn clearing_viewport_keeps_history_position() {
let size = TermSize::new(10, 20);
let mut term = Term::new(Config::default(), &size, VoidListener);
// Create 10 lines of scrollback.
for _ in 0..29 {
term.newline();
}
// Change the display area.
term.scroll_display(Scroll::Top);
assert_eq!(term.grid.display_offset(), 10);
// Clear the viewport.
term.clear_screen(ansi::ClearMode::All);
assert_eq!(term.grid.display_offset(), 10);
}
#[test]
fn clearing_viewport_with_vi_mode_keeps_history_position() {
let size = TermSize::new(10, 20);
let mut term = Term::new(Config::default(), &size, VoidListener);
// Create 10 lines of scrollback.
for _ in 0..29 {
term.newline();
}
// Enable vi mode.
term.toggle_vi_mode();
// Change the display area and the vi cursor position.
term.scroll_display(Scroll::Top);
term.vi_mode_cursor.point = Point::new(Line(-5), Column(3));
assert_eq!(term.grid.display_offset(), 10);
// Clear the viewport.
term.clear_screen(ansi::ClearMode::All);
assert_eq!(term.grid.display_offset(), 10);
assert_eq!(term.vi_mode_cursor.point, Point::new(Line(-5), Column(3)));
}
#[test]
fn clearing_scrollback_resets_display_offset() {
let size = TermSize::new(10, 20);
let mut term = Term::new(Config::default(), &size, VoidListener);
// Create 10 lines of scrollback.
for _ in 0..29 {
term.newline();
}
// Change the display area.
term.scroll_display(Scroll::Top);
assert_eq!(term.grid.display_offset(), 10);
// Clear the scrollback buffer.
term.clear_screen(ansi::ClearMode::Saved);
assert_eq!(term.grid.display_offset(), 0);
}
#[test]
fn clearing_scrollback_sets_vi_cursor_into_viewport() {
let size = TermSize::new(10, 20);
let mut term = Term::new(Config::default(), &size, VoidListener);
// Create 10 lines of scrollback.
for _ in 0..29 {
term.newline();
}
// Enable vi mode.
term.toggle_vi_mode();
// Change the display area and the vi cursor position.
term.scroll_display(Scroll::Top);
term.vi_mode_cursor.point = Point::new(Line(-5), Column(3));
assert_eq!(term.grid.display_offset(), 10);
// Clear the scrollback buffer.
term.clear_screen(ansi::ClearMode::Saved);
assert_eq!(term.grid.display_offset(), 0);
assert_eq!(term.vi_mode_cursor.point, Point::new(Line(0), Column(3)));
}
#[test]
fn clear_saved_lines() {
let size = TermSize::new(7, 17);
let mut term = Term::new(Config::default(), &size, VoidListener);
// Add one line of scrollback.
term.grid.scroll_up(&(Line(0)..Line(1)), 1);
// Clear the history.
term.clear_screen(ansi::ClearMode::Saved);
// Make sure that scrolling does not change the grid.
let mut scrolled_grid = term.grid.clone();
scrolled_grid.scroll_display(Scroll::Top);
// Truncate grids for comparison.
scrolled_grid.truncate();
term.grid.truncate();
assert_eq!(term.grid, scrolled_grid);
}
#[test]
fn vi_cursor_keep_pos_on_scrollback_buffer() {
let size = TermSize::new(5, 10);
let mut term = Term::new(Config::default(), &size, VoidListener);
// Create 11 lines of scrollback.
for _ in 0..20 {
term.newline();
}
// Enable vi mode.
term.toggle_vi_mode();
term.scroll_display(Scroll::Top);
term.vi_mode_cursor.point.line = Line(-11);
term.linefeed();
assert_eq!(term.vi_mode_cursor.point.line, Line(-12));
}
#[test]
fn grow_lines_updates_active_cursor_pos() {
let mut size = TermSize::new(100, 10);
let mut term = Term::new(Config::default(), &size, VoidListener);
// Create 10 lines of scrollback.
for _ in 0..19 {
term.newline();
}
assert_eq!(term.history_size(), 10);
assert_eq!(term.grid.cursor.point, Point::new(Line(9), Column(0)));
// Increase visible lines.
size.screen_lines = 30;
term.resize(size);
assert_eq!(term.history_size(), 0);
assert_eq!(term.grid.cursor.point, Point::new(Line(19), Column(0)));
}
#[test]
fn grow_lines_updates_inactive_cursor_pos() {
let mut size = TermSize::new(100, 10);
let mut term = Term::new(Config::default(), &size, VoidListener);
// Create 10 lines of scrollback.
for _ in 0..19 {
term.newline();
}
assert_eq!(term.history_size(), 10);
assert_eq!(term.grid.cursor.point, Point::new(Line(9), Column(0)));
// Enter alt screen.
term.set_private_mode(NamedPrivateMode::SwapScreenAndSetRestoreCursor.into());
// Increase visible lines.
size.screen_lines = 30;
term.resize(size);
// Leave alt screen.
term.unset_private_mode(NamedPrivateMode::SwapScreenAndSetRestoreCursor.into());
assert_eq!(term.history_size(), 0);
assert_eq!(term.grid.cursor.point, Point::new(Line(19), Column(0)));
}
#[test]
fn shrink_lines_updates_active_cursor_pos() {
let mut size = TermSize::new(100, 10);
let mut term = Term::new(Config::default(), &size, VoidListener);
// Create 10 lines of scrollback.
for _ in 0..19 {
term.newline();
}
assert_eq!(term.history_size(), 10);
assert_eq!(term.grid.cursor.point, Point::new(Line(9), Column(0)));
// Increase visible lines.
size.screen_lines = 5;
term.resize(size);
assert_eq!(term.history_size(), 15);
assert_eq!(term.grid.cursor.point, Point::new(Line(4), Column(0)));
}
#[test]
fn shrink_lines_updates_inactive_cursor_pos() {
let mut size = TermSize::new(100, 10);
let mut term = Term::new(Config::default(), &size, VoidListener);
// Create 10 lines of scrollback.
for _ in 0..19 {
term.newline();
}
assert_eq!(term.history_size(), 10);
assert_eq!(term.grid.cursor.point, Point::new(Line(9), Column(0)));
// Enter alt screen.
term.set_private_mode(NamedPrivateMode::SwapScreenAndSetRestoreCursor.into());
// Increase visible lines.
size.screen_lines = 5;
term.resize(size);
// Leave alt screen.
term.unset_private_mode(NamedPrivateMode::SwapScreenAndSetRestoreCursor.into());
assert_eq!(term.history_size(), 15);
assert_eq!(term.grid.cursor.point, Point::new(Line(4), Column(0)));
}
#[test]
fn damage_public_usage() {
let size = TermSize::new(10, 10);
let mut term = Term::new(Config::default(), &size, VoidListener);
// Reset terminal for partial damage tests since it's initialized as fully damaged.
term.reset_damage();
// Test that we damage input form [`Term::input`].
let left = term.grid.cursor.point.column.0;
term.input('d');
term.input('a');
term.input('m');
term.input('a');
term.input('g');
term.input('e');
let right = term.grid.cursor.point.column.0;
let mut damaged_lines = match term.damage() {
TermDamage::Full => panic!("Expected partial damage, however got Full"),
TermDamage::Partial(damaged_lines) => damaged_lines,
};
assert_eq!(damaged_lines.next(), Some(LineDamageBounds { line: 0, left, right }));
assert_eq!(damaged_lines.next(), None);
term.reset_damage();
// Create scrollback.
for _ in 0..20 {
term.newline();
}
match term.damage() {
TermDamage::Full => (),
TermDamage::Partial(_) => panic!("Expected Full damage, however got Partial "),
};
term.reset_damage();
term.scroll_display(Scroll::Delta(10));
term.reset_damage();
// No damage when scrolled into viewport.
for idx in 0..term.columns() {
term.goto(idx as i32, idx);
}
let mut damaged_lines = match term.damage() {
TermDamage::Full => panic!("Expected partial damage, however got Full"),
TermDamage::Partial(damaged_lines) => damaged_lines,
};
assert_eq!(damaged_lines.next(), None);
// Scroll back into the viewport, so we have 2 visible lines which terminal can write
// to.
term.scroll_display(Scroll::Delta(-2));
term.reset_damage();
term.goto(0, 0);
term.goto(1, 0);
term.goto(2, 0);
let display_offset = term.grid().display_offset();
let mut damaged_lines = match term.damage() {
TermDamage::Full => panic!("Expected partial damage, however got Full"),
TermDamage::Partial(damaged_lines) => damaged_lines,
};
assert_eq!(
damaged_lines.next(),
Some(LineDamageBounds { line: display_offset, left: 0, right: 0 })
);
assert_eq!(
damaged_lines.next(),
Some(LineDamageBounds { line: display_offset + 1, left: 0, right: 0 })
);
assert_eq!(damaged_lines.next(), None);
}
#[test]
fn damage_cursor_movements() {
let size = TermSize::new(10, 10);
let mut term = Term::new(Config::default(), &size, VoidListener);
let num_cols = term.columns();
// Reset terminal for partial damage tests since it's initialized as fully damaged.
term.reset_damage();
term.goto(1, 1);
// NOTE While we can use `[Term::damage]` to access terminal damage information, in the
// following tests we will be accessing `term.damage.lines` directly to avoid adding extra
// damage information (like cursor and Vi cursor), which we're not testing.
assert_eq!(term.damage.lines[0], LineDamageBounds { line: 0, left: 0, right: 0 });
assert_eq!(term.damage.lines[1], LineDamageBounds { line: 1, left: 1, right: 1 });
term.damage.reset(num_cols);
term.move_forward(3);
assert_eq!(term.damage.lines[1], LineDamageBounds { line: 1, left: 1, right: 4 });
term.damage.reset(num_cols);
term.move_backward(8);
assert_eq!(term.damage.lines[1], LineDamageBounds { line: 1, left: 0, right: 4 });
term.goto(5, 5);
term.damage.reset(num_cols);
term.backspace();
term.backspace();
assert_eq!(term.damage.lines[5], LineDamageBounds { line: 5, left: 3, right: 5 });
term.damage.reset(num_cols);
term.move_up(1);
assert_eq!(term.damage.lines[5], LineDamageBounds { line: 5, left: 3, right: 3 });
assert_eq!(term.damage.lines[4], LineDamageBounds { line: 4, left: 3, right: 3 });
term.damage.reset(num_cols);
term.move_down(1);
term.move_down(1);
assert_eq!(term.damage.lines[4], LineDamageBounds { line: 4, left: 3, right: 3 });
assert_eq!(term.damage.lines[5], LineDamageBounds { line: 5, left: 3, right: 3 });
assert_eq!(term.damage.lines[6], LineDamageBounds { line: 6, left: 3, right: 3 });
term.damage.reset(num_cols);
term.wrapline();
assert_eq!(term.damage.lines[6], LineDamageBounds { line: 6, left: 3, right: 3 });
assert_eq!(term.damage.lines[7], LineDamageBounds { line: 7, left: 0, right: 0 });
term.move_forward(3);
term.move_up(1);
term.damage.reset(num_cols);
term.linefeed();
assert_eq!(term.damage.lines[6], LineDamageBounds { line: 6, left: 3, right: 3 });
assert_eq!(term.damage.lines[7], LineDamageBounds { line: 7, left: 3, right: 3 });
term.damage.reset(num_cols);
term.carriage_return();
assert_eq!(term.damage.lines[7], LineDamageBounds { line: 7, left: 0, right: 3 });
term.damage.reset(num_cols);
term.erase_chars(5);
assert_eq!(term.damage.lines[7], LineDamageBounds { line: 7, left: 0, right: 5 });
term.damage.reset(num_cols);
term.delete_chars(3);
let right = term.columns() - 1;
assert_eq!(term.damage.lines[7], LineDamageBounds { line: 7, left: 0, right });
term.move_forward(term.columns());
term.damage.reset(num_cols);
term.move_backward_tabs(1);
assert_eq!(term.damage.lines[7], LineDamageBounds { line: 7, left: 8, right });
term.save_cursor_position();
term.goto(1, 1);
term.damage.reset(num_cols);
term.restore_cursor_position();
assert_eq!(term.damage.lines[1], LineDamageBounds { line: 1, left: 1, right: 1 });
assert_eq!(term.damage.lines[7], LineDamageBounds { line: 7, left: 8, right: 8 });
term.damage.reset(num_cols);
term.clear_line(ansi::LineClearMode::All);
assert_eq!(term.damage.lines[7], LineDamageBounds { line: 7, left: 0, right });
term.damage.reset(num_cols);
term.clear_line(ansi::LineClearMode::Left);
assert_eq!(term.damage.lines[7], LineDamageBounds { line: 7, left: 0, right: 8 });
term.damage.reset(num_cols);
term.clear_line(ansi::LineClearMode::Right);
assert_eq!(term.damage.lines[7], LineDamageBounds { line: 7, left: 8, right });
term.damage.reset(num_cols);
term.reverse_index();
assert_eq!(term.damage.lines[7], LineDamageBounds { line: 7, left: 8, right: 8 });
assert_eq!(term.damage.lines[6], LineDamageBounds { line: 6, left: 8, right: 8 });
}
#[test]
fn full_damage() {
let size = TermSize::new(100, 10);
let mut term = Term::new(Config::default(), &size, VoidListener);
assert!(term.damage.full);
for _ in 0..20 {
term.newline();
}
term.reset_damage();
term.clear_screen(ansi::ClearMode::Above);
assert!(term.damage.full);
term.reset_damage();
term.scroll_display(Scroll::Top);
assert!(term.damage.full);
term.reset_damage();
// Sequential call to scroll display without doing anything shouldn't damage.
term.scroll_display(Scroll::Top);
assert!(!term.damage.full);
term.reset_damage();
term.set_options(Config::default());
assert!(term.damage.full);
term.reset_damage();
term.scroll_down_relative(Line(5), 2);
assert!(term.damage.full);
term.reset_damage();
term.scroll_up_relative(Line(3), 2);
assert!(term.damage.full);
term.reset_damage();
term.deccolm();
assert!(term.damage.full);
term.reset_damage();
term.decaln();
assert!(term.damage.full);
term.reset_damage();
term.set_mode(NamedMode::Insert.into());
// Just setting `Insert` mode shouldn't mark terminal as damaged.
assert!(!term.damage.full);
term.reset_damage();
let color_index = 257;
term.set_color(color_index, Rgb::default());
assert!(term.damage.full);
term.reset_damage();
// Setting the same color once again shouldn't trigger full damage.
term.set_color(color_index, Rgb::default());
assert!(!term.damage.full);
term.reset_color(color_index);
assert!(term.damage.full);
term.reset_damage();
// We shouldn't trigger fully damage when cursor gets update.
term.set_color(NamedColor::Cursor as usize, Rgb::default());
assert!(!term.damage.full);
// However requesting terminal damage should mark terminal as fully damaged in `Insert`
// mode.
let _ = term.damage();
assert!(term.damage.full);
term.reset_damage();
term.unset_mode(NamedMode::Insert.into());
assert!(term.damage.full);
term.reset_damage();
// Keep this as a last check, so we don't have to deal with restoring from alt-screen.
term.swap_alt();
assert!(term.damage.full);
term.reset_damage();
let size = TermSize::new(10, 10);
term.resize(size);
assert!(term.damage.full);
}
#[test]
fn window_title() {
    // Exercise window-title handling end to end: direct set/reset, the
    // push/pop title stack, its depth cap, and full state reset.
    let size = TermSize::new(7, 17);
    let mut term = Term::new(Config::default(), &size, VoidListener);

    // Title None by default.
    assert_eq!(term.title, None);

    // Title can be set.
    term.set_title(Some("Test".into()));
    assert_eq!(term.title, Some("Test".into()));

    // Title can be pushed onto stack.
    term.push_title();
    term.set_title(Some("Next".into()));
    assert_eq!(term.title, Some("Next".into()));
    assert_eq!(term.title_stack.first().unwrap(), &Some("Test".into()));

    // Title can be popped from stack and set as the window title.
    term.pop_title();
    assert_eq!(term.title, Some("Test".into()));
    assert!(term.title_stack.is_empty());

    // Title stack doesn't grow infinitely; pushes beyond the cap are dropped.
    for _ in 0..4097 {
        term.push_title();
    }
    assert_eq!(term.title_stack.len(), 4096);

    // Title and title stack reset when terminal state is reset.
    term.push_title();
    term.reset_state();
    assert_eq!(term.title, None);
    assert!(term.title_stack.is_empty());

    // Title stack pops back to default: a pushed `None` restores an unset title.
    term.title = None;
    term.push_title();
    term.set_title(Some("Test".into()));
    term.pop_title();
    assert_eq!(term.title, None);

    // Title can be reset to default.
    term.title = Some("Test".into());
    term.set_title(None);
    assert_eq!(term.title, None);
}
#[test]
fn parse_cargo_version() {
    // The crate's own version must parse to at least 0.10.1.
    assert!(version_number(env!("CARGO_PKG_VERSION")) >= 10_01);

    // Each version component packs into two decimal digits; pre-release
    // suffixes like `-dev` are ignored.
    let cases = [
        ("0.0.1-dev", 1),
        ("0.1.2-dev", 1_02),
        ("1.2.3-dev", 1_02_03),
        ("999.99.99", 9_99_99_99),
    ];
    for &(version, expected) in cases.iter() {
        assert_eq!(version_number(version), expected);
    }
}
}
| rust | {
"argument_definitions": [
{
"definitions": [
"impl Line {\n /// Clamp a line to a grid boundary.\n #[must_use]\n pub fn grid_clamp<D: Dimensions>(self, dimensions: &D, boundary: Boundary) -> Self {\n match boundary {\n Boundary::Cursor => max(Line(0), min(dimensions.bottommost_line(), self)),\n Boundary::Grid => {\n let bottommost_line = dimensions.bottommost_line();\n let topmost_line = dimensions.topmost_line();\n max(topmost_line, min(bottommost_line, self))\n },\n Boundary::None => {\n let screen_lines = dimensions.screen_lines() as i32;\n let total_lines = dimensions.total_lines() as i32;\n\n if self >= screen_lines {\n let topmost_line = dimensions.topmost_line();\n let extra = (self.0 - screen_lines) % total_lines;\n topmost_line + extra\n } else {\n let bottommost_line = dimensions.bottommost_line();\n let extra = (self.0 - screen_lines + 1) % total_lines;\n bottommost_line + extra\n }\n },\n }\n }\n}"
],
"name": "line",
"type": "Line"
},
{
"definitions": [
"pub struct Range<Idx> {\n /// The lower bound of the range (inclusive).\n #[stable(feature = \"rust1\", since = \"1.0.0\")]\n pub start: Idx,\n /// The upper bound of the range (exclusive).\n #[stable(feature = \"rust1\", since = \"1.0.0\")]\n pub end: Idx,\n}",
"impl fmt::Display for Column {\n fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n write!(f, \"{}\", self.0)\n }\n}"
],
"name": "cols",
"type": "Range<Column>"
}
],
"end_line": 633,
"name": "line_to_string",
"signature": "fn line_to_string(\n &self,\n line: Line,\n mut cols: Range<Column>,\n include_wrapped_wide: bool,\n ) -> String",
"start_line": 572
} | {
"class_name": "impl<T> Term<T> {\n #[inline]\n pub fn scroll_display(&mut self, scroll: Scroll)\n where\n T: EventListener,\n {\n let old_display_offset = self.grid.display_offset();\n self.grid.scroll_display(scroll);\n self.event_proxy.send_event(Event::MouseCursorDirty);\n\n // Clamp vi mode cursor to the viewport.\n let viewport_start = -(self.grid.display_offset() as i32);\n let viewport_end = viewport_start + self.bottommost_line().0;\n let vi_cursor_line = &mut self.vi_mode_cursor.point.line.0;\n *vi_cursor_line = cmp::min(viewport_end, cmp::max(viewport_start, *vi_cursor_line));\n self.vi_mode_recompute_selection();\n\n // Damage everything if display offset changed.\n if old_display_offset != self.grid().display_offset() {\n self.mark_fully_damaged();\n }\n }\n\n pub fn new<D: Dimensions>(config: Config, dimensions: &D, event_proxy: T) -> Term<T> {\n let num_cols = dimensions.columns();\n let num_lines = dimensions.screen_lines();\n\n let history_size = config.scrolling_history;\n let grid = Grid::new(num_lines, num_cols, history_size);\n let inactive_grid = Grid::new(num_lines, num_cols, 0);\n\n let tabs = TabStops::new(grid.columns());\n\n let scroll_region = Line(0)..Line(grid.screen_lines() as i32);\n\n // Initialize terminal damage, covering the entire terminal upon launch.\n let damage = TermDamageState::new(num_cols, num_lines);\n\n Term {\n inactive_grid,\n scroll_region,\n event_proxy,\n damage,\n config,\n grid,\n tabs,\n inactive_keyboard_mode_stack: Default::default(),\n keyboard_mode_stack: Default::default(),\n active_charset: Default::default(),\n vi_mode_cursor: Default::default(),\n cursor_style: Default::default(),\n colors: color::Colors::default(),\n title_stack: Default::default(),\n is_focused: Default::default(),\n selection: Default::default(),\n title: Default::default(),\n mode: Default::default(),\n }\n }\n\n /// Collect the information about the changes in the lines, which\n /// could be used to minimize the amount of drawing 
operations.\n ///\n /// The user controlled elements, like `Vi` mode cursor and `Selection` are **not** part of the\n /// collected damage state. Those could easily be tracked by comparing their old and new\n /// value between adjacent frames.\n ///\n /// After reading damage [`reset_damage`] should be called.\n ///\n /// [`reset_damage`]: Self::reset_damage\n #[must_use]\n pub fn damage(&mut self) -> TermDamage<'_> {\n // Ensure the entire terminal is damaged after entering insert mode.\n // Leaving is handled in the ansi handler.\n if self.mode.contains(TermMode::INSERT) {\n self.mark_fully_damaged();\n }\n\n let previous_cursor = mem::replace(&mut self.damage.last_cursor, self.grid.cursor.point);\n\n if self.damage.full {\n return TermDamage::Full;\n }\n\n // Add information about old cursor position and new one if they are not the same, so we\n // cover everything that was produced by `Term::input`.\n if self.damage.last_cursor != previous_cursor {\n // Cursor coordinates are always inside viewport even if you have `display_offset`.\n let point = Point::new(previous_cursor.line.0 as usize, previous_cursor.column);\n self.damage.damage_point(point);\n }\n\n // Always damage current cursor.\n self.damage_cursor();\n\n // NOTE: damage which changes all the content when the display offset is non-zero (e.g.\n // scrolling) is handled via full damage.\n let display_offset = self.grid().display_offset();\n TermDamage::Partial(TermDamageIterator::new(&self.damage.lines, display_offset))\n }\n\n /// Resets the terminal damage information.\n pub fn reset_damage(&mut self) {\n self.damage.reset(self.columns());\n }\n\n #[inline]\n fn mark_fully_damaged(&mut self) {\n self.damage.full = true;\n }\n\n /// Set new options for the [`Term`].\n pub fn set_options(&mut self, options: Config)\n where\n T: EventListener,\n {\n let old_config = mem::replace(&mut self.config, options);\n\n let title_event = match &self.title {\n Some(title) => Event::Title(title.clone()),\n None => 
Event::ResetTitle,\n };\n\n self.event_proxy.send_event(title_event);\n\n if self.mode.contains(TermMode::ALT_SCREEN) {\n self.inactive_grid.update_history(self.config.scrolling_history);\n } else {\n self.grid.update_history(self.config.scrolling_history);\n }\n\n if self.config.kitty_keyboard != old_config.kitty_keyboard {\n self.keyboard_mode_stack = Vec::new();\n self.inactive_keyboard_mode_stack = Vec::new();\n self.mode.remove(TermMode::KITTY_KEYBOARD_PROTOCOL);\n }\n\n // Damage everything on config updates.\n self.mark_fully_damaged();\n }\n\n /// Convert the active selection to a String.\n pub fn selection_to_string(&self) -> Option<String> {\n let selection_range = self.selection.as_ref().and_then(|s| s.to_range(self))?;\n let SelectionRange { start, end, .. } = selection_range;\n\n let mut res = String::new();\n\n match self.selection.as_ref() {\n Some(Selection { ty: SelectionType::Block, .. }) => {\n for line in (start.line.0..end.line.0).map(Line::from) {\n res += self\n .line_to_string(line, start.column..end.column, start.column.0 != 0)\n .trim_end();\n res += \"\\n\";\n }\n\n res += self.line_to_string(end.line, start.column..end.column, true).trim_end();\n },\n Some(Selection { ty: SelectionType::Lines, .. 
}) => {\n res = self.bounds_to_string(start, end) + \"\\n\";\n },\n _ => {\n res = self.bounds_to_string(start, end);\n },\n }\n\n Some(res)\n }\n\n /// Convert range between two points to a String.\n pub fn bounds_to_string(&self, start: Point, end: Point) -> String {\n let mut res = String::new();\n\n for line in (start.line.0..=end.line.0).map(Line::from) {\n let start_col = if line == start.line { start.column } else { Column(0) };\n let end_col = if line == end.line { end.column } else { self.last_column() };\n\n res += &self.line_to_string(line, start_col..end_col, line == end.line);\n }\n\n res.strip_suffix('\\n').map(str::to_owned).unwrap_or(res)\n }\n\n /// Convert a single line in the grid to a String.\n fn line_to_string(\n &self,\n line: Line,\n mut cols: Range<Column>,\n include_wrapped_wide: bool,\n ) -> String {\n let mut text = String::new();\n\n let grid_line = &self.grid[line];\n let line_length = cmp::min(grid_line.line_length(), cols.end + 1);\n\n // Include wide char when trailing spacer is selected.\n if grid_line[cols.start].flags.contains(Flags::WIDE_CHAR_SPACER) {\n cols.start -= 1;\n }\n\n let mut tab_mode = false;\n for column in (cols.start.0..line_length.0).map(Column::from) {\n let cell = &grid_line[column];\n\n // Skip over cells until next tab-stop once a tab was found.\n if tab_mode {\n if self.tabs[column] || cell.c != ' ' {\n tab_mode = false;\n } else {\n continue;\n }\n }\n\n if cell.c == '\\t' {\n tab_mode = true;\n }\n\n if !cell.flags.intersects(Flags::WIDE_CHAR_SPACER | Flags::LEADING_WIDE_CHAR_SPACER) {\n // Push cells primary character.\n text.push(cell.c);\n\n // Push zero-width characters.\n for c in cell.zerowidth().into_iter().flatten() {\n text.push(*c);\n }\n }\n }\n\n if cols.end >= self.columns() - 1\n && (line_length.0 == 0\n || !self.grid[line][line_length - 1].flags.contains(Flags::WRAPLINE))\n {\n text.push('\\n');\n }\n\n // If wide char is not part of the selection, but leading spacer is, include it.\n if 
line_length == self.columns()\n && line_length.0 >= 2\n && grid_line[line_length - 1].flags.contains(Flags::LEADING_WIDE_CHAR_SPACER)\n && include_wrapped_wide\n {\n text.push(self.grid[line - 1i32][Column(0)].c);\n }\n\n text\n }\n\n /// Terminal content required for rendering.\n #[inline]\n pub fn renderable_content(&self) -> RenderableContent<'_>\n where\n T: EventListener,\n {\n RenderableContent::new(self)\n }\n\n /// Access to the raw grid data structure.\n pub fn grid(&self) -> &Grid<Cell> {\n &self.grid\n }\n\n /// Mutable access to the raw grid data structure.\n pub fn grid_mut(&mut self) -> &mut Grid<Cell> {\n &mut self.grid\n }\n\n /// Resize terminal to new dimensions.\n pub fn resize<S: Dimensions>(&mut self, size: S) {\n let old_cols = self.columns();\n let old_lines = self.screen_lines();\n\n let num_cols = size.columns();\n let num_lines = size.screen_lines();\n\n if old_cols == num_cols && old_lines == num_lines {\n debug!(\"Term::resize dimensions unchanged\");\n return;\n }\n\n debug!(\"New num_cols is {} and num_lines is {}\", num_cols, num_lines);\n\n // Move vi mode cursor with the content.\n let history_size = self.history_size();\n let mut delta = num_lines as i32 - old_lines as i32;\n let min_delta = cmp::min(0, num_lines as i32 - self.grid.cursor.point.line.0 - 1);\n delta = cmp::min(cmp::max(delta, min_delta), history_size as i32);\n self.vi_mode_cursor.point.line += delta;\n\n let is_alt = self.mode.contains(TermMode::ALT_SCREEN);\n self.grid.resize(!is_alt, num_lines, num_cols);\n self.inactive_grid.resize(is_alt, num_lines, num_cols);\n\n // Invalidate selection and tabs only when necessary.\n if old_cols != num_cols {\n self.selection = None;\n\n // Recreate tabs list.\n self.tabs.resize(num_cols);\n } else if let Some(selection) = self.selection.take() {\n let max_lines = cmp::max(num_lines, old_lines) as i32;\n let range = Line(0)..Line(max_lines);\n self.selection = selection.rotate(self, &range, -delta);\n }\n\n // Clamp vi cursor 
to viewport.\n let vi_point = self.vi_mode_cursor.point;\n let viewport_top = Line(-(self.grid.display_offset() as i32));\n let viewport_bottom = viewport_top + self.bottommost_line();\n self.vi_mode_cursor.point.line =\n cmp::max(cmp::min(vi_point.line, viewport_bottom), viewport_top);\n self.vi_mode_cursor.point.column = cmp::min(vi_point.column, self.last_column());\n\n // Reset scrolling region.\n self.scroll_region = Line(0)..Line(self.screen_lines() as i32);\n\n // Resize damage information.\n self.damage.resize(num_cols, num_lines);\n }\n\n /// Active terminal modes.\n #[inline]\n pub fn mode(&self) -> &TermMode {\n &self.mode\n }\n\n /// Swap primary and alternate screen buffer.\n pub fn swap_alt(&mut self) {\n if !self.mode.contains(TermMode::ALT_SCREEN) {\n // Set alt screen cursor to the current primary screen cursor.\n self.inactive_grid.cursor = self.grid.cursor.clone();\n\n // Drop information about the primary screens saved cursor.\n self.grid.saved_cursor = self.grid.cursor.clone();\n\n // Reset alternate screen contents.\n self.inactive_grid.reset_region(..);\n }\n\n mem::swap(&mut self.keyboard_mode_stack, &mut self.inactive_keyboard_mode_stack);\n let keyboard_mode =\n self.keyboard_mode_stack.last().copied().unwrap_or(KeyboardModes::NO_MODE).into();\n self.set_keyboard_mode(keyboard_mode, KeyboardModesApplyBehavior::Replace);\n\n mem::swap(&mut self.grid, &mut self.inactive_grid);\n self.mode ^= TermMode::ALT_SCREEN;\n self.selection = None;\n self.mark_fully_damaged();\n }\n\n /// Scroll screen down.\n ///\n /// Text moves down; clear at bottom\n /// Expects origin to be in scroll range.\n #[inline]\n fn scroll_down_relative(&mut self, origin: Line, mut lines: usize) {\n trace!(\"Scrolling down relative: origin={}, lines={}\", origin, lines);\n\n lines = cmp::min(lines, (self.scroll_region.end - self.scroll_region.start).0 as usize);\n lines = cmp::min(lines, (self.scroll_region.end - origin).0 as usize);\n\n let region = 
origin..self.scroll_region.end;\n\n // Scroll selection.\n self.selection =\n self.selection.take().and_then(|s| s.rotate(self, ®ion, -(lines as i32)));\n\n // Scroll vi mode cursor.\n let line = &mut self.vi_mode_cursor.point.line;\n if region.start <= *line && region.end > *line {\n *line = cmp::min(*line + lines, region.end - 1);\n }\n\n // Scroll between origin and bottom\n self.grid.scroll_down(®ion, lines);\n self.mark_fully_damaged();\n }\n\n /// Scroll screen up\n ///\n /// Text moves up; clear at top\n /// Expects origin to be in scroll range.\n #[inline]\n fn scroll_up_relative(&mut self, origin: Line, mut lines: usize) {\n trace!(\"Scrolling up relative: origin={}, lines={}\", origin, lines);\n\n lines = cmp::min(lines, (self.scroll_region.end - self.scroll_region.start).0 as usize);\n\n let region = origin..self.scroll_region.end;\n\n // Scroll selection.\n self.selection = self.selection.take().and_then(|s| s.rotate(self, ®ion, lines as i32));\n\n self.grid.scroll_up(®ion, lines);\n\n // Scroll vi mode cursor.\n let viewport_top = Line(-(self.grid.display_offset() as i32));\n let top = if region.start == 0 { viewport_top } else { region.start };\n let line = &mut self.vi_mode_cursor.point.line;\n if (top <= *line) && region.end > *line {\n *line = cmp::max(*line - lines, top);\n }\n self.mark_fully_damaged();\n }\n\n fn deccolm(&mut self)\n where\n T: EventListener,\n {\n // Setting 132 column font makes no sense, but run the other side effects.\n // Clear scrolling region.\n self.set_scrolling_region(1, None);\n\n // Clear grid.\n self.grid.reset_region(..);\n self.mark_fully_damaged();\n }\n\n #[inline]\n pub fn exit(&mut self)\n where\n T: EventListener,\n {\n self.event_proxy.send_event(Event::Exit);\n }\n\n /// Toggle the vi mode.\n #[inline]\n pub fn toggle_vi_mode(&mut self)\n where\n T: EventListener,\n {\n self.mode ^= TermMode::VI;\n\n if self.mode.contains(TermMode::VI) {\n let display_offset = self.grid.display_offset() as i32;\n if 
self.grid.cursor.point.line > self.bottommost_line() - display_offset {\n // Move cursor to top-left if terminal cursor is not visible.\n let point = Point::new(Line(-display_offset), Column(0));\n self.vi_mode_cursor = ViModeCursor::new(point);\n } else {\n // Reset vi mode cursor position to match primary cursor.\n self.vi_mode_cursor = ViModeCursor::new(self.grid.cursor.point);\n }\n }\n\n // Update UI about cursor blinking state changes.\n self.event_proxy.send_event(Event::CursorBlinkingChange);\n }\n\n /// Move vi mode cursor.\n #[inline]\n pub fn vi_motion(&mut self, motion: ViMotion)\n where\n T: EventListener,\n {\n // Require vi mode to be active.\n if !self.mode.contains(TermMode::VI) {\n return;\n }\n\n // Move cursor.\n self.vi_mode_cursor = self.vi_mode_cursor.motion(self, motion);\n self.vi_mode_recompute_selection();\n }\n\n /// Move vi cursor to a point in the grid.\n #[inline]\n pub fn vi_goto_point(&mut self, point: Point)\n where\n T: EventListener,\n {\n // Move viewport to make point visible.\n self.scroll_to_point(point);\n\n // Move vi cursor to the point.\n self.vi_mode_cursor.point = point;\n\n self.vi_mode_recompute_selection();\n }\n\n /// Update the active selection to match the vi mode cursor position.\n #[inline]\n fn vi_mode_recompute_selection(&mut self) {\n // Require vi mode to be active.\n if !self.mode.contains(TermMode::VI) {\n return;\n }\n\n // Update only if non-empty selection is present.\n if let Some(selection) = self.selection.as_mut().filter(|s| !s.is_empty()) {\n selection.update(self.vi_mode_cursor.point, Side::Left);\n selection.include_all();\n }\n }\n\n /// Scroll display to point if it is outside of viewport.\n pub fn scroll_to_point(&mut self, point: Point)\n where\n T: EventListener,\n {\n let display_offset = self.grid.display_offset() as i32;\n let screen_lines = self.grid.screen_lines() as i32;\n\n if point.line < -display_offset {\n let lines = point.line + display_offset;\n 
self.scroll_display(Scroll::Delta(-lines.0));\n } else if point.line >= (screen_lines - display_offset) {\n let lines = point.line + display_offset - screen_lines + 1i32;\n self.scroll_display(Scroll::Delta(-lines.0));\n }\n }\n\n /// Jump to the end of a wide cell.\n pub fn expand_wide(&self, mut point: Point, direction: Direction) -> Point {\n let flags = self.grid[point.line][point.column].flags;\n\n match direction {\n Direction::Right if flags.contains(Flags::LEADING_WIDE_CHAR_SPACER) => {\n point.column = Column(1);\n point.line += 1;\n },\n Direction::Right if flags.contains(Flags::WIDE_CHAR) => {\n point.column = cmp::min(point.column + 1, self.last_column());\n },\n Direction::Left if flags.intersects(Flags::WIDE_CHAR | Flags::WIDE_CHAR_SPACER) => {\n if flags.contains(Flags::WIDE_CHAR_SPACER) {\n point.column -= 1;\n }\n\n let prev = point.sub(self, Boundary::Grid, 1);\n if self.grid[prev].flags.contains(Flags::LEADING_WIDE_CHAR_SPACER) {\n point = prev;\n }\n },\n _ => (),\n }\n\n point\n }\n\n #[inline]\n pub fn semantic_escape_chars(&self) -> &str {\n &self.config.semantic_escape_chars\n }\n\n #[cfg(test)]\n pub(crate) fn set_semantic_escape_chars(&mut self, semantic_escape_chars: &str) {\n self.config.semantic_escape_chars = semantic_escape_chars.into();\n }\n\n /// Active terminal cursor style.\n ///\n /// While vi mode is active, this will automatically return the vi mode cursor style.\n #[inline]\n pub fn cursor_style(&self) -> CursorStyle {\n let cursor_style = self.cursor_style.unwrap_or(self.config.default_cursor_style);\n\n if self.mode.contains(TermMode::VI) {\n self.config.vi_mode_cursor_style.unwrap_or(cursor_style)\n } else {\n cursor_style\n }\n }\n\n pub fn colors(&self) -> &Colors {\n &self.colors\n }\n\n /// Insert a linebreak at the current cursor position.\n #[inline]\n fn wrapline(&mut self)\n where\n T: EventListener,\n {\n if !self.mode.contains(TermMode::LINE_WRAP) {\n return;\n }\n\n trace!(\"Wrapping input\");\n\n 
self.grid.cursor_cell().flags.insert(Flags::WRAPLINE);\n\n if self.grid.cursor.point.line + 1 >= self.scroll_region.end {\n self.linefeed();\n } else {\n self.damage_cursor();\n self.grid.cursor.point.line += 1;\n }\n\n self.grid.cursor.point.column = Column(0);\n self.grid.cursor.input_needs_wrap = false;\n self.damage_cursor();\n }\n\n /// Write `c` to the cell at the cursor position.\n #[inline(always)]\n fn write_at_cursor(&mut self, c: char) {\n let c = self.grid.cursor.charsets[self.active_charset].map(c);\n let fg = self.grid.cursor.template.fg;\n let bg = self.grid.cursor.template.bg;\n let flags = self.grid.cursor.template.flags;\n let extra = self.grid.cursor.template.extra.clone();\n\n let mut cursor_cell = self.grid.cursor_cell();\n\n // Clear all related cells when overwriting a fullwidth cell.\n if cursor_cell.flags.intersects(Flags::WIDE_CHAR | Flags::WIDE_CHAR_SPACER) {\n // Remove wide char and spacer.\n let wide = cursor_cell.flags.contains(Flags::WIDE_CHAR);\n let point = self.grid.cursor.point;\n if wide && point.column < self.last_column() {\n self.grid[point.line][point.column + 1].flags.remove(Flags::WIDE_CHAR_SPACER);\n } else if point.column > 0 {\n self.grid[point.line][point.column - 1].clear_wide();\n }\n\n // Remove leading spacers.\n if point.column <= 1 && point.line != self.topmost_line() {\n let column = self.last_column();\n self.grid[point.line - 1i32][column].flags.remove(Flags::LEADING_WIDE_CHAR_SPACER);\n }\n\n cursor_cell = self.grid.cursor_cell();\n }\n\n cursor_cell.c = c;\n cursor_cell.fg = fg;\n cursor_cell.bg = bg;\n cursor_cell.flags = flags;\n cursor_cell.extra = extra;\n }\n\n #[inline]\n fn damage_cursor(&mut self) {\n // The normal cursor coordinates are always in viewport.\n let point =\n Point::new(self.grid.cursor.point.line.0 as usize, self.grid.cursor.point.column);\n self.damage.damage_point(point);\n }\n\n #[inline]\n fn set_keyboard_mode(&mut self, mode: TermMode, apply: KeyboardModesApplyBehavior) {\n let 
active_mode = self.mode & TermMode::KITTY_KEYBOARD_PROTOCOL;\n self.mode &= !TermMode::KITTY_KEYBOARD_PROTOCOL;\n let new_mode = match apply {\n KeyboardModesApplyBehavior::Replace => mode,\n KeyboardModesApplyBehavior::Union => active_mode.union(mode),\n KeyboardModesApplyBehavior::Difference => active_mode.difference(mode),\n };\n trace!(\"Setting keyboard mode to {new_mode:?}\");\n self.mode |= new_mode;\n }\n}",
"class_signature": "impl<T> Term<T>"
} |
expand_wide | alacritty-master/alacritty_terminal/src/term/mod.rs | pub fn expand_wide(&self, mut point: Point, direction: Direction) -> Point {
    // Jump to the end of a wide cell, moving in `direction`.
    let flags = self.grid[point.line][point.column].flags;

    match direction {
        // A leading spacer occupies the last column before a wrapped wide
        // char; the char itself starts on the next line, so land past it.
        Direction::Right if flags.contains(Flags::LEADING_WIDE_CHAR_SPACER) => {
            point.column = Column(1);
            point.line += 1;
        },
        // Step over the trailing spacer that follows a wide char, clamped to
        // the last column.
        Direction::Right if flags.contains(Flags::WIDE_CHAR) => {
            point.column = cmp::min(point.column + 1, self.last_column());
        },
        Direction::Left if flags.intersects(Flags::WIDE_CHAR | Flags::WIDE_CHAR_SPACER) => {
            // From the spacer, first move onto the wide char itself.
            if flags.contains(Flags::WIDE_CHAR_SPACER) {
                point.column -= 1;
            }

            // If the cell before the wide char is a leading spacer, the char
            // wrapped: continue onto the previous line.
            let prev = point.sub(self, Boundary::Grid, 1);
            if self.grid[prev].flags.contains(Flags::LEADING_WIDE_CHAR_SPACER) {
                point = prev;
            }
        },
        // Narrow cells need no adjustment.
        _ => (),
    }

    point
} | //! Exports the `Term` type which is a high-level API for the Grid.
use std::ops::{Index, IndexMut, Range};
use std::sync::Arc;
use std::{cmp, mem, ptr, slice, str};
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
use base64::engine::general_purpose::STANDARD as Base64;
use base64::Engine;
use bitflags::bitflags;
use log::{debug, trace};
use unicode_width::UnicodeWidthChar;
use crate::event::{Event, EventListener};
use crate::grid::{Dimensions, Grid, GridIterator, Scroll};
use crate::index::{self, Boundary, Column, Direction, Line, Point, Side};
use crate::selection::{Selection, SelectionRange, SelectionType};
use crate::term::cell::{Cell, Flags, LineLength};
use crate::term::color::Colors;
use crate::vi_mode::{ViModeCursor, ViMotion};
use crate::vte::ansi::{
self, Attr, CharsetIndex, Color, CursorShape, CursorStyle, Handler, Hyperlink, KeyboardModes,
KeyboardModesApplyBehavior, NamedColor, NamedMode, NamedPrivateMode, PrivateMode, Rgb,
StandardCharset,
};
pub mod cell;
pub mod color;
pub mod search;
/// Minimum number of columns.
///
/// A minimum of 2 is necessary to hold fullwidth unicode characters.
pub const MIN_COLUMNS: usize = 2;

/// Minimum number of visible lines.
pub const MIN_SCREEN_LINES: usize = 1;

/// Max size of the window title stack.
const TITLE_STACK_MAX_DEPTH: usize = 4096;

/// Default semantic escape characters.
pub const SEMANTIC_ESCAPE_CHARS: &str = ",│`|:\"' ()[]{}<>\t";

/// Max size of the keyboard mode stack; shares the title stack's depth limit.
const KEYBOARD_MODE_STACK_MAX_DEPTH: usize = TITLE_STACK_MAX_DEPTH;

/// Default tab interval, corresponding to terminfo `it` value.
const INITIAL_TABSTOPS: usize = 8;
bitflags! {
    /// Terminal mode flags, toggled via ANSI/DEC mode escape sequences and
    /// the kitty keyboard protocol.
    #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
    pub struct TermMode: u32 {
        const NONE = 0;
        const SHOW_CURSOR = 1;
        const APP_CURSOR = 1 << 1;
        const APP_KEYPAD = 1 << 2;
        const MOUSE_REPORT_CLICK = 1 << 3;
        const BRACKETED_PASTE = 1 << 4;
        const SGR_MOUSE = 1 << 5;
        const MOUSE_MOTION = 1 << 6;
        const LINE_WRAP = 1 << 7;
        const LINE_FEED_NEW_LINE = 1 << 8;
        const ORIGIN = 1 << 9;
        const INSERT = 1 << 10;
        const FOCUS_IN_OUT = 1 << 11;
        const ALT_SCREEN = 1 << 12;
        const MOUSE_DRAG = 1 << 13;
        const UTF8_MOUSE = 1 << 14;
        const ALTERNATE_SCROLL = 1 << 15;
        const VI = 1 << 16;
        const URGENCY_HINTS = 1 << 17;
        const DISAMBIGUATE_ESC_CODES = 1 << 18;
        const REPORT_EVENT_TYPES = 1 << 19;
        const REPORT_ALTERNATE_KEYS = 1 << 20;
        const REPORT_ALL_KEYS_AS_ESC = 1 << 21;
        const REPORT_ASSOCIATED_TEXT = 1 << 22;
        /// Mask covering all mouse reporting modes.
        const MOUSE_MODE = Self::MOUSE_REPORT_CLICK.bits() | Self::MOUSE_MOTION.bits() | Self::MOUSE_DRAG.bits();
        /// Mask covering all kitty keyboard protocol flags.
        const KITTY_KEYBOARD_PROTOCOL = Self::DISAMBIGUATE_ESC_CODES.bits()
            | Self::REPORT_EVENT_TYPES.bits()
            | Self::REPORT_ALTERNATE_KEYS.bits()
            | Self::REPORT_ALL_KEYS_AS_ESC.bits()
            | Self::REPORT_ASSOCIATED_TEXT.bits();
        const ANY = u32::MAX;
    }
}
impl From<KeyboardModes> for TermMode {
    /// Translate kitty keyboard protocol flags into their [`TermMode`] counterparts.
    fn from(value: KeyboardModes) -> Self {
        // Each protocol flag maps one-to-one onto a terminal mode flag.
        const PAIRS: [(KeyboardModes, TermMode); 5] = [
            (KeyboardModes::DISAMBIGUATE_ESC_CODES, TermMode::DISAMBIGUATE_ESC_CODES),
            (KeyboardModes::REPORT_EVENT_TYPES, TermMode::REPORT_EVENT_TYPES),
            (KeyboardModes::REPORT_ALTERNATE_KEYS, TermMode::REPORT_ALTERNATE_KEYS),
            (KeyboardModes::REPORT_ALL_KEYS_AS_ESC, TermMode::REPORT_ALL_KEYS_AS_ESC),
            (KeyboardModes::REPORT_ASSOCIATED_TEXT, TermMode::REPORT_ASSOCIATED_TEXT),
        ];

        let mut mode = Self::empty();
        for &(keyboard_flag, term_flag) in PAIRS.iter() {
            mode.set(term_flag, value.contains(keyboard_flag));
        }
        mode
    }
}
impl Default for TermMode {
    /// Mode flags active in a freshly created terminal: visible cursor, line
    /// wrapping, alternate scroll, and urgency hints.
    fn default() -> TermMode {
        TermMode::SHOW_CURSOR
            .union(TermMode::LINE_WRAP)
            .union(TermMode::ALTERNATE_SCROLL)
            .union(TermMode::URGENCY_HINTS)
    }
}
/// Convert a terminal point to a viewport relative point.
///
/// Returns `None` when the point lies above the visible region.
#[inline]
pub fn point_to_viewport(display_offset: usize, point: Point) -> Option<Point<usize>> {
    let viewport_line = point.line.0 + display_offset as i32;
    match usize::try_from(viewport_line) {
        Ok(line) => Some(Point::new(line, point.column)),
        Err(_) => None,
    }
}
/// Convert a viewport relative point to a terminal point.
#[inline]
pub fn viewport_to_point(display_offset: usize, point: Point<usize>) -> Point {
    // Shift back down by the scrollback offset to recover the grid line.
    Point::new(Line(point.line as i32) - display_offset, point.column)
}
/// Damage tracked for a single terminal line.
///
/// Both bounds are column indices; `left > right` encodes an undamaged line
/// (see [`LineDamageBounds::is_damaged`]).
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct LineDamageBounds {
    /// Damaged line number.
    pub line: usize,
    /// Leftmost damaged column.
    pub left: usize,
    /// Rightmost damaged column.
    pub right: usize,
}
impl LineDamageBounds {
    /// Damage bounds covering columns `left..=right` on `line`.
    #[inline]
    pub fn new(line: usize, left: usize, right: usize) -> Self {
        Self { line, left, right }
    }

    /// Bounds representing no damage: `left` is past the last column and
    /// `right` is zero, so [`Self::is_damaged`] reports `false`.
    #[inline]
    pub fn undamaged(line: usize, num_cols: usize) -> Self {
        Self::new(line, num_cols, 0)
    }

    /// Clear the damage recorded for this line.
    #[inline]
    pub fn reset(&mut self, num_cols: usize) {
        *self = Self::undamaged(self.line, num_cols);
    }

    /// Grow the damaged span so it covers at least `left..=right`.
    #[inline]
    pub fn expand(&mut self, left: usize, right: usize) {
        self.left = cmp::min(left, self.left);
        self.right = cmp::max(right, self.right);
    }

    /// Whether any column on this line is damaged.
    #[inline]
    pub fn is_damaged(&self) -> bool {
        self.left <= self.right
    }
}
/// Terminal damage information collected since the last [`Term::reset_damage`] call.
#[derive(Debug)]
pub enum TermDamage<'a> {
    /// The entire terminal is damaged.
    Full,

    /// Iterator over damaged lines in the terminal.
    ///
    /// Lines are yielded in viewport coordinates (see [`TermDamageIterator`]).
    Partial(TermDamageIterator<'a>),
}
/// Iterator over the terminal's viewport damaged lines.
#[derive(Clone, Debug)]
pub struct TermDamageIterator<'a> {
    /// Remaining damage entries, restricted to lines visible in the viewport.
    line_damage: slice::Iter<'a, LineDamageBounds>,
    /// Scrollback offset added to each yielded line to make it viewport-relative.
    display_offset: usize,
}
impl<'a> TermDamageIterator<'a> {
    /// Create an iterator over the damaged lines currently visible.
    pub fn new(line_damage: &'a [LineDamageBounds], display_offset: usize) -> Self {
        // Filter out invisible damage: only the first `len - display_offset`
        // entries can be on screen.
        let visible = line_damage.len().saturating_sub(display_offset);
        Self { display_offset, line_damage: line_damage[..visible].iter() }
    }
}
impl Iterator for TermDamageIterator<'_> {
    type Item = LineDamageBounds;

    fn next(&mut self) -> Option<Self::Item> {
        // Skip undamaged lines, translating damaged ones into viewport space.
        for damage in self.line_damage.by_ref() {
            if damage.is_damaged() {
                return Some(LineDamageBounds::new(
                    damage.line + self.display_offset,
                    damage.left,
                    damage.right,
                ));
            }
        }

        None
    }
}
/// State of the terminal damage.
struct TermDamageState {
    /// Hint whether terminal should be damaged entirely regardless of the actual damage changes.
    full: bool,

    /// Information about damage on terminal lines.
    lines: Vec<LineDamageBounds>,

    /// Old terminal cursor point.
    ///
    /// Kept so the cell the cursor previously occupied can be damaged once it moves.
    last_cursor: Point,
}
impl TermDamageState {
    /// Damage state for a `num_lines` x `num_cols` terminal.
    ///
    /// Starts fully damaged so the first frame is drawn completely.
    fn new(num_cols: usize, num_lines: usize) -> Self {
        let mut lines = Vec::with_capacity(num_lines);
        for line in 0..num_lines {
            lines.push(LineDamageBounds::undamaged(line, num_cols));
        }

        Self { full: true, lines, last_cursor: Default::default() }
    }

    #[inline]
    fn resize(&mut self, num_cols: usize, num_lines: usize) {
        // Reset point, so old cursor won't end up outside of the viewport.
        self.last_cursor = Default::default();
        self.full = true;
        self.lines =
            (0..num_lines).map(|line| LineDamageBounds::undamaged(line, num_cols)).collect();
    }

    /// Damage point inside of the viewport.
    #[inline]
    fn damage_point(&mut self, point: Point<usize>) {
        self.damage_line(point.line, point.column.0, point.column.0);
    }

    /// Expand `line`'s damage to span at least `left` to `right` column.
    #[inline]
    fn damage_line(&mut self, line: usize, left: usize, right: usize) {
        self.lines[line].expand(left, right);
    }

    /// Reset information about terminal damage.
    fn reset(&mut self, num_cols: usize) {
        self.full = false;
        for line in self.lines.iter_mut() {
            line.reset(num_cols);
        }
    }
}
/// Terminal state: grid contents, modes, colors, selection and damage tracking.
pub struct Term<T> {
    /// Terminal focus controlling the cursor shape.
    pub is_focused: bool,

    /// Cursor for keyboard selection.
    pub vi_mode_cursor: ViModeCursor,

    /// Currently active selection, if any.
    pub selection: Option<Selection>,

    /// Currently active grid.
    ///
    /// Tracks the screen buffer currently in use. While the alternate screen buffer is active,
    /// this will be the alternate grid. Otherwise it is the primary screen buffer.
    grid: Grid<Cell>,

    /// Currently inactive grid.
    ///
    /// Opposite of the active grid. While the alternate screen buffer is active, this will be the
    /// primary grid. Otherwise it is the alternate screen buffer.
    inactive_grid: Grid<Cell>,

    /// Index into `charsets`, pointing to what ASCII is currently being mapped to.
    active_charset: CharsetIndex,

    /// Tabstops.
    tabs: TabStops,

    /// Mode flags.
    mode: TermMode,

    /// Scroll region.
    ///
    /// Range going from top to bottom of the terminal, indexed from the top of the viewport.
    scroll_region: Range<Line>,

    /// Modified terminal colors.
    colors: Colors,

    /// Current style of the cursor.
    cursor_style: Option<CursorStyle>,

    /// Proxy for sending events to the event loop.
    event_proxy: T,

    /// Current title of the window.
    title: Option<String>,

    /// Stack of saved window titles. When a title is popped from this stack, the `title` for the
    /// term is set.
    title_stack: Vec<Option<String>>,

    /// The stack for the keyboard modes.
    keyboard_mode_stack: Vec<KeyboardModes>,

    /// Currently inactive keyboard mode stack.
    inactive_keyboard_mode_stack: Vec<KeyboardModes>,

    /// Information about damaged cells.
    damage: TermDamageState,

    /// Config directly for the terminal.
    config: Config,
}
/// Configuration options for the [`Term`].
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct Config {
    /// The maximum amount of scrolling history.
    pub scrolling_history: usize,
    /// Default cursor style to reset the cursor to.
    pub default_cursor_style: CursorStyle,
    /// Cursor style for Vi mode.
    pub vi_mode_cursor_style: Option<CursorStyle>,
    /// The characters which terminate semantic selection.
    ///
    /// The default value is [`SEMANTIC_ESCAPE_CHARS`].
    pub semantic_escape_chars: String,
    /// Whether to enable kitty keyboard protocol.
    pub kitty_keyboard: bool,
    /// OSC52 support mode.
    pub osc52: Osc52,
}
impl Default for Config {
    /// Sensible defaults: 10k lines of scrollback, standard semantic escape
    /// characters, everything else at its type's default.
    fn default() -> Self {
        let semantic_escape_chars = SEMANTIC_ESCAPE_CHARS.to_owned();
        Self {
            semantic_escape_chars,
            scrolling_history: 10000,
            default_cursor_style: Default::default(),
            vi_mode_cursor_style: Default::default(),
            kitty_keyboard: Default::default(),
            osc52: Default::default(),
        }
    }
}
/// OSC 52 behavior.
///
/// Controls whether the clipboard-access escape sequence is honored, and in
/// which direction (copy into the clipboard, paste out of it, or both).
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize), serde(rename_all = "lowercase"))]
pub enum Osc52 {
    /// The handling of the escape sequence is disabled.
    Disabled,
    /// Only copy sequence is accepted.
    ///
    /// This option is the default as a compromise between entirely
    /// disabling it (the most secure) and allowing `paste` (the less secure).
    #[default]
    OnlyCopy,
    /// Only paste sequence is accepted.
    OnlyPaste,
    /// Both are accepted.
    CopyPaste,
}
impl<T> Term<T> {
    /// Scroll the visible display region, clamping the vi mode cursor into the
    /// new viewport and damaging everything if the offset actually changed.
    #[inline]
    pub fn scroll_display(&mut self, scroll: Scroll)
    where
        T: EventListener,
    {
        let old_display_offset = self.grid.display_offset();
        self.grid.scroll_display(scroll);
        self.event_proxy.send_event(Event::MouseCursorDirty);
        // Clamp vi mode cursor to the viewport.
        let viewport_start = -(self.grid.display_offset() as i32);
        let viewport_end = viewport_start + self.bottommost_line().0;
        let vi_cursor_line = &mut self.vi_mode_cursor.point.line.0;
        *vi_cursor_line = cmp::min(viewport_end, cmp::max(viewport_start, *vi_cursor_line));
        self.vi_mode_recompute_selection();
        // Damage everything if display offset changed.
        if old_display_offset != self.grid().display_offset() {
            self.mark_fully_damaged();
        }
    }
    /// Create a new terminal with the given dimensions and event proxy.
    ///
    /// The primary grid gets the configured scrollback history; the alternate
    /// grid has none, and the entire screen starts fully damaged.
    pub fn new<D: Dimensions>(config: Config, dimensions: &D, event_proxy: T) -> Term<T> {
        let num_cols = dimensions.columns();
        let num_lines = dimensions.screen_lines();
        let history_size = config.scrolling_history;
        let grid = Grid::new(num_lines, num_cols, history_size);
        let inactive_grid = Grid::new(num_lines, num_cols, 0);
        let tabs = TabStops::new(grid.columns());
        let scroll_region = Line(0)..Line(grid.screen_lines() as i32);
        // Initialize terminal damage, covering the entire terminal upon launch.
        let damage = TermDamageState::new(num_cols, num_lines);
        Term {
            inactive_grid,
            scroll_region,
            event_proxy,
            damage,
            config,
            grid,
            tabs,
            inactive_keyboard_mode_stack: Default::default(),
            keyboard_mode_stack: Default::default(),
            active_charset: Default::default(),
            vi_mode_cursor: Default::default(),
            cursor_style: Default::default(),
            colors: color::Colors::default(),
            title_stack: Default::default(),
            is_focused: Default::default(),
            selection: Default::default(),
            title: Default::default(),
            mode: Default::default(),
        }
    }
    /// Collect the information about the changes in the lines, which
    /// could be used to minimize the amount of drawing operations.
    ///
    /// The user controlled elements, like `Vi` mode cursor and `Selection` are **not** part of the
    /// collected damage state. Those could easily be tracked by comparing their old and new
    /// value between adjacent frames.
    ///
    /// After reading damage [`reset_damage`] should be called.
    ///
    /// [`reset_damage`]: Self::reset_damage
    #[must_use]
    pub fn damage(&mut self) -> TermDamage<'_> {
        // Ensure the entire terminal is damaged after entering insert mode.
        // Leaving is handled in the ansi handler.
        if self.mode.contains(TermMode::INSERT) {
            self.mark_fully_damaged();
        }
        // Record the new cursor position while keeping the previous one for comparison.
        let previous_cursor = mem::replace(&mut self.damage.last_cursor, self.grid.cursor.point);
        if self.damage.full {
            return TermDamage::Full;
        }
        // Add information about old cursor position and new one if they are not the same, so we
        // cover everything that was produced by `Term::input`.
        if self.damage.last_cursor != previous_cursor {
            // Cursor coordinates are always inside viewport even if you have `display_offset`.
            let point = Point::new(previous_cursor.line.0 as usize, previous_cursor.column);
            self.damage.damage_point(point);
        }
        // Always damage current cursor.
        self.damage_cursor();
        // NOTE: damage which changes all the content when the display offset is non-zero (e.g.
        // scrolling) is handled via full damage.
        let display_offset = self.grid().display_offset();
        TermDamage::Partial(TermDamageIterator::new(&self.damage.lines, display_offset))
    }
/// Resets the terminal damage information.
pub fn reset_damage(&mut self) {
self.damage.reset(self.columns());
}
    /// Flag the entire terminal as damaged, forcing a full repaint.
    #[inline]
    fn mark_fully_damaged(&mut self) {
        self.damage.full = true;
    }
    /// Set new options for the [`Term`].
    ///
    /// Re-emits the title event, resizes scrollback history on the primary
    /// grid, and resets kitty keyboard state if the protocol toggle changed.
    pub fn set_options(&mut self, options: Config)
    where
        T: EventListener,
    {
        let old_config = mem::replace(&mut self.config, options);
        let title_event = match &self.title {
            Some(title) => Event::Title(title.clone()),
            None => Event::ResetTitle,
        };
        self.event_proxy.send_event(title_event);
        // Only the primary grid carries scrollback history.
        if self.mode.contains(TermMode::ALT_SCREEN) {
            self.inactive_grid.update_history(self.config.scrolling_history);
        } else {
            self.grid.update_history(self.config.scrolling_history);
        }
        if self.config.kitty_keyboard != old_config.kitty_keyboard {
            self.keyboard_mode_stack = Vec::new();
            self.inactive_keyboard_mode_stack = Vec::new();
            self.mode.remove(TermMode::KITTY_KEYBOARD_PROTOCOL);
        }
        // Damage everything on config updates.
        self.mark_fully_damaged();
    }
    /// Convert the active selection to a String.
    ///
    /// Returns `None` when there is no selection or it resolves to an empty
    /// range. Block selections are extracted column-wise per line; line
    /// selections get a trailing newline; simple selections span the bounds.
    pub fn selection_to_string(&self) -> Option<String> {
        let selection_range = self.selection.as_ref().and_then(|s| s.to_range(self))?;
        let SelectionRange { start, end, .. } = selection_range;
        let mut res = String::new();
        match self.selection.as_ref() {
            Some(Selection { ty: SelectionType::Block, .. }) => {
                for line in (start.line.0..end.line.0).map(Line::from) {
                    res += self
                        .line_to_string(line, start.column..end.column, start.column.0 != 0)
                        .trim_end();
                    res += "\n";
                }
                res += self.line_to_string(end.line, start.column..end.column, true).trim_end();
            },
            Some(Selection { ty: SelectionType::Lines, .. }) => {
                res = self.bounds_to_string(start, end) + "\n";
            },
            _ => {
                res = self.bounds_to_string(start, end);
            },
        }
        Some(res)
    }
/// Convert range between two points to a String.
pub fn bounds_to_string(&self, start: Point, end: Point) -> String {
let mut res = String::new();
for line in (start.line.0..=end.line.0).map(Line::from) {
let start_col = if line == start.line { start.column } else { Column(0) };
let end_col = if line == end.line { end.column } else { self.last_column() };
res += &self.line_to_string(line, start_col..end_col, line == end.line);
}
res.strip_suffix('\n').map(str::to_owned).unwrap_or(res)
}
    /// Convert a single line in the grid to a String.
    ///
    /// `cols` selects the column range to extract; `include_wrapped_wide`
    /// controls whether a wide character wrapped onto the previous line (via a
    /// leading spacer) is appended at the end.
    fn line_to_string(
        &self,
        line: Line,
        mut cols: Range<Column>,
        include_wrapped_wide: bool,
    ) -> String {
        let mut text = String::new();
        let grid_line = &self.grid[line];
        let line_length = cmp::min(grid_line.line_length(), cols.end + 1);
        // Include wide char when trailing spacer is selected.
        if grid_line[cols.start].flags.contains(Flags::WIDE_CHAR_SPACER) {
            cols.start -= 1;
        }
        let mut tab_mode = false;
        for column in (cols.start.0..line_length.0).map(Column::from) {
            let cell = &grid_line[column];
            // Skip over cells until next tab-stop once a tab was found.
            if tab_mode {
                if self.tabs[column] || cell.c != ' ' {
                    tab_mode = false;
                } else {
                    continue;
                }
            }
            if cell.c == '\t' {
                tab_mode = true;
            }
            // Spacer cells are artifacts of wide-char rendering; skip them.
            if !cell.flags.intersects(Flags::WIDE_CHAR_SPACER | Flags::LEADING_WIDE_CHAR_SPACER) {
                // Push cells primary character.
                text.push(cell.c);
                // Push zero-width characters.
                for c in cell.zerowidth().into_iter().flatten() {
                    text.push(*c);
                }
            }
        }
        // Append a newline when the selection reaches the end of a non-wrapped line.
        if cols.end >= self.columns() - 1
            && (line_length.0 == 0
                || !self.grid[line][line_length - 1].flags.contains(Flags::WRAPLINE))
        {
            text.push('\n');
        }
        // If wide char is not part of the selection, but leading spacer is, include it.
        if line_length == self.columns()
            && line_length.0 >= 2
            && grid_line[line_length - 1].flags.contains(Flags::LEADING_WIDE_CHAR_SPACER)
            && include_wrapped_wide
        {
            text.push(self.grid[line - 1i32][Column(0)].c);
        }
        text
    }
    /// Terminal content required for rendering.
    #[inline]
    pub fn renderable_content(&self) -> RenderableContent<'_>
    where
        T: EventListener,
    {
        RenderableContent::new(self)
    }
    /// Access to the raw grid data structure.
    pub fn grid(&self) -> &Grid<Cell> {
        &self.grid
    }
    /// Mutable access to the raw grid data structure.
    pub fn grid_mut(&mut self) -> &mut Grid<Cell> {
        &mut self.grid
    }
    /// Resize terminal to new dimensions.
    ///
    /// Resizes both grids, moves the vi cursor with the content, drops or
    /// rotates the selection as needed, resets the scroll region, and resizes
    /// the damage tracking. No-op when dimensions are unchanged.
    pub fn resize<S: Dimensions>(&mut self, size: S) {
        let old_cols = self.columns();
        let old_lines = self.screen_lines();
        let num_cols = size.columns();
        let num_lines = size.screen_lines();
        if old_cols == num_cols && old_lines == num_lines {
            debug!("Term::resize dimensions unchanged");
            return;
        }
        debug!("New num_cols is {} and num_lines is {}", num_cols, num_lines);
        // Move vi mode cursor with the content.
        let history_size = self.history_size();
        let mut delta = num_lines as i32 - old_lines as i32;
        let min_delta = cmp::min(0, num_lines as i32 - self.grid.cursor.point.line.0 - 1);
        delta = cmp::min(cmp::max(delta, min_delta), history_size as i32);
        self.vi_mode_cursor.point.line += delta;
        let is_alt = self.mode.contains(TermMode::ALT_SCREEN);
        self.grid.resize(!is_alt, num_lines, num_cols);
        self.inactive_grid.resize(is_alt, num_lines, num_cols);
        // Invalidate selection and tabs only when necessary.
        if old_cols != num_cols {
            self.selection = None;
            // Recreate tabs list.
            self.tabs.resize(num_cols);
        } else if let Some(selection) = self.selection.take() {
            let max_lines = cmp::max(num_lines, old_lines) as i32;
            let range = Line(0)..Line(max_lines);
            self.selection = selection.rotate(self, &range, -delta);
        }
        // Clamp vi cursor to viewport.
        let vi_point = self.vi_mode_cursor.point;
        let viewport_top = Line(-(self.grid.display_offset() as i32));
        let viewport_bottom = viewport_top + self.bottommost_line();
        self.vi_mode_cursor.point.line =
            cmp::max(cmp::min(vi_point.line, viewport_bottom), viewport_top);
        self.vi_mode_cursor.point.column = cmp::min(vi_point.column, self.last_column());
        // Reset scrolling region.
        self.scroll_region = Line(0)..Line(self.screen_lines() as i32);
        // Resize damage information.
        self.damage.resize(num_cols, num_lines);
    }
    /// Active terminal modes.
    #[inline]
    pub fn mode(&self) -> &TermMode {
        &self.mode
    }
    /// Swap primary and alternate screen buffer.
    ///
    /// Entering the alt screen seeds its cursor from the primary one and clears
    /// its contents; the keyboard-mode stacks are swapped in both directions.
    pub fn swap_alt(&mut self) {
        if !self.mode.contains(TermMode::ALT_SCREEN) {
            // Set alt screen cursor to the current primary screen cursor.
            self.inactive_grid.cursor = self.grid.cursor.clone();
            // Drop information about the primary screens saved cursor.
            self.grid.saved_cursor = self.grid.cursor.clone();
            // Reset alternate screen contents.
            self.inactive_grid.reset_region(..);
        }
        // Each screen buffer keeps its own kitty keyboard mode stack.
        mem::swap(&mut self.keyboard_mode_stack, &mut self.inactive_keyboard_mode_stack);
        let keyboard_mode =
            self.keyboard_mode_stack.last().copied().unwrap_or(KeyboardModes::NO_MODE).into();
        self.set_keyboard_mode(keyboard_mode, KeyboardModesApplyBehavior::Replace);
        mem::swap(&mut self.grid, &mut self.inactive_grid);
        self.mode ^= TermMode::ALT_SCREEN;
        self.selection = None;
        self.mark_fully_damaged();
    }
    /// Scroll screen down.
    ///
    /// Text moves down; clear at bottom
    /// Expects origin to be in scroll range.
    #[inline]
    fn scroll_down_relative(&mut self, origin: Line, mut lines: usize) {
        trace!("Scrolling down relative: origin={}, lines={}", origin, lines);
        // Clamp the line count to both the scroll region and the space below origin.
        lines = cmp::min(lines, (self.scroll_region.end - self.scroll_region.start).0 as usize);
        lines = cmp::min(lines, (self.scroll_region.end - origin).0 as usize);
        let region = origin..self.scroll_region.end;
        // Scroll selection.
        self.selection =
            self.selection.take().and_then(|s| s.rotate(self, &region, -(lines as i32)));
        // Scroll vi mode cursor.
        let line = &mut self.vi_mode_cursor.point.line;
        if region.start <= *line && region.end > *line {
            *line = cmp::min(*line + lines, region.end - 1);
        }
        // Scroll between origin and bottom
        self.grid.scroll_down(&region, lines);
        self.mark_fully_damaged();
    }
    /// Scroll screen up
    ///
    /// Text moves up; clear at top
    /// Expects origin to be in scroll range.
    #[inline]
    fn scroll_up_relative(&mut self, origin: Line, mut lines: usize) {
        trace!("Scrolling up relative: origin={}, lines={}", origin, lines);
        lines = cmp::min(lines, (self.scroll_region.end - self.scroll_region.start).0 as usize);
        let region = origin..self.scroll_region.end;
        // Scroll selection.
        self.selection = self.selection.take().and_then(|s| s.rotate(self, &region, lines as i32));
        self.grid.scroll_up(&region, lines);
        // Scroll vi mode cursor.
        let viewport_top = Line(-(self.grid.display_offset() as i32));
        // When the region starts at the top, the vi cursor may follow content into history.
        let top = if region.start == 0 { viewport_top } else { region.start };
        let line = &mut self.vi_mode_cursor.point.line;
        if (top <= *line) && region.end > *line {
            *line = cmp::max(*line - lines, top);
        }
        self.mark_fully_damaged();
    }
    /// DECCOLM: 80/132 column mode side effects (region reset and screen clear).
    fn deccolm(&mut self)
    where
        T: EventListener,
    {
        // Setting 132 column font makes no sense, but run the other side effects.
        // Clear scrolling region.
        self.set_scrolling_region(1, None);
        // Clear grid.
        self.grid.reset_region(..);
        self.mark_fully_damaged();
    }
    /// Request terminal shutdown by notifying the event loop.
    #[inline]
    pub fn exit(&mut self)
    where
        T: EventListener,
    {
        self.event_proxy.send_event(Event::Exit);
    }
    /// Toggle the vi mode.
    ///
    /// On entry the vi cursor is placed on the terminal cursor when it is
    /// visible, otherwise at the top-left of the viewport.
    #[inline]
    pub fn toggle_vi_mode(&mut self)
    where
        T: EventListener,
    {
        self.mode ^= TermMode::VI;
        if self.mode.contains(TermMode::VI) {
            let display_offset = self.grid.display_offset() as i32;
            if self.grid.cursor.point.line > self.bottommost_line() - display_offset {
                // Move cursor to top-left if terminal cursor is not visible.
                let point = Point::new(Line(-display_offset), Column(0));
                self.vi_mode_cursor = ViModeCursor::new(point);
            } else {
                // Reset vi mode cursor position to match primary cursor.
                self.vi_mode_cursor = ViModeCursor::new(self.grid.cursor.point);
            }
        }
        // Update UI about cursor blinking state changes.
        self.event_proxy.send_event(Event::CursorBlinkingChange);
    }
    /// Move vi mode cursor.
    ///
    /// No-op unless vi mode is active; updates any active selection afterwards.
    #[inline]
    pub fn vi_motion(&mut self, motion: ViMotion)
    where
        T: EventListener,
    {
        // Require vi mode to be active.
        if !self.mode.contains(TermMode::VI) {
            return;
        }
        // Move cursor.
        self.vi_mode_cursor = self.vi_mode_cursor.motion(self, motion);
        self.vi_mode_recompute_selection();
    }
    /// Move vi cursor to a point in the grid.
    ///
    /// Scrolls the viewport if needed so the point is visible.
    #[inline]
    pub fn vi_goto_point(&mut self, point: Point)
    where
        T: EventListener,
    {
        // Move viewport to make point visible.
        self.scroll_to_point(point);
        // Move vi cursor to the point.
        self.vi_mode_cursor.point = point;
        self.vi_mode_recompute_selection();
    }
    /// Update the active selection to match the vi mode cursor position.
    #[inline]
    fn vi_mode_recompute_selection(&mut self) {
        // Require vi mode to be active.
        if !self.mode.contains(TermMode::VI) {
            return;
        }
        // Update only if non-empty selection is present.
        if let Some(selection) = self.selection.as_mut().filter(|s| !s.is_empty()) {
            selection.update(self.vi_mode_cursor.point, Side::Left);
            selection.include_all();
        }
    }
    /// Scroll display to point if it is outside of viewport.
    pub fn scroll_to_point(&mut self, point: Point)
    where
        T: EventListener,
    {
        let display_offset = self.grid.display_offset() as i32;
        let screen_lines = self.grid.screen_lines() as i32;
        if point.line < -display_offset {
            // Point is above the viewport: scroll up just enough to reveal it.
            let lines = point.line + display_offset;
            self.scroll_display(Scroll::Delta(-lines.0));
        } else if point.line >= (screen_lines - display_offset) {
            // Point is below the viewport: scroll down just enough to reveal it.
            let lines = point.line + display_offset - screen_lines + 1i32;
            self.scroll_display(Scroll::Delta(-lines.0));
        }
    }
    /// Jump to the end of a wide cell.
    ///
    /// Moving right from a leading spacer crosses the line wrap onto the next
    /// line; moving left from a wide char (or its spacer) lands before it,
    /// possibly on the previous line when the char was wrapped.
    pub fn expand_wide(&self, mut point: Point, direction: Direction) -> Point {
        let flags = self.grid[point.line][point.column].flags;
        match direction {
            Direction::Right if flags.contains(Flags::LEADING_WIDE_CHAR_SPACER) => {
                point.column = Column(1);
                point.line += 1;
            },
            Direction::Right if flags.contains(Flags::WIDE_CHAR) => {
                point.column = cmp::min(point.column + 1, self.last_column());
            },
            Direction::Left if flags.intersects(Flags::WIDE_CHAR | Flags::WIDE_CHAR_SPACER) => {
                if flags.contains(Flags::WIDE_CHAR_SPACER) {
                    point.column -= 1;
                }
                let prev = point.sub(self, Boundary::Grid, 1);
                if self.grid[prev].flags.contains(Flags::LEADING_WIDE_CHAR_SPACER) {
                    point = prev;
                }
            },
            _ => (),
        }
        point
    }
    /// Characters which terminate semantic selection, from the active config.
    #[inline]
    pub fn semantic_escape_chars(&self) -> &str {
        &self.config.semantic_escape_chars
    }
#[cfg(test)]
pub(crate) fn set_semantic_escape_chars(&mut self, semantic_escape_chars: &str) {
self.config.semantic_escape_chars = semantic_escape_chars.into();
}
/// Active terminal cursor style.
///
/// While vi mode is active, this will automatically return the vi mode cursor style.
#[inline]
pub fn cursor_style(&self) -> CursorStyle {
let cursor_style = self.cursor_style.unwrap_or(self.config.default_cursor_style);
if self.mode.contains(TermMode::VI) {
self.config.vi_mode_cursor_style.unwrap_or(cursor_style)
} else {
cursor_style
}
}
    /// Currently active terminal color palette.
    pub fn colors(&self) -> &Colors {
        &self.colors
    }
    /// Insert a linebreak at the current cursor position.
    ///
    /// No-op when line wrapping is disabled. Marks the current cell with
    /// `WRAPLINE` so reflow/selection can tell this break from a hard newline.
    #[inline]
    fn wrapline(&mut self)
    where
        T: EventListener,
    {
        if !self.mode.contains(TermMode::LINE_WRAP) {
            return;
        }
        trace!("Wrapping input");
        self.grid.cursor_cell().flags.insert(Flags::WRAPLINE);
        if self.grid.cursor.point.line + 1 >= self.scroll_region.end {
            // At the bottom of the scroll region: scroll instead of moving down.
            self.linefeed();
        } else {
            self.damage_cursor();
            self.grid.cursor.point.line += 1;
        }
        self.grid.cursor.point.column = Column(0);
        self.grid.cursor.input_needs_wrap = false;
        self.damage_cursor();
    }
    /// Write `c` to the cell at the cursor position.
    ///
    /// Applies the active charset mapping and the cursor's template attributes.
    /// When overwriting part of a fullwidth character, the related wide-char
    /// and spacer cells are cleaned up first.
    #[inline(always)]
    fn write_at_cursor(&mut self, c: char) {
        let c = self.grid.cursor.charsets[self.active_charset].map(c);
        let fg = self.grid.cursor.template.fg;
        let bg = self.grid.cursor.template.bg;
        let flags = self.grid.cursor.template.flags;
        let extra = self.grid.cursor.template.extra.clone();
        let mut cursor_cell = self.grid.cursor_cell();
        // Clear all related cells when overwriting a fullwidth cell.
        if cursor_cell.flags.intersects(Flags::WIDE_CHAR | Flags::WIDE_CHAR_SPACER) {
            // Remove wide char and spacer.
            let wide = cursor_cell.flags.contains(Flags::WIDE_CHAR);
            let point = self.grid.cursor.point;
            if wide && point.column < self.last_column() {
                self.grid[point.line][point.column + 1].flags.remove(Flags::WIDE_CHAR_SPACER);
            } else if point.column > 0 {
                self.grid[point.line][point.column - 1].clear_wide();
            }
            // Remove leading spacers.
            if point.column <= 1 && point.line != self.topmost_line() {
                let column = self.last_column();
                self.grid[point.line - 1i32][column].flags.remove(Flags::LEADING_WIDE_CHAR_SPACER);
            }
            // Re-borrow the cursor cell after the grid mutations above.
            cursor_cell = self.grid.cursor_cell();
        }
        cursor_cell.c = c;
        cursor_cell.fg = fg;
        cursor_cell.bg = bg;
        cursor_cell.flags = flags;
        cursor_cell.extra = extra;
    }
    /// Damage the cell currently under the terminal cursor.
    #[inline]
    fn damage_cursor(&mut self) {
        // The normal cursor coordinates are always in viewport.
        let point =
            Point::new(self.grid.cursor.point.line.0 as usize, self.grid.cursor.point.column);
        self.damage.damage_point(point);
    }
    /// Apply a kitty keyboard protocol mode change to the terminal mode flags.
    ///
    /// `apply` selects whether `mode` replaces, unions with, or is subtracted
    /// from the currently active protocol bits.
    #[inline]
    fn set_keyboard_mode(&mut self, mode: TermMode, apply: KeyboardModesApplyBehavior) {
        let active_mode = self.mode & TermMode::KITTY_KEYBOARD_PROTOCOL;
        self.mode &= !TermMode::KITTY_KEYBOARD_PROTOCOL;
        let new_mode = match apply {
            KeyboardModesApplyBehavior::Replace => mode,
            KeyboardModesApplyBehavior::Union => active_mode.union(mode),
            KeyboardModesApplyBehavior::Difference => active_mode.difference(mode),
        };
        trace!("Setting keyboard mode to {new_mode:?}");
        self.mode |= new_mode;
    }
}
/// Forward the dimension queries to the currently active grid.
impl<T> Dimensions for Term<T> {
    #[inline]
    fn columns(&self) -> usize {
        self.grid.columns()
    }
    #[inline]
    fn screen_lines(&self) -> usize {
        self.grid.screen_lines()
    }
    #[inline]
    fn total_lines(&self) -> usize {
        self.grid.total_lines()
    }
}
impl<T: EventListener> Handler for Term<T> {
    /// A character to be displayed.
    ///
    /// Handles zero-width combining characters, pending line wraps, insert
    /// mode shifting, and fullwidth characters (including the leading-spacer
    /// placeholder when a wide glyph does not fit at the end of a row).
    #[inline(never)]
    fn input(&mut self, c: char) {
        // Number of cells the char will occupy.
        let width = match c.width() {
            Some(width) => width,
            None => return,
        };
        // Handle zero-width characters.
        if width == 0 {
            // Get previous column.
            let mut column = self.grid.cursor.point.column;
            if !self.grid.cursor.input_needs_wrap {
                column.0 = column.saturating_sub(1);
            }
            // Put zerowidth characters over first fullwidth character cell.
            let line = self.grid.cursor.point.line;
            if self.grid[line][column].flags.contains(Flags::WIDE_CHAR_SPACER) {
                column.0 = column.saturating_sub(1);
            }
            self.grid[line][column].push_zerowidth(c);
            return;
        }
        // Move cursor to next line.
        if self.grid.cursor.input_needs_wrap {
            self.wrapline();
        }
        // If in insert mode, first shift cells to the right.
        let columns = self.columns();
        if self.mode.contains(TermMode::INSERT) && self.grid.cursor.point.column + width < columns {
            let line = self.grid.cursor.point.line;
            let col = self.grid.cursor.point.column;
            let row = &mut self.grid[line][..];
            for col in (col.0..(columns - width)).rev() {
                row.swap(col + width, col);
            }
        }
        if width == 1 {
            self.write_at_cursor(c);
        } else {
            if self.grid.cursor.point.column + 1 >= columns {
                if self.mode.contains(TermMode::LINE_WRAP) {
                    // Insert placeholder before wide char if glyph does not fit in this row.
                    self.grid.cursor.template.flags.insert(Flags::LEADING_WIDE_CHAR_SPACER);
                    self.write_at_cursor(' ');
                    self.grid.cursor.template.flags.remove(Flags::LEADING_WIDE_CHAR_SPACER);
                    self.wrapline();
                } else {
                    // Prevent out of bounds crash when linewrapping is disabled.
                    self.grid.cursor.input_needs_wrap = true;
                    return;
                }
            }
            // Write full width glyph to current cursor cell.
            self.grid.cursor.template.flags.insert(Flags::WIDE_CHAR);
            self.write_at_cursor(c);
            self.grid.cursor.template.flags.remove(Flags::WIDE_CHAR);
            // Write spacer to cell following the wide glyph.
            self.grid.cursor.point.column += 1;
            self.grid.cursor.template.flags.insert(Flags::WIDE_CHAR_SPACER);
            self.write_at_cursor(' ');
            self.grid.cursor.template.flags.remove(Flags::WIDE_CHAR_SPACER);
        }
        // Advance, or flag a pending wrap when at the last column.
        if self.grid.cursor.point.column + 1 < columns {
            self.grid.cursor.point.column += 1;
        } else {
            self.grid.cursor.input_needs_wrap = true;
        }
    }
#[inline]
fn decaln(&mut self) {
trace!("Decalnning");
for line in (0..self.screen_lines()).map(Line::from) {
for column in 0..self.columns() {
let cell = &mut self.grid[line][Column(column)];
*cell = Cell::default();
cell.c = 'E';
}
}
self.mark_fully_damaged();
}
    /// Move the cursor to an absolute position, clamped to the screen.
    ///
    /// In origin mode the line is relative to the scroll region and clamped to
    /// it; otherwise it is relative to the viewport top.
    #[inline]
    fn goto(&mut self, line: i32, col: usize) {
        let line = Line(line);
        let col = Column(col);
        trace!("Going to: line={}, col={}", line, col);
        let (y_offset, max_y) = if self.mode.contains(TermMode::ORIGIN) {
            (self.scroll_region.start, self.scroll_region.end - 1)
        } else {
            (Line(0), self.bottommost_line())
        };
        self.damage_cursor();
        self.grid.cursor.point.line = cmp::max(cmp::min(line + y_offset, max_y), Line(0));
        self.grid.cursor.point.column = cmp::min(col, self.last_column());
        self.damage_cursor();
        self.grid.cursor.input_needs_wrap = false;
    }
#[inline]
fn goto_line(&mut self, line: i32) {
trace!("Going to line: {}", line);
self.goto(line, self.grid.cursor.point.column.0)
}
#[inline]
fn goto_col(&mut self, col: usize) {
trace!("Going to column: {}", col);
self.goto(self.grid.cursor.point.line.0, col)
}
    /// Insert `count` blank cells at the cursor, shifting the rest of the line
    /// to the right; cells shifted past the end of the line are discarded.
    #[inline]
    fn insert_blank(&mut self, count: usize) {
        let cursor = &self.grid.cursor;
        let bg = cursor.template.bg;
        // Ensure inserting within terminal bounds
        let count = cmp::min(count, self.columns() - cursor.point.column.0);
        let source = cursor.point.column;
        let destination = cursor.point.column.0 + count;
        let num_cells = self.columns() - destination;
        let line = cursor.point.line;
        self.damage.damage_line(line.0 as usize, 0, self.columns() - 1);
        let row = &mut self.grid[line][..];
        // Shift right-to-left so earlier swaps don't clobber later sources.
        for offset in (0..num_cells).rev() {
            row.swap(destination + offset, source.0 + offset);
        }
        // Cells were just moved out toward the end of the line;
        // fill in between source and dest with blanks.
        for cell in &mut row[source.0..destination] {
            *cell = bg.into();
        }
    }
#[inline]
fn move_up(&mut self, lines: usize) {
trace!("Moving up: {}", lines);
let line = self.grid.cursor.point.line - lines;
let column = self.grid.cursor.point.column;
self.goto(line.0, column.0)
}
#[inline]
fn move_down(&mut self, lines: usize) {
trace!("Moving down: {}", lines);
let line = self.grid.cursor.point.line + lines;
let column = self.grid.cursor.point.column;
self.goto(line.0, column.0)
}
    /// Move the cursor forward by `cols`, clamped to the last column.
    #[inline]
    fn move_forward(&mut self, cols: usize) {
        trace!("Moving forward: {}", cols);
        let last_column = cmp::min(self.grid.cursor.point.column + cols, self.last_column());
        let cursor_line = self.grid.cursor.point.line.0 as usize;
        self.damage.damage_line(cursor_line, self.grid.cursor.point.column.0, last_column.0);
        self.grid.cursor.point.column = last_column;
        self.grid.cursor.input_needs_wrap = false;
    }
    /// Move the cursor backward by `cols`, clamped to the first column.
    #[inline]
    fn move_backward(&mut self, cols: usize) {
        trace!("Moving backward: {}", cols);
        let column = self.grid.cursor.point.column.saturating_sub(cols);
        let cursor_line = self.grid.cursor.point.line.0 as usize;
        self.damage.damage_line(cursor_line, column, self.grid.cursor.point.column.0);
        self.grid.cursor.point.column = Column(column);
        self.grid.cursor.input_needs_wrap = false;
    }
    /// Respond to primary/secondary device attribute queries (DA1/DA2) by
    /// writing the reply back to the PTY.
    #[inline]
    fn identify_terminal(&mut self, intermediate: Option<char>) {
        match intermediate {
            None => {
                trace!("Reporting primary device attributes");
                let text = String::from("\x1b[?6c");
                self.event_proxy.send_event(Event::PtyWrite(text));
            },
            Some('>') => {
                trace!("Reporting secondary device attributes");
                let version = version_number(env!("CARGO_PKG_VERSION"));
                let text = format!("\x1b[>0;{version};1c");
                self.event_proxy.send_event(Event::PtyWrite(text));
            },
            _ => debug!("Unsupported device attributes intermediate"),
        }
    }
    /// Report the active kitty keyboard mode flags back to the PTY.
    ///
    /// Ignored entirely when kitty keyboard support is disabled in the config.
    #[inline]
    fn report_keyboard_mode(&mut self) {
        if !self.config.kitty_keyboard {
            return;
        }
        trace!("Reporting active keyboard mode");
        let current_mode =
            self.keyboard_mode_stack.last().unwrap_or(&KeyboardModes::NO_MODE).bits();
        let text = format!("\x1b[?{current_mode}u");
        self.event_proxy.send_event(Event::PtyWrite(text));
    }
#[inline]
fn push_keyboard_mode(&mut self, mode: KeyboardModes) {
if !self.config.kitty_keyboard {
return;
}
trace!("Pushing `{mode:?}` keyboard mode into the stack");
if self.keyboard_mode_stack.len() >= KEYBOARD_MODE_STACK_MAX_DEPTH {
let removed = self.title_stack.remove(0);
trace!(
"Removing '{:?}' from bottom of keyboard mode stack that exceeds its maximum depth",
removed
);
}
self.keyboard_mode_stack.push(mode);
self.set_keyboard_mode(mode.into(), KeyboardModesApplyBehavior::Replace);
}
    /// Pop up to `to_pop` kitty keyboard modes from the stack and re-apply
    /// whichever mode remains on top (or `NO_MODE` when the stack is empty).
    #[inline]
    fn pop_keyboard_modes(&mut self, to_pop: u16) {
        if !self.config.kitty_keyboard {
            return;
        }
        trace!("Attempting to pop {to_pop} keyboard modes from the stack");
        // `saturating_sub` means popping more entries than exist just empties the stack.
        let new_len = self.keyboard_mode_stack.len().saturating_sub(to_pop as usize);
        self.keyboard_mode_stack.truncate(new_len);
        // Reload active mode.
        let mode = self.keyboard_mode_stack.last().copied().unwrap_or(KeyboardModes::NO_MODE);
        self.set_keyboard_mode(mode.into(), KeyboardModesApplyBehavior::Replace);
    }
    /// Handler entry point for kitty keyboard mode changes; delegates to the
    /// inherent `Term::set_keyboard_mode` when the protocol is enabled.
    #[inline]
    fn set_keyboard_mode(&mut self, mode: KeyboardModes, apply: KeyboardModesApplyBehavior) {
        if !self.config.kitty_keyboard {
            return;
        }
        self.set_keyboard_mode(mode.into(), apply);
    }
    /// Answer DSR queries: 5 reports "OK", 6 reports the cursor position
    /// (1-based, per the escape sequence convention).
    #[inline]
    fn device_status(&mut self, arg: usize) {
        trace!("Reporting device status: {}", arg);
        match arg {
            5 => {
                let text = String::from("\x1b[0n");
                self.event_proxy.send_event(Event::PtyWrite(text));
            },
            6 => {
                let pos = self.grid.cursor.point;
                let text = format!("\x1b[{};{}R", pos.line + 1, pos.column + 1);
                self.event_proxy.send_event(Event::PtyWrite(text));
            },
            _ => debug!("unknown device status query: {}", arg),
        };
    }
#[inline]
fn move_down_and_cr(&mut self, lines: usize) {
trace!("Moving down and cr: {}", lines);
let line = self.grid.cursor.point.line + lines;
self.goto(line.0, 0)
}
#[inline]
fn move_up_and_cr(&mut self, lines: usize) {
trace!("Moving up and cr: {}", lines);
let line = self.grid.cursor.point.line - lines;
self.goto(line.0, 0)
}
    /// Insert tab at cursor position.
    ///
    /// Advances to the next tabstop `count` times; a tab while a wrap is
    /// pending behaves like a linebreak instead.
    #[inline]
    fn put_tab(&mut self, mut count: u16) {
        // A tab after the last column is the same as a linebreak.
        if self.grid.cursor.input_needs_wrap {
            self.wrapline();
            return;
        }
        while self.grid.cursor.point.column < self.columns() && count != 0 {
            count -= 1;
            // Record the tab in the cell (charset-mapped) if it is still blank.
            let c = self.grid.cursor.charsets[self.active_charset].map('\t');
            let cell = self.grid.cursor_cell();
            if cell.c == ' ' {
                cell.c = c;
            }
            // Advance to the next tabstop or the last column, whichever is first.
            loop {
                if (self.grid.cursor.point.column + 1) == self.columns() {
                    break;
                }
                self.grid.cursor.point.column += 1;
                if self.tabs[self.grid.cursor.point.column] {
                    break;
                }
            }
        }
    }
    /// Backspace.
    ///
    /// Moves the cursor one column left (never across the line start) and
    /// damages the affected cells.
    #[inline]
    fn backspace(&mut self) {
        trace!("Backspace");
        if self.grid.cursor.point.column > Column(0) {
            let line = self.grid.cursor.point.line.0 as usize;
            let column = self.grid.cursor.point.column.0;
            self.grid.cursor.point.column -= 1;
            self.grid.cursor.input_needs_wrap = false;
            self.damage.damage_line(line, column - 1, column);
        }
    }
/// Carriage return.
#[inline]
fn carriage_return(&mut self) {
trace!("Carriage return");
let new_col = 0;
let line = self.grid.cursor.point.line.0 as usize;
self.damage.damage_line(line, new_col, self.grid.cursor.point.column.0);
self.grid.cursor.point.column = Column(new_col);
self.grid.cursor.input_needs_wrap = false;
}
    /// Linefeed.
    ///
    /// Scrolls when at the bottom of the scroll region, otherwise moves the
    /// cursor one line down (never past the screen).
    #[inline]
    fn linefeed(&mut self) {
        trace!("Linefeed");
        let next = self.grid.cursor.point.line + 1;
        if next == self.scroll_region.end {
            self.scroll_up(1);
        } else if next < self.screen_lines() {
            self.damage_cursor();
            self.grid.cursor.point.line += 1;
            self.damage_cursor();
        }
    }
    /// Ring the terminal bell by notifying the event loop.
    ///
    /// NOTE(review): the previous doc comment ("Set current position as a
    /// tabstop.") described `set_horizontal_tabstop`, not this method.
    #[inline]
    fn bell(&mut self) {
        trace!("Bell");
        self.event_proxy.send_event(Event::Bell);
    }
    /// SUB control character — intentionally unimplemented.
    #[inline]
    fn substitute(&mut self) {
        trace!("[unimplemented] Substitute");
    }
    /// Run LF/NL.
    ///
    /// LF/NL mode has some interesting history. According to ECMA-48 4th
    /// edition, in LINE FEED mode,
    ///
    /// > The execution of the formatter functions LINE FEED (LF), FORM FEED
    /// > (FF), LINE TABULATION (VT) cause only movement of the active position in
    /// > the direction of the line progression.
    ///
    /// In NEW LINE mode,
    ///
    /// > The execution of the formatter functions LINE FEED (LF), FORM FEED
    /// > (FF), LINE TABULATION (VT) cause movement to the line home position on
    /// > the following line, the following form, etc. In the case of LF this is
    /// > referred to as the New Line (NL) option.
    ///
    /// Additionally, ECMA-48 4th edition says that this option is deprecated.
    /// ECMA-48 5th edition only mentions this option (without explanation)
    /// saying that it's been removed.
    ///
    /// As an emulator, we need to support it since applications may still rely
    /// on it.
    #[inline]
    fn newline(&mut self) {
        self.linefeed();
        // In NEW LINE mode, LF also performs a carriage return.
        if self.mode.contains(TermMode::LINE_FEED_NEW_LINE) {
            self.carriage_return();
        }
    }
    /// Set current position as a tabstop.
    #[inline]
    fn set_horizontal_tabstop(&mut self) {
        trace!("Setting horizontal tabstop");
        self.tabs[self.grid.cursor.point.column] = true;
    }
#[inline]
fn scroll_up(&mut self, lines: usize) {
let origin = self.scroll_region.start;
self.scroll_up_relative(origin, lines);
}
#[inline]
fn scroll_down(&mut self, lines: usize) {
let origin = self.scroll_region.start;
self.scroll_down_relative(origin, lines);
}
    /// Insert `lines` blank lines at the cursor, shifting lines below down
    /// within the scroll region. No-op when the cursor is outside the region.
    #[inline]
    fn insert_blank_lines(&mut self, lines: usize) {
        trace!("Inserting blank {} lines", lines);
        let origin = self.grid.cursor.point.line;
        if self.scroll_region.contains(&origin) {
            self.scroll_down_relative(origin, lines);
        }
    }
/// Delete `lines` lines at the cursor, shifting following lines up.
#[inline]
fn delete_lines(&mut self, lines: usize) {
    let origin = self.grid.cursor.point.line;
    // Clamp to the number of lines between the cursor and the screen bottom.
    let lines = cmp::min(self.screen_lines() - origin.0 as usize, lines);
    trace!("Deleting {} lines", lines);
    // Only takes effect while the cursor is inside the scroll region.
    if lines > 0 && self.scroll_region.contains(&origin) {
        self.scroll_up_relative(origin, lines);
    }
}
/// Erase `count` cells starting at the cursor, without shifting the line.
#[inline]
fn erase_chars(&mut self, count: usize) {
    let point = self.grid.cursor.point;
    trace!("Erasing chars: count={}, col={}", count, point.column);
    // Erased cells take the current background color.
    let template_bg = self.grid.cursor.template.bg;
    let first = point.column;
    // Never erase past the right edge of the screen.
    let last = cmp::min(first + count, Column(self.columns()));
    self.damage.damage_line(point.line.0 as usize, first.0, last.0);
    for cell in &mut self.grid[point.line][first..last] {
        *cell = template_bg.into();
    }
}
/// Delete `count` cells at the cursor, shifting the rest of the line left.
#[inline]
fn delete_chars(&mut self, count: usize) {
    let columns = self.columns();
    let cursor = &self.grid.cursor;
    let bg = cursor.template.bg;
    // Ensure deleting within terminal bounds.
    let count = cmp::min(count, columns);
    let start = cursor.point.column.0;
    let end = cmp::min(start + count, columns - 1);
    let num_cells = columns - end;
    let line = cursor.point.line;
    // Everything right of the cursor moves, so damage up to the line end.
    self.damage.damage_line(line.0 as usize, 0, self.columns() - 1);
    let row = &mut self.grid[line][..];
    // Shift the tail of the line left over the deleted cells.
    for offset in 0..num_cells {
        row.swap(start + offset, end + offset);
    }
    // Clear last `count` cells in the row. If deleting 1 char, need to delete
    // 1 cell.
    let end = columns - count;
    for cell in &mut row[end..] {
        *cell = bg.into();
    }
}
/// Move the cursor left by `count` tabstops.
#[inline]
fn move_backward_tabs(&mut self, count: u16) {
    trace!("Moving backward {} tabs", count);
    let old_col = self.grid.cursor.point.column.0;
    for _ in 0..count {
        let mut col = self.grid.cursor.point.column;
        // Already at the first column; nothing further to the left.
        if col == 0 {
            break;
        }
        // Find the closest tabstop left of the cursor.
        for i in (0..(col.0)).rev() {
            if self.tabs[index::Column(i)] {
                col = index::Column(i);
                break;
            }
        }
        self.grid.cursor.point.column = col;
    }
    // Damage the span the cursor moved across.
    let line = self.grid.cursor.point.line.0 as usize;
    self.damage.damage_line(line, self.grid.cursor.point.column.0, old_col);
}
/// Move the cursor right by `count` tabstops.
#[inline]
fn move_forward_tabs(&mut self, count: u16) {
    trace!("Moving forward {} tabs", count);
    let num_cols = self.columns();
    let old_col = self.grid.cursor.point.column.0;
    for _ in 0..count {
        let mut col = self.grid.cursor.point.column;
        // Already at the last column; nothing further to the right.
        if col == num_cols - 1 {
            break;
        }
        // Advance to the next tabstop, or the last column if there is none.
        for i in col.0 + 1..num_cols {
            col = index::Column(i);
            if self.tabs[col] {
                break;
            }
        }
        self.grid.cursor.point.column = col;
    }
    // Damage the span the cursor moved across.
    let line = self.grid.cursor.point.line.0 as usize;
    self.damage.damage_line(line, old_col, self.grid.cursor.point.column.0);
}
/// Snapshot the entire cursor state so it can be restored later.
#[inline]
fn save_cursor_position(&mut self) {
    trace!("Saving cursor position");
    self.grid.saved_cursor = self.grid.cursor.clone();
}
/// Restore the cursor state saved by `save_cursor_position`.
#[inline]
fn restore_cursor_position(&mut self) {
    trace!("Restoring cursor position");
    // Damage both the old and the restored cursor locations.
    self.damage_cursor();
    self.grid.cursor = self.grid.saved_cursor.clone();
    self.damage_cursor();
}
/// Clear part or all of the cursor's line, depending on `mode`.
#[inline]
fn clear_line(&mut self, mode: ansi::LineClearMode) {
    trace!("Clearing line: {:?}", mode);
    let cursor = &self.grid.cursor;
    // Cleared cells take the current background color.
    let bg = cursor.template.bg;
    let point = cursor.point;
    // Resolve the half-open column span `[left, right)` for each variant.
    let (left, right) = match mode {
        // A pending wrap means the cursor is logically past the last column;
        // clearing to the right would clear nothing meaningful.
        ansi::LineClearMode::Right if cursor.input_needs_wrap => return,
        ansi::LineClearMode::Right => (point.column, Column(self.columns())),
        ansi::LineClearMode::Left => (Column(0), point.column + 1),
        ansi::LineClearMode::All => (Column(0), Column(self.columns())),
    };
    self.damage.damage_line(point.line.0 as usize, left.0, right.0 - 1);
    let row = &mut self.grid[point.line];
    for cell in &mut row[left..right] {
        *cell = bg.into();
    }
    // Drop any selection that intersects the cleared line.
    let range = self.grid.cursor.point.line..=self.grid.cursor.point.line;
    self.selection = self.selection.take().filter(|s| !s.intersects_range(range));
}
/// Set the indexed color value.
#[inline]
fn set_color(&mut self, index: usize, color: Rgb) {
    trace!("Setting color[{}] = {:?}", index, color);
    // Damage terminal if the color changed and it's not the cursor.
    // Cursor color changes never trigger a full repaint here.
    if index != NamedColor::Cursor as usize && self.colors[index] != Some(color) {
        self.mark_fully_damaged();
    }
    self.colors[index] = Some(color);
}
/// Respond to a color query escape sequence.
#[inline]
fn dynamic_color_sequence(&mut self, prefix: String, index: usize, terminator: &str) {
    trace!("Requested write of escape sequence for color code {}: color[{}]", prefix, index);
    let terminator = terminator.to_owned();
    // The palette lookup happens on the UI side; the reply is built lazily
    // once the color is provided. Each 8-bit channel is emitted twice,
    // widening it to the 16-bit `rgb:rrrr/gggg/bbbb` form.
    self.event_proxy.send_event(Event::ColorRequest(
        index,
        Arc::new(move |color| {
            format!(
                "\x1b]{};rgb:{1:02x}{1:02x}/{2:02x}{2:02x}/{3:02x}{3:02x}{4}",
                prefix, color.r, color.g, color.b, terminator
            )
        }),
    ));
}
/// Reset the indexed color to original value.
#[inline]
fn reset_color(&mut self, index: usize) {
    trace!("Resetting color[{}]", index);
    // Damage terminal if the color changed and it's not the cursor.
    if index != NamedColor::Cursor as usize && self.colors[index].is_some() {
        self.mark_fully_damaged();
    }
    // `None` means "no override"; the default palette applies again.
    self.colors[index] = None;
}
/// Store data into clipboard.
#[inline]
fn clipboard_store(&mut self, clipboard: u8, base64: &[u8]) {
    // OSC 52 writes must be explicitly enabled in the config.
    if !matches!(self.config.osc52, Osc52::OnlyCopy | Osc52::CopyPaste) {
        debug!("Denied osc52 store");
        return;
    }
    // 'c' targets the clipboard; 'p'/'s' target the selection buffer. Any
    // other selector is silently ignored.
    let clipboard_type = match clipboard {
        b'c' => ClipboardType::Clipboard,
        b'p' | b's' => ClipboardType::Selection,
        _ => return,
    };
    // Only forward payloads that decode to valid UTF-8 text.
    if let Ok(Ok(text)) = Base64.decode(base64).map(String::from_utf8) {
        self.event_proxy.send_event(Event::ClipboardStore(clipboard_type, text));
    }
}
/// Load data from clipboard.
#[inline]
fn clipboard_load(&mut self, clipboard: u8, terminator: &str) {
    // OSC 52 reads must be explicitly enabled in the config.
    if !matches!(self.config.osc52, Osc52::OnlyPaste | Osc52::CopyPaste) {
        debug!("Denied osc52 load");
        return;
    }
    // 'c' targets the clipboard; 'p'/'s' target the selection buffer.
    let clipboard_type = match clipboard {
        b'c' => ClipboardType::Clipboard,
        b'p' | b's' => ClipboardType::Selection,
        _ => return,
    };
    let terminator = terminator.to_owned();
    // The reply is produced lazily once the UI provides the clipboard text.
    self.event_proxy.send_event(Event::ClipboardLoad(
        clipboard_type,
        Arc::new(move |text| {
            let base64 = Base64.encode(text);
            format!("\x1b]52;{};{}{}", clipboard as char, base64, terminator)
        }),
    ));
}
/// Clear part or all of the screen, depending on `mode`.
#[inline]
fn clear_screen(&mut self, mode: ansi::ClearMode) {
    trace!("Clearing screen: {:?}", mode);
    // Cleared cells take the current background color.
    let bg = self.grid.cursor.template.bg;
    let screen_lines = self.screen_lines();
    match mode {
        ansi::ClearMode::Above => {
            let cursor = self.grid.cursor.point;
            // If clearing more than one line.
            if cursor.line > 1 {
                // Fully clear all lines before the current line.
                self.grid.reset_region(..cursor.line);
            }
            // Clear up to the current column in the current line.
            let end = cmp::min(cursor.column + 1, Column(self.columns()));
            for cell in &mut self.grid[cursor.line][..end] {
                *cell = bg.into();
            }
            // Drop any selection intersecting the cleared lines.
            let range = Line(0)..=cursor.line;
            self.selection = self.selection.take().filter(|s| !s.intersects_range(range));
        },
        ansi::ClearMode::Below => {
            let cursor = self.grid.cursor.point;
            // Clear from the cursor to the end of its line.
            for cell in &mut self.grid[cursor.line][cursor.column..] {
                *cell = bg.into();
            }
            // Fully clear all lines below the current one.
            if (cursor.line.0 as usize) < screen_lines - 1 {
                self.grid.reset_region((cursor.line + 1)..);
            }
            let range = cursor.line..Line(screen_lines as i32);
            self.selection = self.selection.take().filter(|s| !s.intersects_range(range));
        },
        ansi::ClearMode::All => {
            if self.mode.contains(TermMode::ALT_SCREEN) {
                // The alt screen has no scrollback; wipe everything.
                self.grid.reset_region(..);
            } else {
                let old_offset = self.grid.display_offset();
                self.grid.clear_viewport();
                // Compute number of lines scrolled by clearing the viewport.
                let lines = self.grid.display_offset().saturating_sub(old_offset);
                // Keep the vi cursor on the same content line.
                self.vi_mode_cursor.point.line =
                    (self.vi_mode_cursor.point.line - lines).grid_clamp(self, Boundary::Grid);
            }
            self.selection = None;
        },
        ansi::ClearMode::Saved if self.history_size() > 0 => {
            self.grid.clear_history();
            self.vi_mode_cursor.point.line =
                self.vi_mode_cursor.point.line.grid_clamp(self, Boundary::Cursor);
            // Only selections reaching into the (now gone) history are dropped.
            self.selection = self.selection.take().filter(|s| !s.intersects_range(..Line(0)));
        },
        // We have no history to clear.
        ansi::ClearMode::Saved => (),
    }
    self.mark_fully_damaged();
}
/// Clear tabstops, either at the cursor column or all of them.
#[inline]
fn clear_tabs(&mut self, mode: ansi::TabulationClearMode) {
    trace!("Clearing tabs: {:?}", mode);
    match mode {
        // Clear only the tabstop at the cursor's column.
        ansi::TabulationClearMode::Current => {
            self.tabs[self.grid.cursor.point.column] = false;
        },
        // Clear every tabstop.
        ansi::TabulationClearMode::All => {
            self.tabs.clear_all();
        },
    }
}
/// Reset all important fields in the term struct.
#[inline]
fn reset_state(&mut self) {
    // Make sure the primary grid is active before resetting.
    if self.mode.contains(TermMode::ALT_SCREEN) {
        mem::swap(&mut self.grid, &mut self.inactive_grid);
    }
    self.active_charset = Default::default();
    self.cursor_style = None;
    self.grid.reset();
    self.inactive_grid.reset();
    // Scroll region covers the whole screen again.
    self.scroll_region = Line(0)..Line(self.screen_lines() as i32);
    self.tabs = TabStops::new(self.columns());
    self.title_stack = Vec::new();
    self.title = None;
    self.selection = None;
    self.vi_mode_cursor = Default::default();
    self.keyboard_mode_stack = Default::default();
    self.inactive_keyboard_mode_stack = Default::default();
    // Preserve vi mode across resets.
    self.mode &= TermMode::VI;
    self.mode.insert(TermMode::default());
    self.event_proxy.send_event(Event::CursorBlinkingChange);
    self.mark_fully_damaged();
}
/// Move the cursor up one line (RI), scrolling when at the top of the
/// scroll region.
#[inline]
fn reverse_index(&mut self) {
    trace!("Reversing index");
    // If cursor is at the top.
    if self.grid.cursor.point.line == self.scroll_region.start {
        self.scroll_down(1);
    } else {
        self.damage_cursor();
        // Clamp at the first line so the cursor never leaves the screen.
        self.grid.cursor.point.line = cmp::max(self.grid.cursor.point.line - 1, Line(0));
        self.damage_cursor();
    }
}
/// Set or clear the hyperlink on the cursor's cell template.
#[inline]
fn set_hyperlink(&mut self, hyperlink: Option<Hyperlink>) {
    trace!("Setting hyperlink: {:?}", hyperlink);
    self.grid.cursor.template.set_hyperlink(hyperlink.map(|e| e.into()));
}
/// Set a terminal attribute.
#[inline]
fn terminal_attribute(&mut self, attr: Attr) {
    trace!("Setting attribute: {:?}", attr);
    // All attributes are applied to the cursor's template, which is the
    // prototype for newly written cells.
    let cursor = &mut self.grid.cursor;
    match attr {
        // Colors.
        Attr::Foreground(color) => cursor.template.fg = color,
        Attr::Background(color) => cursor.template.bg = color,
        Attr::UnderlineColor(color) => cursor.template.set_underline_color(color),
        // SGR 0: restore default colors and drop every flag.
        Attr::Reset => {
            cursor.template.fg = Color::Named(NamedColor::Foreground);
            cursor.template.bg = Color::Named(NamedColor::Background);
            cursor.template.flags = Flags::empty();
            cursor.template.set_underline_color(None);
        },
        Attr::Reverse => cursor.template.flags.insert(Flags::INVERSE),
        Attr::CancelReverse => cursor.template.flags.remove(Flags::INVERSE),
        Attr::Bold => cursor.template.flags.insert(Flags::BOLD),
        Attr::CancelBold => cursor.template.flags.remove(Flags::BOLD),
        Attr::Dim => cursor.template.flags.insert(Flags::DIM),
        Attr::CancelBoldDim => cursor.template.flags.remove(Flags::BOLD | Flags::DIM),
        Attr::Italic => cursor.template.flags.insert(Flags::ITALIC),
        Attr::CancelItalic => cursor.template.flags.remove(Flags::ITALIC),
        // Underline styles are mutually exclusive, so the whole group is
        // cleared before inserting the requested style.
        Attr::Underline => {
            cursor.template.flags.remove(Flags::ALL_UNDERLINES);
            cursor.template.flags.insert(Flags::UNDERLINE);
        },
        Attr::DoubleUnderline => {
            cursor.template.flags.remove(Flags::ALL_UNDERLINES);
            cursor.template.flags.insert(Flags::DOUBLE_UNDERLINE);
        },
        Attr::Undercurl => {
            cursor.template.flags.remove(Flags::ALL_UNDERLINES);
            cursor.template.flags.insert(Flags::UNDERCURL);
        },
        Attr::DottedUnderline => {
            cursor.template.flags.remove(Flags::ALL_UNDERLINES);
            cursor.template.flags.insert(Flags::DOTTED_UNDERLINE);
        },
        Attr::DashedUnderline => {
            cursor.template.flags.remove(Flags::ALL_UNDERLINES);
            cursor.template.flags.insert(Flags::DASHED_UNDERLINE);
        },
        Attr::CancelUnderline => cursor.template.flags.remove(Flags::ALL_UNDERLINES),
        Attr::Hidden => cursor.template.flags.insert(Flags::HIDDEN),
        Attr::CancelHidden => cursor.template.flags.remove(Flags::HIDDEN),
        Attr::Strike => cursor.template.flags.insert(Flags::STRIKEOUT),
        Attr::CancelStrike => cursor.template.flags.remove(Flags::STRIKEOUT),
        _ => {
            debug!("Term got unhandled attr: {:?}", attr);
        },
    }
}
/// Enable a DEC private mode.
#[inline]
fn set_private_mode(&mut self, mode: PrivateMode) {
    let mode = match mode {
        PrivateMode::Named(mode) => mode,
        PrivateMode::Unknown(mode) => {
            debug!("Ignoring unknown mode {} in set_private_mode", mode);
            return;
        },
    };
    trace!("Setting private mode: {:?}", mode);
    match mode {
        NamedPrivateMode::UrgencyHints => self.mode.insert(TermMode::URGENCY_HINTS),
        NamedPrivateMode::SwapScreenAndSetRestoreCursor => {
            // Only switch when not already on the alternate screen.
            if !self.mode.contains(TermMode::ALT_SCREEN) {
                self.swap_alt();
            }
        },
        NamedPrivateMode::ShowCursor => self.mode.insert(TermMode::SHOW_CURSOR),
        NamedPrivateMode::CursorKeys => self.mode.insert(TermMode::APP_CURSOR),
        // Mouse protocols are mutually exclusive.
        NamedPrivateMode::ReportMouseClicks => {
            self.mode.remove(TermMode::MOUSE_MODE);
            self.mode.insert(TermMode::MOUSE_REPORT_CLICK);
            self.event_proxy.send_event(Event::MouseCursorDirty);
        },
        NamedPrivateMode::ReportCellMouseMotion => {
            self.mode.remove(TermMode::MOUSE_MODE);
            self.mode.insert(TermMode::MOUSE_DRAG);
            self.event_proxy.send_event(Event::MouseCursorDirty);
        },
        NamedPrivateMode::ReportAllMouseMotion => {
            self.mode.remove(TermMode::MOUSE_MODE);
            self.mode.insert(TermMode::MOUSE_MOTION);
            self.event_proxy.send_event(Event::MouseCursorDirty);
        },
        NamedPrivateMode::ReportFocusInOut => self.mode.insert(TermMode::FOCUS_IN_OUT),
        NamedPrivateMode::BracketedPaste => self.mode.insert(TermMode::BRACKETED_PASTE),
        // Mouse encodings are mutually exclusive.
        NamedPrivateMode::SgrMouse => {
            self.mode.remove(TermMode::UTF8_MOUSE);
            self.mode.insert(TermMode::SGR_MOUSE);
        },
        NamedPrivateMode::Utf8Mouse => {
            self.mode.remove(TermMode::SGR_MOUSE);
            self.mode.insert(TermMode::UTF8_MOUSE);
        },
        NamedPrivateMode::AlternateScroll => self.mode.insert(TermMode::ALTERNATE_SCROLL),
        NamedPrivateMode::LineWrap => self.mode.insert(TermMode::LINE_WRAP),
        NamedPrivateMode::Origin => self.mode.insert(TermMode::ORIGIN),
        NamedPrivateMode::ColumnMode => self.deccolm(),
        NamedPrivateMode::BlinkingCursor => {
            // Start from the configured default when no style is set yet.
            let style = self.cursor_style.get_or_insert(self.config.default_cursor_style);
            style.blinking = true;
            self.event_proxy.send_event(Event::CursorBlinkingChange);
        },
        // NOTE(review): SyncUpdate is a no-op here; presumably handled
        // before reaching this handler — confirm.
        NamedPrivateMode::SyncUpdate => (),
    }
}
/// Disable a DEC private mode.
#[inline]
fn unset_private_mode(&mut self, mode: PrivateMode) {
    let mode = match mode {
        PrivateMode::Named(mode) => mode,
        PrivateMode::Unknown(mode) => {
            debug!("Ignoring unknown mode {} in unset_private_mode", mode);
            return;
        },
    };
    trace!("Unsetting private mode: {:?}", mode);
    match mode {
        NamedPrivateMode::UrgencyHints => self.mode.remove(TermMode::URGENCY_HINTS),
        NamedPrivateMode::SwapScreenAndSetRestoreCursor => {
            // Only switch back when currently on the alternate screen.
            if self.mode.contains(TermMode::ALT_SCREEN) {
                self.swap_alt();
            }
        },
        NamedPrivateMode::ShowCursor => self.mode.remove(TermMode::SHOW_CURSOR),
        NamedPrivateMode::CursorKeys => self.mode.remove(TermMode::APP_CURSOR),
        // Mouse modes notify the UI so the pointer shape can update.
        NamedPrivateMode::ReportMouseClicks => {
            self.mode.remove(TermMode::MOUSE_REPORT_CLICK);
            self.event_proxy.send_event(Event::MouseCursorDirty);
        },
        NamedPrivateMode::ReportCellMouseMotion => {
            self.mode.remove(TermMode::MOUSE_DRAG);
            self.event_proxy.send_event(Event::MouseCursorDirty);
        },
        NamedPrivateMode::ReportAllMouseMotion => {
            self.mode.remove(TermMode::MOUSE_MOTION);
            self.event_proxy.send_event(Event::MouseCursorDirty);
        },
        NamedPrivateMode::ReportFocusInOut => self.mode.remove(TermMode::FOCUS_IN_OUT),
        NamedPrivateMode::BracketedPaste => self.mode.remove(TermMode::BRACKETED_PASTE),
        NamedPrivateMode::SgrMouse => self.mode.remove(TermMode::SGR_MOUSE),
        NamedPrivateMode::Utf8Mouse => self.mode.remove(TermMode::UTF8_MOUSE),
        NamedPrivateMode::AlternateScroll => self.mode.remove(TermMode::ALTERNATE_SCROLL),
        NamedPrivateMode::LineWrap => self.mode.remove(TermMode::LINE_WRAP),
        NamedPrivateMode::Origin => self.mode.remove(TermMode::ORIGIN),
        NamedPrivateMode::ColumnMode => self.deccolm(),
        NamedPrivateMode::BlinkingCursor => {
            let style = self.cursor_style.get_or_insert(self.config.default_cursor_style);
            style.blinking = false;
            self.event_proxy.send_event(Event::CursorBlinkingChange);
        },
        NamedPrivateMode::SyncUpdate => (),
    }
}
/// Report the state of a private mode with a `\x1b[?<mode>;<state>$y` reply
/// written to the PTY.
#[inline]
fn report_private_mode(&mut self, mode: PrivateMode) {
    trace!("Reporting private mode {mode:?}");
    let state = match mode {
        PrivateMode::Named(mode) => match mode {
            NamedPrivateMode::CursorKeys => self.mode.contains(TermMode::APP_CURSOR).into(),
            NamedPrivateMode::Origin => self.mode.contains(TermMode::ORIGIN).into(),
            NamedPrivateMode::LineWrap => self.mode.contains(TermMode::LINE_WRAP).into(),
            NamedPrivateMode::BlinkingCursor => {
                let style = self.cursor_style.get_or_insert(self.config.default_cursor_style);
                style.blinking.into()
            },
            NamedPrivateMode::ShowCursor => self.mode.contains(TermMode::SHOW_CURSOR).into(),
            NamedPrivateMode::ReportMouseClicks => {
                self.mode.contains(TermMode::MOUSE_REPORT_CLICK).into()
            },
            NamedPrivateMode::ReportCellMouseMotion => {
                self.mode.contains(TermMode::MOUSE_DRAG).into()
            },
            NamedPrivateMode::ReportAllMouseMotion => {
                self.mode.contains(TermMode::MOUSE_MOTION).into()
            },
            NamedPrivateMode::ReportFocusInOut => {
                self.mode.contains(TermMode::FOCUS_IN_OUT).into()
            },
            NamedPrivateMode::Utf8Mouse => self.mode.contains(TermMode::UTF8_MOUSE).into(),
            NamedPrivateMode::SgrMouse => self.mode.contains(TermMode::SGR_MOUSE).into(),
            NamedPrivateMode::AlternateScroll => {
                self.mode.contains(TermMode::ALTERNATE_SCROLL).into()
            },
            NamedPrivateMode::UrgencyHints => {
                self.mode.contains(TermMode::URGENCY_HINTS).into()
            },
            NamedPrivateMode::SwapScreenAndSetRestoreCursor => {
                self.mode.contains(TermMode::ALT_SCREEN).into()
            },
            NamedPrivateMode::BracketedPaste => {
                self.mode.contains(TermMode::BRACKETED_PASTE).into()
            },
            // NOTE(review): SyncUpdate always reports `Reset` here;
            // presumably tracked outside of `TermMode` — confirm.
            NamedPrivateMode::SyncUpdate => ModeState::Reset,
            NamedPrivateMode::ColumnMode => ModeState::NotSupported,
        },
        PrivateMode::Unknown(_) => ModeState::NotSupported,
    };
    self.event_proxy.send_event(Event::PtyWrite(format!(
        "\x1b[?{};{}$y",
        mode.raw(),
        state as u8,
    )));
}
/// Enable an ANSI (public) mode.
#[inline]
fn set_mode(&mut self, mode: ansi::Mode) {
    let mode = match mode {
        ansi::Mode::Named(mode) => mode,
        ansi::Mode::Unknown(mode) => {
            debug!("Ignoring unknown mode {} in set_mode", mode);
            return;
        },
    };
    trace!("Setting public mode: {:?}", mode);
    match mode {
        NamedMode::Insert => self.mode.insert(TermMode::INSERT),
        NamedMode::LineFeedNewLine => self.mode.insert(TermMode::LINE_FEED_NEW_LINE),
    }
}
/// Disable an ANSI (public) mode.
#[inline]
fn unset_mode(&mut self, mode: ansi::Mode) {
    let mode = match mode {
        ansi::Mode::Named(mode) => mode,
        ansi::Mode::Unknown(mode) => {
            debug!("Ignoring unknown mode {} in unset_mode", mode);
            return;
        },
    };
    // Fixed: previously logged "Setting public mode" while unsetting,
    // inconsistent with `unset_private_mode`'s "Unsetting" wording.
    trace!("Unsetting public mode: {:?}", mode);
    match mode {
        NamedMode::Insert => {
            self.mode.remove(TermMode::INSERT);
            // Leaving insert mode requires a repaint of the whole terminal.
            self.mark_fully_damaged();
        },
        NamedMode::LineFeedNewLine => self.mode.remove(TermMode::LINE_FEED_NEW_LINE),
    }
}
/// Report the state of an ANSI mode with a `\x1b[<mode>;<state>$y` reply
/// written to the PTY.
#[inline]
fn report_mode(&mut self, mode: ansi::Mode) {
    trace!("Reporting mode {mode:?}");
    let state = match mode {
        ansi::Mode::Named(mode) => match mode {
            NamedMode::Insert => self.mode.contains(TermMode::INSERT).into(),
            NamedMode::LineFeedNewLine => {
                self.mode.contains(TermMode::LINE_FEED_NEW_LINE).into()
            },
        },
        ansi::Mode::Unknown(_) => ModeState::NotSupported,
    };
    self.event_proxy.send_event(Event::PtyWrite(format!(
        "\x1b[{};{}$y",
        mode.raw(),
        state as u8,
    )));
}
/// Set the scrolling region from 1-based `top`/`bottom` line numbers, then
/// move the cursor to the origin.
#[inline]
fn set_scrolling_region(&mut self, top: usize, bottom: Option<usize>) {
    // Fallback to the last line as default.
    let bottom = bottom.unwrap_or_else(|| self.screen_lines());
    // A region must span at least two lines; otherwise ignore the request.
    if top >= bottom {
        debug!("Invalid scrolling region: ({};{})", top, bottom);
        return;
    }
    // Bottom should be included in the range, but range end is not
    // usually included. One option would be to use an inclusive
    // range, but instead we just let the open range end be 1
    // higher.
    let start = Line(top as i32 - 1);
    let end = Line(bottom as i32);
    trace!("Setting scrolling region: ({};{})", start, end);
    // Clamp the region to the screen.
    let screen_lines = Line(self.screen_lines() as i32);
    self.scroll_region.start = cmp::min(start, screen_lines);
    self.scroll_region.end = cmp::min(end, screen_lines);
    self.goto(0, 0);
}
/// Enable application keypad mode.
#[inline]
fn set_keypad_application_mode(&mut self) {
    trace!("Setting keypad application mode");
    self.mode.insert(TermMode::APP_KEYPAD);
}
/// Disable application keypad mode.
#[inline]
fn unset_keypad_application_mode(&mut self) {
    trace!("Unsetting keypad application mode");
    self.mode.remove(TermMode::APP_KEYPAD);
}
/// Assign `charset` to the designation slot `index`.
#[inline]
fn configure_charset(&mut self, index: CharsetIndex, charset: StandardCharset) {
    trace!("Configuring charset {:?} as {:?}", index, charset);
    self.grid.cursor.charsets[index] = charset;
}
/// Select which charset designation slot is active.
#[inline]
fn set_active_charset(&mut self, index: CharsetIndex) {
    trace!("Setting active charset {:?}", index);
    self.active_charset = index;
}
/// Set or clear the cursor style; `None` falls back to the configured
/// default elsewhere.
#[inline]
fn set_cursor_style(&mut self, style: Option<CursorStyle>) {
    trace!("Setting cursor style {:?}", style);
    self.cursor_style = style;
    // Notify UI about blinking changes.
    self.event_proxy.send_event(Event::CursorBlinkingChange);
}
/// Override only the cursor shape, keeping the current blinking setting.
#[inline]
fn set_cursor_shape(&mut self, shape: CursorShape) {
    trace!("Setting cursor shape {:?}", shape);
    let style = self.cursor_style.get_or_insert(self.config.default_cursor_style);
    style.shape = shape;
}
/// Set or reset the window title and notify the UI.
#[inline]
fn set_title(&mut self, title: Option<String>) {
    trace!("Setting title to '{:?}'", title);
    // Remember the title locally, reusing the existing allocation.
    self.title.clone_from(&title);
    // `None` asks the UI to restore its default title.
    self.event_proxy.send_event(title.map_or(Event::ResetTitle, Event::Title));
}
/// Push the current title onto the title stack.
#[inline]
fn push_title(&mut self) {
    trace!("Pushing '{:?}' onto title stack", self.title);
    // Evict the oldest entry once the stack hits its depth limit.
    if self.title_stack.len() >= TITLE_STACK_MAX_DEPTH {
        let removed = self.title_stack.remove(0);
        trace!(
            "Removing '{:?}' from bottom of title stack that exceeds its maximum depth",
            removed
        );
    }
    self.title_stack.push(self.title.clone());
}
/// Restore the most recently pushed title; no-op on an empty stack.
#[inline]
fn pop_title(&mut self) {
    trace!("Attempting to pop title from stack...");
    if let Some(popped) = self.title_stack.pop() {
        trace!("Title '{:?}' popped from stack", popped);
        self.set_title(popped);
    }
}
/// Report the text area size in pixels with a `\x1b[4;<height>;<width>t`
/// reply.
#[inline]
fn text_area_size_pixels(&mut self) {
    // Pixel dimensions are only known to the UI, so the reply is built in a
    // callback once a window size is provided.
    self.event_proxy.send_event(Event::TextAreaSizeRequest(Arc::new(move |window_size| {
        let height = window_size.num_lines * window_size.cell_height;
        let width = window_size.num_cols * window_size.cell_width;
        format!("\x1b[4;{height};{width}t")
    })));
}
/// Report the text area size in character cells with a
/// `\x1b[8;<lines>;<columns>t` reply.
#[inline]
fn text_area_size_chars(&mut self) {
    let text = format!("\x1b[8;{};{}t", self.screen_lines(), self.columns());
    self.event_proxy.send_event(Event::PtyWrite(text));
}
}
/// The state of the [`Mode`] and [`PrivateMode`].
#[repr(u8)]
#[derive(Debug, Clone, Copy)]
enum ModeState {
    /// The mode is not supported.
    NotSupported = 0,
    /// The mode is currently set.
    Set = 1,
    /// The mode is currently not set.
    Reset = 2,
}

impl From<bool> for ModeState {
    /// Map a boolean mode flag to `Set`/`Reset`.
    fn from(value: bool) -> Self {
        match value {
            true => Self::Set,
            false => Self::Reset,
        }
    }
}
/// Terminal version for escape sequence reports.
///
/// This returns the current terminal version as a unique number based on alacritty_terminal's
/// semver version. The different versions are padded to ensure that a higher semver version will
/// always report a higher version number.
fn version_number(mut version: &str) -> usize {
    // Drop any pre-release/build suffix such as "-dev" or "-rc1".
    if let Some(separator) = version.rfind('-') {
        version = &version[..separator];
    }
    // Fold the semver components into one number, giving each component two
    // decimal digits and weighting the most significant component highest.
    // Non-numeric components contribute zero.
    version
        .split('.')
        .rev()
        .enumerate()
        .map(|(i, component)| usize::pow(100, i as u32) * component.parse::<usize>().unwrap_or(0))
        .sum()
}
/// The clipboard buffer a store/load operation targets.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum ClipboardType {
    /// The regular clipboard.
    Clipboard,
    /// The (primary) selection buffer.
    Selection,
}
/// Tabstop positions, one flag per screen column.
struct TabStops {
    // `tabs[i]` is true when column `i` is a tabstop.
    tabs: Vec<bool>,
}
impl TabStops {
    /// Create default tabstops, one every `INITIAL_TABSTOPS` columns.
    #[inline]
    fn new(columns: usize) -> TabStops {
        TabStops { tabs: (0..columns).map(|i| i % INITIAL_TABSTOPS == 0).collect() }
    }

    /// Remove all tabstops.
    #[inline]
    fn clear_all(&mut self) {
        // Safe replacement for the previous `unsafe ptr::write_bytes`;
        // `slice::fill` compiles to the same memset.
        self.tabs.fill(false);
    }

    /// Increase tabstop capacity.
    #[inline]
    fn resize(&mut self, columns: usize) {
        // New columns continue the every-`INITIAL_TABSTOPS` pattern.
        let mut index = self.tabs.len();
        self.tabs.resize_with(columns, || {
            let is_tabstop = index % INITIAL_TABSTOPS == 0;
            index += 1;
            is_tabstop
        });
    }
}
impl Index<Column> for TabStops {
    type Output = bool;
    /// Whether the given column is a tabstop.
    fn index(&self, index: Column) -> &bool {
        &self.tabs[index.0]
    }
}
impl IndexMut<Column> for TabStops {
    /// Mutable access to the tabstop flag of the given column.
    fn index_mut(&mut self, index: Column) -> &mut bool {
        self.tabs.index_mut(index.0)
    }
}
/// Terminal cursor rendering information.
#[derive(Copy, Clone, PartialEq, Eq)]
pub struct RenderableCursor {
pub shape: CursorShape,
pub point: Point,
}
impl RenderableCursor {
    /// Resolve the cursor's render position and shape from the terminal.
    fn new<T>(term: &Term<T>) -> Self {
        // Cursor position.
        let vi_mode = term.mode().contains(TermMode::VI);
        let mut point = if vi_mode { term.vi_mode_cursor.point } else { term.grid.cursor.point };
        // Draw on the wide character itself rather than on its spacer cell.
        if term.grid[point].flags.contains(Flags::WIDE_CHAR_SPACER) {
            point.column -= 1;
        }
        // Cursor shape.
        // The vi cursor stays visible even when SHOW_CURSOR is unset.
        let shape = if !vi_mode && !term.mode().contains(TermMode::SHOW_CURSOR) {
            CursorShape::Hidden
        } else {
            term.cursor_style().shape
        };
        Self { shape, point }
    }
}
/// Visible terminal content.
///
/// This contains all content required to render the current terminal view.
pub struct RenderableContent<'a> {
    /// Iterator over the cells of the visible region.
    pub display_iter: GridIterator<'a, Cell>,
    /// Active selection, if any, resolved to a concrete range.
    pub selection: Option<SelectionRange>,
    /// Cursor rendering information.
    pub cursor: RenderableCursor,
    /// Number of lines the viewport is scrolled back.
    pub display_offset: usize,
    /// Terminal color palette.
    pub colors: &'a color::Colors,
    /// Active terminal modes.
    pub mode: TermMode,
}
impl<'a> RenderableContent<'a> {
    /// Collect everything needed to render the terminal's current view.
    fn new<T>(term: &'a Term<T>) -> Self {
        Self {
            display_iter: term.grid().display_iter(),
            display_offset: term.grid().display_offset(),
            cursor: RenderableCursor::new(term),
            selection: term.selection.as_ref().and_then(|s| s.to_range(term)),
            colors: &term.colors,
            mode: *term.mode(),
        }
    }
}
/// Terminal test helpers.
pub mod test {
    use super::*;
    #[cfg(feature = "serde")]
    use serde::{Deserialize, Serialize};
    use crate::event::VoidListener;
    /// Terminal dimensions, decoupled from a live grid.
    #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
    pub struct TermSize {
        pub columns: usize,
        pub screen_lines: usize,
    }
    impl TermSize {
        pub fn new(columns: usize, screen_lines: usize) -> Self {
            Self { columns, screen_lines }
        }
    }
    impl Dimensions for TermSize {
        fn total_lines(&self) -> usize {
            // No scrollback in the mock size; total equals visible lines.
            self.screen_lines()
        }
        fn screen_lines(&self) -> usize {
            self.screen_lines
        }
        fn columns(&self) -> usize {
            self.columns
        }
    }
    /// Construct a terminal from its content as string.
    ///
    /// A `\n` will break line and `\r\n` will break line without wrapping.
    ///
    /// # Examples
    ///
    /// ```rust
    /// use alacritty_terminal::term::test::mock_term;
    ///
    /// // Create a terminal with the following cells:
    /// //
    /// // [h][e][l][l][o] <- WRAPLINE flag set
    /// // [:][)][ ][ ][ ]
    /// // [t][e][s][t][ ]
    /// mock_term(
    ///     "\
    ///     hello\n:)\r\ntest",
    /// );
    /// ```
    pub fn mock_term(content: &str) -> Term<VoidListener> {
        let lines: Vec<&str> = content.split('\n').collect();
        // Width of the widest line, accounting for fullwidth characters.
        let num_cols = lines
            .iter()
            .map(|line| line.chars().filter(|c| *c != '\r').map(|c| c.width().unwrap()).sum())
            .max()
            .unwrap_or(0);
        // Create terminal with the appropriate dimensions.
        let size = TermSize::new(num_cols, lines.len());
        let mut term = Term::new(Config::default(), &size, VoidListener);
        // Fill terminal with content.
        for (line, text) in lines.iter().enumerate() {
            let line = Line(line as i32);
            // Lines broken by a plain `\n` (no trailing `\r`) are wrapped.
            if !text.ends_with('\r') && line + 1 != lines.len() {
                term.grid[line][Column(num_cols - 1)].flags.insert(Flags::WRAPLINE);
            }
            let mut index = 0;
            for c in text.chars().take_while(|c| *c != '\r') {
                term.grid[line][Column(index)].c = c;
                // Handle fullwidth characters.
                let width = c.width().unwrap();
                if width == 2 {
                    term.grid[line][Column(index)].flags.insert(Flags::WIDE_CHAR);
                    term.grid[line][Column(index + 1)].flags.insert(Flags::WIDE_CHAR_SPACER);
                }
                index += width;
            }
        }
        term
    }
}
#[cfg(test)]
mod tests {
use super::*;
use std::mem;
use crate::event::VoidListener;
use crate::grid::{Grid, Scroll};
use crate::index::{Column, Point, Side};
use crate::selection::{Selection, SelectionType};
use crate::term::cell::{Cell, Flags};
use crate::term::test::TermSize;
use crate::vte::ansi::{self, CharsetIndex, Handler, StandardCharset};
// Paging up must clamp at the top of the scrollback.
#[test]
fn scroll_display_page_up() {
    let size = TermSize::new(5, 10);
    let mut term = Term::new(Config::default(), &size, VoidListener);
    // Create 11 lines of scrollback.
    for _ in 0..20 {
        term.newline();
    }
    // Scrollable amount to top is 11.
    term.scroll_display(Scroll::PageUp);
    assert_eq!(term.vi_mode_cursor.point, Point::new(Line(-1), Column(0)));
    assert_eq!(term.grid.display_offset(), 10);
    // Scrollable amount to top is 1.
    term.scroll_display(Scroll::PageUp);
    assert_eq!(term.vi_mode_cursor.point, Point::new(Line(-2), Column(0)));
    assert_eq!(term.grid.display_offset(), 11);
    // Scrollable amount to top is 0.
    term.scroll_display(Scroll::PageUp);
    assert_eq!(term.vi_mode_cursor.point, Point::new(Line(-2), Column(0)));
    assert_eq!(term.grid.display_offset(), 11);
}
// Paging down must clamp at the bottom of the viewport.
#[test]
fn scroll_display_page_down() {
    let size = TermSize::new(5, 10);
    let mut term = Term::new(Config::default(), &size, VoidListener);
    // Create 11 lines of scrollback.
    for _ in 0..20 {
        term.newline();
    }
    // Change display_offset to topmost.
    term.grid_mut().scroll_display(Scroll::Top);
    term.vi_mode_cursor = ViModeCursor::new(Point::new(Line(-11), Column(0)));
    // Scrollable amount to bottom is 11.
    term.scroll_display(Scroll::PageDown);
    assert_eq!(term.vi_mode_cursor.point, Point::new(Line(-1), Column(0)));
    assert_eq!(term.grid.display_offset(), 1);
    // Scrollable amount to bottom is 1.
    term.scroll_display(Scroll::PageDown);
    assert_eq!(term.vi_mode_cursor.point, Point::new(Line(0), Column(0)));
    assert_eq!(term.grid.display_offset(), 0);
    // Scrollable amount to bottom is 0.
    term.scroll_display(Scroll::PageDown);
    assert_eq!(term.vi_mode_cursor.point, Point::new(Line(0), Column(0)));
    assert_eq!(term.grid.display_offset(), 0);
}
// Simple selections across empty and wrapped lines convert to the right text.
#[test]
fn simple_selection_works() {
    let size = TermSize::new(5, 5);
    let mut term = Term::new(Config::default(), &size, VoidListener);
    let grid = term.grid_mut();
    // Fill lines 0, 2 and 3 with `"aaa"`; line 1 stays empty.
    for i in 0..4 {
        if i == 1 {
            continue;
        }
        grid[Line(i)][Column(0)].c = '"';
        for j in 1..4 {
            grid[Line(i)][Column(j)].c = 'a';
        }
        grid[Line(i)][Column(4)].c = '"';
    }
    grid[Line(2)][Column(0)].c = ' ';
    grid[Line(2)][Column(4)].c = ' ';
    grid[Line(2)][Column(4)].flags.insert(Flags::WRAPLINE);
    grid[Line(3)][Column(0)].c = ' ';
    // Multiple lines contain an empty line.
    term.selection = Some(Selection::new(
        SelectionType::Simple,
        Point { line: Line(0), column: Column(0) },
        Side::Left,
    ));
    if let Some(s) = term.selection.as_mut() {
        s.update(Point { line: Line(2), column: Column(4) }, Side::Right);
    }
    assert_eq!(term.selection_to_string(), Some(String::from("\"aaa\"\n\n aaa ")));
    // A wrapline.
    term.selection = Some(Selection::new(
        SelectionType::Simple,
        Point { line: Line(2), column: Column(0) },
        Side::Left,
    ));
    if let Some(s) = term.selection.as_mut() {
        s.update(Point { line: Line(3), column: Column(4) }, Side::Right);
    }
    assert_eq!(term.selection_to_string(), Some(String::from(" aaa aaa\"")));
}
// Semantic selection must stop at escape characters and follow wraplines.
#[test]
fn semantic_selection_works() {
    let size = TermSize::new(5, 3);
    let mut term = Term::new(Config::default(), &size, VoidListener);
    let mut grid: Grid<Cell> = Grid::new(3, 5, 0);
    for i in 0..5 {
        for j in 0..2 {
            grid[Line(j)][Column(i)].c = 'a';
        }
    }
    // '"' acts as the semantic escape character (configured below).
    grid[Line(0)][Column(0)].c = '"';
    grid[Line(0)][Column(3)].c = '"';
    grid[Line(1)][Column(2)].c = '"';
    grid[Line(0)][Column(4)].flags.insert(Flags::WRAPLINE);
    let mut escape_chars = String::from("\"");
    mem::swap(&mut term.grid, &mut grid);
    mem::swap(&mut term.config.semantic_escape_chars, &mut escape_chars);
    {
        // Word bounded by escape characters on both sides.
        term.selection = Some(Selection::new(
            SelectionType::Semantic,
            Point { line: Line(0), column: Column(1) },
            Side::Left,
        ));
        assert_eq!(term.selection_to_string(), Some(String::from("aa")));
    }
    {
        // Word continuing across the wrapline onto the next row.
        term.selection = Some(Selection::new(
            SelectionType::Semantic,
            Point { line: Line(0), column: Column(4) },
            Side::Left,
        ));
        assert_eq!(term.selection_to_string(), Some(String::from("aaa")));
    }
    {
        term.selection = Some(Selection::new(
            SelectionType::Semantic,
            Point { line: Line(1), column: Column(1) },
            Side::Left,
        ));
        assert_eq!(term.selection_to_string(), Some(String::from("aaa")));
    }
}
// Line selection grabs the whole line including a trailing newline.
#[test]
fn line_selection_works() {
    let size = TermSize::new(5, 1);
    let mut term = Term::new(Config::default(), &size, VoidListener);
    let mut grid: Grid<Cell> = Grid::new(1, 5, 0);
    for i in 0..5 {
        grid[Line(0)][Column(i)].c = 'a';
    }
    grid[Line(0)][Column(0)].c = '"';
    grid[Line(0)][Column(3)].c = '"';
    mem::swap(&mut term.grid, &mut grid);
    term.selection = Some(Selection::new(
        SelectionType::Lines,
        Point { line: Line(0), column: Column(3) },
        Side::Left,
    ));
    assert_eq!(term.selection_to_string(), Some(String::from("\"aa\"a\n")));
}
// Block (rectangular) selection must cut columns independent of line content.
#[test]
fn block_selection_works() {
    let size = TermSize::new(5, 5);
    let mut term = Term::new(Config::default(), &size, VoidListener);
    let grid = term.grid_mut();
    for i in 1..4 {
        grid[Line(i)][Column(0)].c = '"';
        for j in 1..4 {
            grid[Line(i)][Column(j)].c = 'a';
        }
        grid[Line(i)][Column(4)].c = '"';
    }
    grid[Line(2)][Column(2)].c = ' ';
    grid[Line(2)][Column(4)].flags.insert(Flags::WRAPLINE);
    grid[Line(3)][Column(4)].c = ' ';
    term.selection = Some(Selection::new(
        SelectionType::Block,
        Point { line: Line(0), column: Column(3) },
        Side::Left,
    ));
    // The same column.
    if let Some(s) = term.selection.as_mut() {
        s.update(Point { line: Line(3), column: Column(3) }, Side::Right);
    }
    assert_eq!(term.selection_to_string(), Some(String::from("\na\na\na")));
    // The first column.
    if let Some(s) = term.selection.as_mut() {
        s.update(Point { line: Line(3), column: Column(0) }, Side::Left);
    }
    assert_eq!(term.selection_to_string(), Some(String::from("\n\"aa\n\"a\n\"aa")));
    // The last column.
    if let Some(s) = term.selection.as_mut() {
        s.update(Point { line: Line(3), column: Column(4) }, Side::Right);
    }
    assert_eq!(term.selection_to_string(), Some(String::from("\na\"\na\"\na")));
}
/// Check that the grid can be serialized back and forth losslessly.
///
/// This test is in the term module as opposed to the grid since we want to
/// test this property with a T=Cell.
#[test]
#[cfg(feature = "serde")]
fn grid_serde() {
    let original: Grid<Cell> = Grid::new(24, 80, 0);
    let json = serde_json::to_string(&original).expect("ser");
    let roundtripped: Grid<Cell> = serde_json::from_str(&json).expect("de");
    assert_eq!(roundtripped, original);
}
#[test]
fn input_line_drawing_character() {
    let size = TermSize::new(7, 17);
    let mut term = Term::new(Config::default(), &size, VoidListener);

    // With the special line-drawing charset active, ASCII 'a' is remapped.
    term.configure_charset(CharsetIndex::G0, StandardCharset::SpecialCharacterAndLineDrawing);
    term.input('a');

    let origin = Point::new(Line(0), Column(0));
    assert_eq!(term.grid()[origin].c, '▒');
}
#[test]
fn clearing_viewport_keeps_history_position() {
    let size = TermSize::new(10, 20);
    let mut term = Term::new(Config::default(), &size, VoidListener);

    // Create 10 lines of scrollback.
    (0..29).for_each(|_| term.newline());

    // Scroll the viewport to the top of the history.
    term.scroll_display(Scroll::Top);
    assert_eq!(term.grid.display_offset(), 10);

    // Clearing only the viewport must not move the display offset.
    term.clear_screen(ansi::ClearMode::All);
    assert_eq!(term.grid.display_offset(), 10);
}
#[test]
fn clearing_viewport_with_vi_mode_keeps_history_position() {
    let size = TermSize::new(10, 20);
    let mut term = Term::new(Config::default(), &size, VoidListener);
    // Create 10 lines of scrollback.
    for _ in 0..29 {
        term.newline();
    }
    // Enable vi mode.
    term.toggle_vi_mode();
    // Change the display area and the vi cursor position.
    term.scroll_display(Scroll::Top);
    term.vi_mode_cursor.point = Point::new(Line(-5), Column(3));
    assert_eq!(term.grid.display_offset(), 10);
    // Clear the viewport.
    term.clear_screen(ansi::ClearMode::All);
    // Clearing the viewport must touch neither the scrollback position nor the
    // vi cursor, which points into the history (negative line index).
    assert_eq!(term.grid.display_offset(), 10);
    assert_eq!(term.vi_mode_cursor.point, Point::new(Line(-5), Column(3)));
}
#[test]
fn clearing_scrollback_resets_display_offset() {
    let size = TermSize::new(10, 20);
    let mut term = Term::new(Config::default(), &size, VoidListener);
    // Create 10 lines of scrollback.
    for _ in 0..29 {
        term.newline();
    }
    // Change the display area.
    term.scroll_display(Scroll::Top);
    assert_eq!(term.grid.display_offset(), 10);
    // Clear the scrollback buffer.
    term.clear_screen(ansi::ClearMode::Saved);
    // With the history gone there is nothing to scroll into, so the viewport
    // snaps back to the bottom.
    assert_eq!(term.grid.display_offset(), 0);
}
#[test]
fn clearing_scrollback_sets_vi_cursor_into_viewport() {
    let size = TermSize::new(10, 20);
    let mut term = Term::new(Config::default(), &size, VoidListener);
    // Create 10 lines of scrollback.
    for _ in 0..29 {
        term.newline();
    }
    // Enable vi mode.
    term.toggle_vi_mode();
    // Change the display area and the vi cursor position.
    term.scroll_display(Scroll::Top);
    term.vi_mode_cursor.point = Point::new(Line(-5), Column(3));
    assert_eq!(term.grid.display_offset(), 10);
    // Clear the scrollback buffer.
    term.clear_screen(ansi::ClearMode::Saved);
    // The vi cursor pointed into the now-deleted history, so it is clamped
    // back into the visible area (line 0) while keeping its column.
    assert_eq!(term.grid.display_offset(), 0);
    assert_eq!(term.vi_mode_cursor.point, Point::new(Line(0), Column(3)));
}
#[test]
fn clear_saved_lines() {
    let size = TermSize::new(7, 17);
    let mut term = Term::new(Config::default(), &size, VoidListener);
    // Add one line of scrollback.
    term.grid.scroll_up(&(Line(0)..Line(1)), 1);
    // Clear the history.
    term.clear_screen(ansi::ClearMode::Saved);
    // Make sure that scrolling does not change the grid: with the saved lines
    // cleared, scrolling to the top should be a no-op.
    let mut scrolled_grid = term.grid.clone();
    scrolled_grid.scroll_display(Scroll::Top);
    // Truncate grids for comparison.
    scrolled_grid.truncate();
    term.grid.truncate();
    assert_eq!(term.grid, scrolled_grid);
}
#[test]
fn vi_cursor_keep_pos_on_scrollback_buffer() {
    let size = TermSize::new(5, 10);
    let mut term = Term::new(Config::default(), &size, VoidListener);
    // Create 11 lines of scrollback.
    for _ in 0..20 {
        term.newline();
    }
    // Enable vi mode.
    term.toggle_vi_mode();
    term.scroll_display(Scroll::Top);
    term.vi_mode_cursor.point.line = Line(-11);
    // A linefeed pushes one more line into history; the vi cursor stays pinned
    // to the same content line, so its line index moves up by one.
    term.linefeed();
    assert_eq!(term.vi_mode_cursor.point.line, Line(-12));
}
#[test]
fn grow_lines_updates_active_cursor_pos() {
    let mut size = TermSize::new(100, 10);
    let mut term = Term::new(Config::default(), &size, VoidListener);
    // Create 10 lines of scrollback.
    for _ in 0..19 {
        term.newline();
    }
    assert_eq!(term.history_size(), 10);
    assert_eq!(term.grid.cursor.point, Point::new(Line(9), Column(0)));
    // Increase visible lines.
    size.screen_lines = 30;
    term.resize(size);
    // Growing the viewport pulls all history lines back into view and moves
    // the cursor down with the restored content.
    assert_eq!(term.history_size(), 0);
    assert_eq!(term.grid.cursor.point, Point::new(Line(19), Column(0)));
}
#[test]
fn grow_lines_updates_inactive_cursor_pos() {
    let mut size = TermSize::new(100, 10);
    let mut term = Term::new(Config::default(), &size, VoidListener);
    // Create 10 lines of scrollback.
    for _ in 0..19 {
        term.newline();
    }
    assert_eq!(term.history_size(), 10);
    assert_eq!(term.grid.cursor.point, Point::new(Line(9), Column(0)));
    // Enter alt screen.
    term.set_private_mode(NamedPrivateMode::SwapScreenAndSetRestoreCursor.into());
    // Increase visible lines.
    size.screen_lines = 30;
    term.resize(size);
    // Leave alt screen.
    term.unset_private_mode(NamedPrivateMode::SwapScreenAndSetRestoreCursor.into());
    // The inactive (primary) grid must have been resized as well, so its
    // cursor position matches the active-resize case after swapping back.
    assert_eq!(term.history_size(), 0);
    assert_eq!(term.grid.cursor.point, Point::new(Line(19), Column(0)));
}
#[test]
fn shrink_lines_updates_active_cursor_pos() {
    let mut size = TermSize::new(100, 10);
    let mut term = Term::new(Config::default(), &size, VoidListener);
    // Create 10 lines of scrollback.
    for _ in 0..19 {
        term.newline();
    }
    assert_eq!(term.history_size(), 10);
    assert_eq!(term.grid.cursor.point, Point::new(Line(9), Column(0)));
    // Shrink visible lines.
    size.screen_lines = 5;
    term.resize(size);
    // The 5 lines scrolled out of view are pushed into history and the cursor
    // moves up with the shrinking viewport.
    assert_eq!(term.history_size(), 15);
    assert_eq!(term.grid.cursor.point, Point::new(Line(4), Column(0)));
}
#[test]
fn shrink_lines_updates_inactive_cursor_pos() {
    let mut size = TermSize::new(100, 10);
    let mut term = Term::new(Config::default(), &size, VoidListener);
    // Create 10 lines of scrollback.
    for _ in 0..19 {
        term.newline();
    }
    assert_eq!(term.history_size(), 10);
    assert_eq!(term.grid.cursor.point, Point::new(Line(9), Column(0)));
    // Enter alt screen.
    term.set_private_mode(NamedPrivateMode::SwapScreenAndSetRestoreCursor.into());
    // Shrink visible lines.
    size.screen_lines = 5;
    term.resize(size);
    // Leave alt screen.
    term.unset_private_mode(NamedPrivateMode::SwapScreenAndSetRestoreCursor.into());
    // The inactive (primary) grid must have been resized as well, so its
    // cursor position matches the active-resize case after swapping back.
    assert_eq!(term.history_size(), 15);
    assert_eq!(term.grid.cursor.point, Point::new(Line(4), Column(0)));
}
#[test]
fn damage_public_usage() {
    let size = TermSize::new(10, 10);
    let mut term = Term::new(Config::default(), &size, VoidListener);
    // Reset terminal for partial damage tests since it's initialized as fully damaged.
    term.reset_damage();
    // Test that we damage input from [`Term::input`].
    let left = term.grid.cursor.point.column.0;
    term.input('d');
    term.input('a');
    term.input('m');
    term.input('a');
    term.input('g');
    term.input('e');
    let right = term.grid.cursor.point.column.0;
    let mut damaged_lines = match term.damage() {
        TermDamage::Full => panic!("Expected partial damage, however got Full"),
        TermDamage::Partial(damaged_lines) => damaged_lines,
    };
    assert_eq!(damaged_lines.next(), Some(LineDamageBounds { line: 0, left, right }));
    assert_eq!(damaged_lines.next(), None);
    term.reset_damage();
    // Create scrollback; scrolling the content damages everything.
    for _ in 0..20 {
        term.newline();
    }
    match term.damage() {
        TermDamage::Full => (),
        TermDamage::Partial(_) => panic!("Expected Full damage, however got Partial"),
    };
    term.reset_damage();
    term.scroll_display(Scroll::Delta(10));
    term.reset_damage();
    // Writes to the active area are not reported while it is scrolled out of
    // the viewport.
    for idx in 0..term.columns() {
        term.goto(idx as i32, idx);
    }
    let mut damaged_lines = match term.damage() {
        TermDamage::Full => panic!("Expected partial damage, however got Full"),
        TermDamage::Partial(damaged_lines) => damaged_lines,
    };
    assert_eq!(damaged_lines.next(), None);
    // Scroll back into the viewport, so we have 2 visible lines which terminal can write
    // to.
    term.scroll_display(Scroll::Delta(-2));
    term.reset_damage();
    term.goto(0, 0);
    term.goto(1, 0);
    term.goto(2, 0);
    let display_offset = term.grid().display_offset();
    let mut damaged_lines = match term.damage() {
        TermDamage::Full => panic!("Expected partial damage, however got Full"),
        TermDamage::Partial(damaged_lines) => damaged_lines,
    };
    // Only the two lines inside the viewport are reported; damage lines are
    // given in viewport coordinates, hence the `display_offset` shift.
    assert_eq!(
        damaged_lines.next(),
        Some(LineDamageBounds { line: display_offset, left: 0, right: 0 })
    );
    assert_eq!(
        damaged_lines.next(),
        Some(LineDamageBounds { line: display_offset + 1, left: 0, right: 0 })
    );
    assert_eq!(damaged_lines.next(), None);
}
#[test]
fn damage_cursor_movements() {
    let size = TermSize::new(10, 10);
    let mut term = Term::new(Config::default(), &size, VoidListener);
    let num_cols = term.columns();
    // Reset terminal for partial damage tests since it's initialized as fully damaged.
    term.reset_damage();
    term.goto(1, 1);
    // NOTE While we can use `[Term::damage]` to access terminal damage information, in the
    // following tests we will be accessing `term.damage.lines` directly to avoid adding extra
    // damage information (like cursor and Vi cursor), which we're not testing.
    assert_eq!(term.damage.lines[0], LineDamageBounds { line: 0, left: 0, right: 0 });
    assert_eq!(term.damage.lines[1], LineDamageBounds { line: 1, left: 1, right: 1 });
    term.damage.reset(num_cols);
    // Horizontal movement damages the span between old and new column.
    term.move_forward(3);
    assert_eq!(term.damage.lines[1], LineDamageBounds { line: 1, left: 1, right: 4 });
    term.damage.reset(num_cols);
    term.move_backward(8);
    assert_eq!(term.damage.lines[1], LineDamageBounds { line: 1, left: 0, right: 4 });
    term.goto(5, 5);
    term.damage.reset(num_cols);
    term.backspace();
    term.backspace();
    assert_eq!(term.damage.lines[5], LineDamageBounds { line: 5, left: 3, right: 5 });
    term.damage.reset(num_cols);
    // Vertical movement damages the cursor cell on both the old and new line.
    term.move_up(1);
    assert_eq!(term.damage.lines[5], LineDamageBounds { line: 5, left: 3, right: 3 });
    assert_eq!(term.damage.lines[4], LineDamageBounds { line: 4, left: 3, right: 3 });
    term.damage.reset(num_cols);
    term.move_down(1);
    term.move_down(1);
    assert_eq!(term.damage.lines[4], LineDamageBounds { line: 4, left: 3, right: 3 });
    assert_eq!(term.damage.lines[5], LineDamageBounds { line: 5, left: 3, right: 3 });
    assert_eq!(term.damage.lines[6], LineDamageBounds { line: 6, left: 3, right: 3 });
    term.damage.reset(num_cols);
    term.wrapline();
    assert_eq!(term.damage.lines[6], LineDamageBounds { line: 6, left: 3, right: 3 });
    assert_eq!(term.damage.lines[7], LineDamageBounds { line: 7, left: 0, right: 0 });
    term.move_forward(3);
    term.move_up(1);
    term.damage.reset(num_cols);
    term.linefeed();
    assert_eq!(term.damage.lines[6], LineDamageBounds { line: 6, left: 3, right: 3 });
    assert_eq!(term.damage.lines[7], LineDamageBounds { line: 7, left: 3, right: 3 });
    term.damage.reset(num_cols);
    term.carriage_return();
    assert_eq!(term.damage.lines[7], LineDamageBounds { line: 7, left: 0, right: 3 });
    term.damage.reset(num_cols);
    // Erase/delete operations damage the affected span of the line.
    term.erase_chars(5);
    assert_eq!(term.damage.lines[7], LineDamageBounds { line: 7, left: 0, right: 5 });
    term.damage.reset(num_cols);
    term.delete_chars(3);
    let right = term.columns() - 1;
    assert_eq!(term.damage.lines[7], LineDamageBounds { line: 7, left: 0, right });
    term.move_forward(term.columns());
    term.damage.reset(num_cols);
    term.move_backward_tabs(1);
    assert_eq!(term.damage.lines[7], LineDamageBounds { line: 7, left: 8, right });
    // Saving/restoring the cursor damages both the old and new position.
    term.save_cursor_position();
    term.goto(1, 1);
    term.damage.reset(num_cols);
    term.restore_cursor_position();
    assert_eq!(term.damage.lines[1], LineDamageBounds { line: 1, left: 1, right: 1 });
    assert_eq!(term.damage.lines[7], LineDamageBounds { line: 7, left: 8, right: 8 });
    term.damage.reset(num_cols);
    // Line clearing damages the cleared portion of the line.
    term.clear_line(ansi::LineClearMode::All);
    assert_eq!(term.damage.lines[7], LineDamageBounds { line: 7, left: 0, right });
    term.damage.reset(num_cols);
    term.clear_line(ansi::LineClearMode::Left);
    assert_eq!(term.damage.lines[7], LineDamageBounds { line: 7, left: 0, right: 8 });
    term.damage.reset(num_cols);
    term.clear_line(ansi::LineClearMode::Right);
    assert_eq!(term.damage.lines[7], LineDamageBounds { line: 7, left: 8, right });
    term.damage.reset(num_cols);
    term.reverse_index();
    assert_eq!(term.damage.lines[7], LineDamageBounds { line: 7, left: 8, right: 8 });
    assert_eq!(term.damage.lines[6], LineDamageBounds { line: 6, left: 8, right: 8 });
}
// Check which operations mark the terminal as fully damaged.
#[test]
fn full_damage() {
    let size = TermSize::new(100, 10);
    let mut term = Term::new(Config::default(), &size, VoidListener);
    // A freshly created terminal starts out fully damaged.
    assert!(term.damage.full);
    for _ in 0..20 {
        term.newline();
    }
    term.reset_damage();
    term.clear_screen(ansi::ClearMode::Above);
    assert!(term.damage.full);
    term.reset_damage();
    term.scroll_display(Scroll::Top);
    assert!(term.damage.full);
    term.reset_damage();
    // Sequential call to scroll display without doing anything shouldn't damage.
    term.scroll_display(Scroll::Top);
    assert!(!term.damage.full);
    term.reset_damage();
    term.set_options(Config::default());
    assert!(term.damage.full);
    term.reset_damage();
    term.scroll_down_relative(Line(5), 2);
    assert!(term.damage.full);
    term.reset_damage();
    term.scroll_up_relative(Line(3), 2);
    assert!(term.damage.full);
    term.reset_damage();
    term.deccolm();
    assert!(term.damage.full);
    term.reset_damage();
    term.decaln();
    assert!(term.damage.full);
    term.reset_damage();
    term.set_mode(NamedMode::Insert.into());
    // Just setting `Insert` mode shouldn't mark terminal as damaged.
    assert!(!term.damage.full);
    term.reset_damage();
    let color_index = 257;
    term.set_color(color_index, Rgb::default());
    assert!(term.damage.full);
    term.reset_damage();
    // Setting the same color once again shouldn't trigger full damage.
    term.set_color(color_index, Rgb::default());
    assert!(!term.damage.full);
    term.reset_color(color_index);
    assert!(term.damage.full);
    term.reset_damage();
    // We shouldn't trigger full damage when the cursor color gets updated.
    term.set_color(NamedColor::Cursor as usize, Rgb::default());
    assert!(!term.damage.full);
    // However requesting terminal damage should mark terminal as fully damaged in `Insert`
    // mode.
    let _ = term.damage();
    assert!(term.damage.full);
    term.reset_damage();
    term.unset_mode(NamedMode::Insert.into());
    assert!(term.damage.full);
    term.reset_damage();
    // Keep this as a last check, so we don't have to deal with restoring from alt-screen.
    term.swap_alt();
    assert!(term.damage.full);
    term.reset_damage();
    let size = TermSize::new(10, 10);
    term.resize(size);
    assert!(term.damage.full);
}
// Exercise the window title and its save/restore stack.
#[test]
fn window_title() {
    let size = TermSize::new(7, 17);
    let mut term = Term::new(Config::default(), &size, VoidListener);
    // Title None by default.
    assert_eq!(term.title, None);
    // Title can be set.
    term.set_title(Some("Test".into()));
    assert_eq!(term.title, Some("Test".into()));
    // Title can be pushed onto stack.
    term.push_title();
    term.set_title(Some("Next".into()));
    assert_eq!(term.title, Some("Next".into()));
    assert_eq!(term.title_stack.first().unwrap(), &Some("Test".into()));
    // Title can be popped from stack and set as the window title.
    term.pop_title();
    assert_eq!(term.title, Some("Test".into()));
    assert!(term.title_stack.is_empty());
    // Title stack doesn't grow infinitely; it is capped at 4096 entries.
    for _ in 0..4097 {
        term.push_title();
    }
    assert_eq!(term.title_stack.len(), 4096);
    // Title and title stack reset when terminal state is reset.
    term.push_title();
    term.reset_state();
    assert_eq!(term.title, None);
    assert!(term.title_stack.is_empty());
    // Title stack pops back to default.
    term.title = None;
    term.push_title();
    term.set_title(Some("Test".into()));
    term.pop_title();
    assert_eq!(term.title, None);
    // Title can be reset to default.
    term.title = Some("Test".into());
    term.set_title(None);
    assert_eq!(term.title, None);
}
#[test]
fn parse_cargo_version() {
    // The crate's own version must parse to at least `10_01`.
    assert!(version_number(env!("CARGO_PKG_VERSION")) >= 10_01);

    // Known version strings and their expected numeric encodings.
    let cases =
        [("0.0.1-dev", 1), ("0.1.2-dev", 1_02), ("1.2.3-dev", 1_02_03), ("999.99.99", 9_99_99_99)];
    for (version, expected) in cases {
        assert_eq!(version_number(version), expected);
    }
}
}
| rust | {
"argument_definitions": [
{
"definitions": [
"pub struct Point<L = Line, C = Column> {\n pub line: L,\n pub column: C,\n}"
],
"name": "point",
"type": "Point"
},
{
"definitions": [
"pub enum Direction {\n Left,\n Right,\n}"
],
"name": "direction",
"type": "Direction"
}
],
"end_line": 926,
"name": "expand_wide",
"signature": "pub fn expand_wide(&self, mut point: Point, direction: Direction) -> Point",
"start_line": 901
} | {
"class_name": "impl<T> Term<T> {\n #[inline]\n pub fn scroll_display(&mut self, scroll: Scroll)\n where\n T: EventListener,\n {\n let old_display_offset = self.grid.display_offset();\n self.grid.scroll_display(scroll);\n self.event_proxy.send_event(Event::MouseCursorDirty);\n\n // Clamp vi mode cursor to the viewport.\n let viewport_start = -(self.grid.display_offset() as i32);\n let viewport_end = viewport_start + self.bottommost_line().0;\n let vi_cursor_line = &mut self.vi_mode_cursor.point.line.0;\n *vi_cursor_line = cmp::min(viewport_end, cmp::max(viewport_start, *vi_cursor_line));\n self.vi_mode_recompute_selection();\n\n // Damage everything if display offset changed.\n if old_display_offset != self.grid().display_offset() {\n self.mark_fully_damaged();\n }\n }\n\n pub fn new<D: Dimensions>(config: Config, dimensions: &D, event_proxy: T) -> Term<T> {\n let num_cols = dimensions.columns();\n let num_lines = dimensions.screen_lines();\n\n let history_size = config.scrolling_history;\n let grid = Grid::new(num_lines, num_cols, history_size);\n let inactive_grid = Grid::new(num_lines, num_cols, 0);\n\n let tabs = TabStops::new(grid.columns());\n\n let scroll_region = Line(0)..Line(grid.screen_lines() as i32);\n\n // Initialize terminal damage, covering the entire terminal upon launch.\n let damage = TermDamageState::new(num_cols, num_lines);\n\n Term {\n inactive_grid,\n scroll_region,\n event_proxy,\n damage,\n config,\n grid,\n tabs,\n inactive_keyboard_mode_stack: Default::default(),\n keyboard_mode_stack: Default::default(),\n active_charset: Default::default(),\n vi_mode_cursor: Default::default(),\n cursor_style: Default::default(),\n colors: color::Colors::default(),\n title_stack: Default::default(),\n is_focused: Default::default(),\n selection: Default::default(),\n title: Default::default(),\n mode: Default::default(),\n }\n }\n\n /// Collect the information about the changes in the lines, which\n /// could be used to minimize the amount of drawing 
operations.\n ///\n /// The user controlled elements, like `Vi` mode cursor and `Selection` are **not** part of the\n /// collected damage state. Those could easily be tracked by comparing their old and new\n /// value between adjacent frames.\n ///\n /// After reading damage [`reset_damage`] should be called.\n ///\n /// [`reset_damage`]: Self::reset_damage\n #[must_use]\n pub fn damage(&mut self) -> TermDamage<'_> {\n // Ensure the entire terminal is damaged after entering insert mode.\n // Leaving is handled in the ansi handler.\n if self.mode.contains(TermMode::INSERT) {\n self.mark_fully_damaged();\n }\n\n let previous_cursor = mem::replace(&mut self.damage.last_cursor, self.grid.cursor.point);\n\n if self.damage.full {\n return TermDamage::Full;\n }\n\n // Add information about old cursor position and new one if they are not the same, so we\n // cover everything that was produced by `Term::input`.\n if self.damage.last_cursor != previous_cursor {\n // Cursor coordinates are always inside viewport even if you have `display_offset`.\n let point = Point::new(previous_cursor.line.0 as usize, previous_cursor.column);\n self.damage.damage_point(point);\n }\n\n // Always damage current cursor.\n self.damage_cursor();\n\n // NOTE: damage which changes all the content when the display offset is non-zero (e.g.\n // scrolling) is handled via full damage.\n let display_offset = self.grid().display_offset();\n TermDamage::Partial(TermDamageIterator::new(&self.damage.lines, display_offset))\n }\n\n /// Resets the terminal damage information.\n pub fn reset_damage(&mut self) {\n self.damage.reset(self.columns());\n }\n\n #[inline]\n fn mark_fully_damaged(&mut self) {\n self.damage.full = true;\n }\n\n /// Set new options for the [`Term`].\n pub fn set_options(&mut self, options: Config)\n where\n T: EventListener,\n {\n let old_config = mem::replace(&mut self.config, options);\n\n let title_event = match &self.title {\n Some(title) => Event::Title(title.clone()),\n None => 
Event::ResetTitle,\n };\n\n self.event_proxy.send_event(title_event);\n\n if self.mode.contains(TermMode::ALT_SCREEN) {\n self.inactive_grid.update_history(self.config.scrolling_history);\n } else {\n self.grid.update_history(self.config.scrolling_history);\n }\n\n if self.config.kitty_keyboard != old_config.kitty_keyboard {\n self.keyboard_mode_stack = Vec::new();\n self.inactive_keyboard_mode_stack = Vec::new();\n self.mode.remove(TermMode::KITTY_KEYBOARD_PROTOCOL);\n }\n\n // Damage everything on config updates.\n self.mark_fully_damaged();\n }\n\n /// Convert the active selection to a String.\n pub fn selection_to_string(&self) -> Option<String> {\n let selection_range = self.selection.as_ref().and_then(|s| s.to_range(self))?;\n let SelectionRange { start, end, .. } = selection_range;\n\n let mut res = String::new();\n\n match self.selection.as_ref() {\n Some(Selection { ty: SelectionType::Block, .. }) => {\n for line in (start.line.0..end.line.0).map(Line::from) {\n res += self\n .line_to_string(line, start.column..end.column, start.column.0 != 0)\n .trim_end();\n res += \"\\n\";\n }\n\n res += self.line_to_string(end.line, start.column..end.column, true).trim_end();\n },\n Some(Selection { ty: SelectionType::Lines, .. 
}) => {\n res = self.bounds_to_string(start, end) + \"\\n\";\n },\n _ => {\n res = self.bounds_to_string(start, end);\n },\n }\n\n Some(res)\n }\n\n /// Convert range between two points to a String.\n pub fn bounds_to_string(&self, start: Point, end: Point) -> String {\n let mut res = String::new();\n\n for line in (start.line.0..=end.line.0).map(Line::from) {\n let start_col = if line == start.line { start.column } else { Column(0) };\n let end_col = if line == end.line { end.column } else { self.last_column() };\n\n res += &self.line_to_string(line, start_col..end_col, line == end.line);\n }\n\n res.strip_suffix('\\n').map(str::to_owned).unwrap_or(res)\n }\n\n /// Convert a single line in the grid to a String.\n fn line_to_string(\n &self,\n line: Line,\n mut cols: Range<Column>,\n include_wrapped_wide: bool,\n ) -> String {\n let mut text = String::new();\n\n let grid_line = &self.grid[line];\n let line_length = cmp::min(grid_line.line_length(), cols.end + 1);\n\n // Include wide char when trailing spacer is selected.\n if grid_line[cols.start].flags.contains(Flags::WIDE_CHAR_SPACER) {\n cols.start -= 1;\n }\n\n let mut tab_mode = false;\n for column in (cols.start.0..line_length.0).map(Column::from) {\n let cell = &grid_line[column];\n\n // Skip over cells until next tab-stop once a tab was found.\n if tab_mode {\n if self.tabs[column] || cell.c != ' ' {\n tab_mode = false;\n } else {\n continue;\n }\n }\n\n if cell.c == '\\t' {\n tab_mode = true;\n }\n\n if !cell.flags.intersects(Flags::WIDE_CHAR_SPACER | Flags::LEADING_WIDE_CHAR_SPACER) {\n // Push cells primary character.\n text.push(cell.c);\n\n // Push zero-width characters.\n for c in cell.zerowidth().into_iter().flatten() {\n text.push(*c);\n }\n }\n }\n\n if cols.end >= self.columns() - 1\n && (line_length.0 == 0\n || !self.grid[line][line_length - 1].flags.contains(Flags::WRAPLINE))\n {\n text.push('\\n');\n }\n\n // If wide char is not part of the selection, but leading spacer is, include it.\n if 
line_length == self.columns()\n && line_length.0 >= 2\n && grid_line[line_length - 1].flags.contains(Flags::LEADING_WIDE_CHAR_SPACER)\n && include_wrapped_wide\n {\n text.push(self.grid[line - 1i32][Column(0)].c);\n }\n\n text\n }\n\n /// Terminal content required for rendering.\n #[inline]\n pub fn renderable_content(&self) -> RenderableContent<'_>\n where\n T: EventListener,\n {\n RenderableContent::new(self)\n }\n\n /// Access to the raw grid data structure.\n pub fn grid(&self) -> &Grid<Cell> {\n &self.grid\n }\n\n /// Mutable access to the raw grid data structure.\n pub fn grid_mut(&mut self) -> &mut Grid<Cell> {\n &mut self.grid\n }\n\n /// Resize terminal to new dimensions.\n pub fn resize<S: Dimensions>(&mut self, size: S) {\n let old_cols = self.columns();\n let old_lines = self.screen_lines();\n\n let num_cols = size.columns();\n let num_lines = size.screen_lines();\n\n if old_cols == num_cols && old_lines == num_lines {\n debug!(\"Term::resize dimensions unchanged\");\n return;\n }\n\n debug!(\"New num_cols is {} and num_lines is {}\", num_cols, num_lines);\n\n // Move vi mode cursor with the content.\n let history_size = self.history_size();\n let mut delta = num_lines as i32 - old_lines as i32;\n let min_delta = cmp::min(0, num_lines as i32 - self.grid.cursor.point.line.0 - 1);\n delta = cmp::min(cmp::max(delta, min_delta), history_size as i32);\n self.vi_mode_cursor.point.line += delta;\n\n let is_alt = self.mode.contains(TermMode::ALT_SCREEN);\n self.grid.resize(!is_alt, num_lines, num_cols);\n self.inactive_grid.resize(is_alt, num_lines, num_cols);\n\n // Invalidate selection and tabs only when necessary.\n if old_cols != num_cols {\n self.selection = None;\n\n // Recreate tabs list.\n self.tabs.resize(num_cols);\n } else if let Some(selection) = self.selection.take() {\n let max_lines = cmp::max(num_lines, old_lines) as i32;\n let range = Line(0)..Line(max_lines);\n self.selection = selection.rotate(self, &range, -delta);\n }\n\n // Clamp vi cursor 
to viewport.\n let vi_point = self.vi_mode_cursor.point;\n let viewport_top = Line(-(self.grid.display_offset() as i32));\n let viewport_bottom = viewport_top + self.bottommost_line();\n self.vi_mode_cursor.point.line =\n cmp::max(cmp::min(vi_point.line, viewport_bottom), viewport_top);\n self.vi_mode_cursor.point.column = cmp::min(vi_point.column, self.last_column());\n\n // Reset scrolling region.\n self.scroll_region = Line(0)..Line(self.screen_lines() as i32);\n\n // Resize damage information.\n self.damage.resize(num_cols, num_lines);\n }\n\n /// Active terminal modes.\n #[inline]\n pub fn mode(&self) -> &TermMode {\n &self.mode\n }\n\n /// Swap primary and alternate screen buffer.\n pub fn swap_alt(&mut self) {\n if !self.mode.contains(TermMode::ALT_SCREEN) {\n // Set alt screen cursor to the current primary screen cursor.\n self.inactive_grid.cursor = self.grid.cursor.clone();\n\n // Drop information about the primary screens saved cursor.\n self.grid.saved_cursor = self.grid.cursor.clone();\n\n // Reset alternate screen contents.\n self.inactive_grid.reset_region(..);\n }\n\n mem::swap(&mut self.keyboard_mode_stack, &mut self.inactive_keyboard_mode_stack);\n let keyboard_mode =\n self.keyboard_mode_stack.last().copied().unwrap_or(KeyboardModes::NO_MODE).into();\n self.set_keyboard_mode(keyboard_mode, KeyboardModesApplyBehavior::Replace);\n\n mem::swap(&mut self.grid, &mut self.inactive_grid);\n self.mode ^= TermMode::ALT_SCREEN;\n self.selection = None;\n self.mark_fully_damaged();\n }\n\n /// Scroll screen down.\n ///\n /// Text moves down; clear at bottom\n /// Expects origin to be in scroll range.\n #[inline]\n fn scroll_down_relative(&mut self, origin: Line, mut lines: usize) {\n trace!(\"Scrolling down relative: origin={}, lines={}\", origin, lines);\n\n lines = cmp::min(lines, (self.scroll_region.end - self.scroll_region.start).0 as usize);\n lines = cmp::min(lines, (self.scroll_region.end - origin).0 as usize);\n\n let region = 
origin..self.scroll_region.end;\n\n // Scroll selection.\n self.selection =\n self.selection.take().and_then(|s| s.rotate(self, ®ion, -(lines as i32)));\n\n // Scroll vi mode cursor.\n let line = &mut self.vi_mode_cursor.point.line;\n if region.start <= *line && region.end > *line {\n *line = cmp::min(*line + lines, region.end - 1);\n }\n\n // Scroll between origin and bottom\n self.grid.scroll_down(®ion, lines);\n self.mark_fully_damaged();\n }\n\n /// Scroll screen up\n ///\n /// Text moves up; clear at top\n /// Expects origin to be in scroll range.\n #[inline]\n fn scroll_up_relative(&mut self, origin: Line, mut lines: usize) {\n trace!(\"Scrolling up relative: origin={}, lines={}\", origin, lines);\n\n lines = cmp::min(lines, (self.scroll_region.end - self.scroll_region.start).0 as usize);\n\n let region = origin..self.scroll_region.end;\n\n // Scroll selection.\n self.selection = self.selection.take().and_then(|s| s.rotate(self, ®ion, lines as i32));\n\n self.grid.scroll_up(®ion, lines);\n\n // Scroll vi mode cursor.\n let viewport_top = Line(-(self.grid.display_offset() as i32));\n let top = if region.start == 0 { viewport_top } else { region.start };\n let line = &mut self.vi_mode_cursor.point.line;\n if (top <= *line) && region.end > *line {\n *line = cmp::max(*line - lines, top);\n }\n self.mark_fully_damaged();\n }\n\n fn deccolm(&mut self)\n where\n T: EventListener,\n {\n // Setting 132 column font makes no sense, but run the other side effects.\n // Clear scrolling region.\n self.set_scrolling_region(1, None);\n\n // Clear grid.\n self.grid.reset_region(..);\n self.mark_fully_damaged();\n }\n\n #[inline]\n pub fn exit(&mut self)\n where\n T: EventListener,\n {\n self.event_proxy.send_event(Event::Exit);\n }\n\n /// Toggle the vi mode.\n #[inline]\n pub fn toggle_vi_mode(&mut self)\n where\n T: EventListener,\n {\n self.mode ^= TermMode::VI;\n\n if self.mode.contains(TermMode::VI) {\n let display_offset = self.grid.display_offset() as i32;\n if 
self.grid.cursor.point.line > self.bottommost_line() - display_offset {\n // Move cursor to top-left if terminal cursor is not visible.\n let point = Point::new(Line(-display_offset), Column(0));\n self.vi_mode_cursor = ViModeCursor::new(point);\n } else {\n // Reset vi mode cursor position to match primary cursor.\n self.vi_mode_cursor = ViModeCursor::new(self.grid.cursor.point);\n }\n }\n\n // Update UI about cursor blinking state changes.\n self.event_proxy.send_event(Event::CursorBlinkingChange);\n }\n\n /// Move vi mode cursor.\n #[inline]\n pub fn vi_motion(&mut self, motion: ViMotion)\n where\n T: EventListener,\n {\n // Require vi mode to be active.\n if !self.mode.contains(TermMode::VI) {\n return;\n }\n\n // Move cursor.\n self.vi_mode_cursor = self.vi_mode_cursor.motion(self, motion);\n self.vi_mode_recompute_selection();\n }\n\n /// Move vi cursor to a point in the grid.\n #[inline]\n pub fn vi_goto_point(&mut self, point: Point)\n where\n T: EventListener,\n {\n // Move viewport to make point visible.\n self.scroll_to_point(point);\n\n // Move vi cursor to the point.\n self.vi_mode_cursor.point = point;\n\n self.vi_mode_recompute_selection();\n }\n\n /// Update the active selection to match the vi mode cursor position.\n #[inline]\n fn vi_mode_recompute_selection(&mut self) {\n // Require vi mode to be active.\n if !self.mode.contains(TermMode::VI) {\n return;\n }\n\n // Update only if non-empty selection is present.\n if let Some(selection) = self.selection.as_mut().filter(|s| !s.is_empty()) {\n selection.update(self.vi_mode_cursor.point, Side::Left);\n selection.include_all();\n }\n }\n\n /// Scroll display to point if it is outside of viewport.\n pub fn scroll_to_point(&mut self, point: Point)\n where\n T: EventListener,\n {\n let display_offset = self.grid.display_offset() as i32;\n let screen_lines = self.grid.screen_lines() as i32;\n\n if point.line < -display_offset {\n let lines = point.line + display_offset;\n 
self.scroll_display(Scroll::Delta(-lines.0));\n } else if point.line >= (screen_lines - display_offset) {\n let lines = point.line + display_offset - screen_lines + 1i32;\n self.scroll_display(Scroll::Delta(-lines.0));\n }\n }\n\n /// Jump to the end of a wide cell.\n pub fn expand_wide(&self, mut point: Point, direction: Direction) -> Point {\n let flags = self.grid[point.line][point.column].flags;\n\n match direction {\n Direction::Right if flags.contains(Flags::LEADING_WIDE_CHAR_SPACER) => {\n point.column = Column(1);\n point.line += 1;\n },\n Direction::Right if flags.contains(Flags::WIDE_CHAR) => {\n point.column = cmp::min(point.column + 1, self.last_column());\n },\n Direction::Left if flags.intersects(Flags::WIDE_CHAR | Flags::WIDE_CHAR_SPACER) => {\n if flags.contains(Flags::WIDE_CHAR_SPACER) {\n point.column -= 1;\n }\n\n let prev = point.sub(self, Boundary::Grid, 1);\n if self.grid[prev].flags.contains(Flags::LEADING_WIDE_CHAR_SPACER) {\n point = prev;\n }\n },\n _ => (),\n }\n\n point\n }\n\n #[inline]\n pub fn semantic_escape_chars(&self) -> &str {\n &self.config.semantic_escape_chars\n }\n\n #[cfg(test)]\n pub(crate) fn set_semantic_escape_chars(&mut self, semantic_escape_chars: &str) {\n self.config.semantic_escape_chars = semantic_escape_chars.into();\n }\n\n /// Active terminal cursor style.\n ///\n /// While vi mode is active, this will automatically return the vi mode cursor style.\n #[inline]\n pub fn cursor_style(&self) -> CursorStyle {\n let cursor_style = self.cursor_style.unwrap_or(self.config.default_cursor_style);\n\n if self.mode.contains(TermMode::VI) {\n self.config.vi_mode_cursor_style.unwrap_or(cursor_style)\n } else {\n cursor_style\n }\n }\n\n pub fn colors(&self) -> &Colors {\n &self.colors\n }\n\n /// Insert a linebreak at the current cursor position.\n #[inline]\n fn wrapline(&mut self)\n where\n T: EventListener,\n {\n if !self.mode.contains(TermMode::LINE_WRAP) {\n return;\n }\n\n trace!(\"Wrapping input\");\n\n 
self.grid.cursor_cell().flags.insert(Flags::WRAPLINE);\n\n if self.grid.cursor.point.line + 1 >= self.scroll_region.end {\n self.linefeed();\n } else {\n self.damage_cursor();\n self.grid.cursor.point.line += 1;\n }\n\n self.grid.cursor.point.column = Column(0);\n self.grid.cursor.input_needs_wrap = false;\n self.damage_cursor();\n }\n\n /// Write `c` to the cell at the cursor position.\n #[inline(always)]\n fn write_at_cursor(&mut self, c: char) {\n let c = self.grid.cursor.charsets[self.active_charset].map(c);\n let fg = self.grid.cursor.template.fg;\n let bg = self.grid.cursor.template.bg;\n let flags = self.grid.cursor.template.flags;\n let extra = self.grid.cursor.template.extra.clone();\n\n let mut cursor_cell = self.grid.cursor_cell();\n\n // Clear all related cells when overwriting a fullwidth cell.\n if cursor_cell.flags.intersects(Flags::WIDE_CHAR | Flags::WIDE_CHAR_SPACER) {\n // Remove wide char and spacer.\n let wide = cursor_cell.flags.contains(Flags::WIDE_CHAR);\n let point = self.grid.cursor.point;\n if wide && point.column < self.last_column() {\n self.grid[point.line][point.column + 1].flags.remove(Flags::WIDE_CHAR_SPACER);\n } else if point.column > 0 {\n self.grid[point.line][point.column - 1].clear_wide();\n }\n\n // Remove leading spacers.\n if point.column <= 1 && point.line != self.topmost_line() {\n let column = self.last_column();\n self.grid[point.line - 1i32][column].flags.remove(Flags::LEADING_WIDE_CHAR_SPACER);\n }\n\n cursor_cell = self.grid.cursor_cell();\n }\n\n cursor_cell.c = c;\n cursor_cell.fg = fg;\n cursor_cell.bg = bg;\n cursor_cell.flags = flags;\n cursor_cell.extra = extra;\n }\n\n #[inline]\n fn damage_cursor(&mut self) {\n // The normal cursor coordinates are always in viewport.\n let point =\n Point::new(self.grid.cursor.point.line.0 as usize, self.grid.cursor.point.column);\n self.damage.damage_point(point);\n }\n\n #[inline]\n fn set_keyboard_mode(&mut self, mode: TermMode, apply: KeyboardModesApplyBehavior) {\n let 
active_mode = self.mode & TermMode::KITTY_KEYBOARD_PROTOCOL;\n self.mode &= !TermMode::KITTY_KEYBOARD_PROTOCOL;\n let new_mode = match apply {\n KeyboardModesApplyBehavior::Replace => mode,\n KeyboardModesApplyBehavior::Union => active_mode.union(mode),\n KeyboardModesApplyBehavior::Difference => active_mode.difference(mode),\n };\n trace!(\"Setting keyboard mode to {new_mode:?}\");\n self.mode |= new_mode;\n }\n}",
"class_signature": "impl<T> Term<T>"
} |
next_match_right | alacritty-master/alacritty_terminal/src/term/search.rs | fn next_match_right(
&self,
regex: &mut RegexSearch,
origin: Point,
side: Side,
max_lines: Option<usize>,
) -> Option<Match> {
let start = self.line_search_left(origin);
let mut end = start;
// Limit maximum number of lines searched.
end = match max_lines {
Some(max_lines) => {
let line = (start.line + max_lines).grid_clamp(self, Boundary::None);
Point::new(line, self.last_column())
},
_ => end.sub(self, Boundary::None, 1),
};
let mut regex_iter = RegexIter::new(start, end, Direction::Right, self, regex).peekable();
// Check if there's any match at all.
let first_match = regex_iter.peek()?.clone();
let regex_match = regex_iter
.find(|regex_match| {
let match_point = Self::match_side(regex_match, side);
// If the match's point is beyond the origin, we're done.
match_point.line < start.line
|| match_point.line > origin.line
|| (match_point.line == origin.line && match_point.column >= origin.column)
})
.unwrap_or(first_match);
Some(regex_match)
} | use std::cmp::max;
use std::error::Error;
use std::mem;
use std::ops::RangeInclusive;
use log::{debug, warn};
use regex_automata::hybrid::dfa::{Builder, Cache, Config, DFA};
pub use regex_automata::hybrid::BuildError;
use regex_automata::nfa::thompson::Config as ThompsonConfig;
use regex_automata::util::syntax::Config as SyntaxConfig;
use regex_automata::{Anchored, Input, MatchKind};
use crate::grid::{BidirectionalIterator, Dimensions, GridIterator, Indexed};
use crate::index::{Boundary, Column, Direction, Point, Side};
use crate::term::cell::{Cell, Flags};
use crate::term::Term;
/// Used to match equal brackets, when performing a bracket-pair selection.
const BRACKET_PAIRS: [(char, char); 4] = [('(', ')'), ('[', ']'), ('{', '}'), ('<', '>')];
/// Inclusive range of grid points covered by a single search match.
pub type Match = RangeInclusive<Point>;
/// Terminal regex search state.
///
/// Holds two DFA pairs, one per search direction. Each pair combines a "forward" DFA used to
/// locate one end of a match with a "reverse" DFA that then finds the opposite end (see
/// `regex_search_left`/`regex_search_right`).
#[derive(Clone, Debug)]
pub struct RegexSearch {
    // DFAs used when searching towards the left.
    left_fdfa: LazyDfa,
    left_rdfa: LazyDfa,
    // DFAs used when searching towards the right.
    right_rdfa: LazyDfa,
    right_fdfa: LazyDfa,
}
impl RegexSearch {
    /// Build the forward and backward search DFAs.
    ///
    /// Searches are "smart-case": the pattern is matched case-insensitively unless it
    /// contains at least one uppercase character.
    pub fn new(search: &str) -> Result<RegexSearch, Box<BuildError>> {
        // Setup configs for both DFA directions.
        //
        // Bounds are based on Regex's meta engine:
        // https://github.com/rust-lang/regex/blob/061ee815ef2c44101dba7b0b124600fcb03c1912/regex-automata/src/meta/wrappers.rs#L581-L599
        let has_uppercase = search.chars().any(|c| c.is_uppercase());
        let syntax_config = SyntaxConfig::new().case_insensitive(!has_uppercase);
        let config =
            Config::new().minimum_cache_clear_count(Some(3)).minimum_bytes_per_state(Some(10));
        let max_size = config.get_cache_capacity();
        let thompson_config = ThompsonConfig::new().nfa_size_limit(Some(max_size));

        // All four DFAs share the same pattern and limits; only direction and match kind vary.
        let build = |direction, match_all| {
            LazyDfa::new(
                search,
                config.clone(),
                syntax_config,
                thompson_config.clone(),
                direction,
                match_all,
            )
        };

        // Create DFAs to find start/end in right-to-left search.
        let left_rdfa = build(Direction::Right, true)?;
        let has_empty = left_rdfa.dfa.get_nfa().has_empty();
        let left_fdfa = build(Direction::Left, has_empty)?;

        // Create DFAs to find start/end in left-to-right search.
        let right_fdfa = build(Direction::Right, has_empty)?;
        let right_rdfa = build(Direction::Left, true)?;

        Ok(RegexSearch { left_fdfa, left_rdfa, right_fdfa, right_rdfa })
    }
}
/// Runtime-evaluated DFA.
#[derive(Clone, Debug)]
struct LazyDfa {
    // Hybrid NFA/DFA automaton, built lazily at search time.
    dfa: DFA,
    // Scratch cache required by the lazy DFA for state construction.
    cache: Cache,
    // Direction in which this DFA consumes terminal content.
    direction: Direction,
    // When set, searches are anchored and use `MatchKind::All` instead of leftmost-first.
    match_all: bool,
}
impl LazyDfa {
    /// Compile a single lazy DFA for one search direction.
    fn new(
        search: &str,
        mut config: Config,
        syntax: SyntaxConfig,
        mut thompson: ThompsonConfig,
        direction: Direction,
        match_all: bool,
    ) -> Result<Self, Box<BuildError>> {
        // Reverse the NFA whenever the automaton consumes input right-to-left.
        thompson = thompson.reverse(matches!(direction, Direction::Left));

        // Report all matches when resolving match boundaries, otherwise leftmost-first.
        let match_kind = if match_all { MatchKind::All } else { MatchKind::LeftmostFirst };
        config = config.match_kind(match_kind);

        // Create the DFA.
        let dfa = Builder::new()
            .configure(config)
            .syntax(syntax)
            .thompson(thompson)
            .build(search)?;

        Ok(Self { direction, cache: dfa.create_cache(), dfa, match_all })
    }
}
impl<T> Term<T> {
/// Get next search match in the specified direction.
pub fn search_next(
&self,
regex: &mut RegexSearch,
mut origin: Point,
direction: Direction,
side: Side,
mut max_lines: Option<usize>,
) -> Option<Match> {
origin = self.expand_wide(origin, direction);
max_lines = max_lines.filter(|max_lines| max_lines + 1 < self.total_lines());
match direction {
Direction::Right => self.next_match_right(regex, origin, side, max_lines),
Direction::Left => self.next_match_left(regex, origin, side, max_lines),
}
}
/// Find the next match to the right of the origin.
fn next_match_right(
&self,
regex: &mut RegexSearch,
origin: Point,
side: Side,
max_lines: Option<usize>,
) -> Option<Match> {
let start = self.line_search_left(origin);
let mut end = start;
// Limit maximum number of lines searched.
end = match max_lines {
Some(max_lines) => {
let line = (start.line + max_lines).grid_clamp(self, Boundary::None);
Point::new(line, self.last_column())
},
_ => end.sub(self, Boundary::None, 1),
};
let mut regex_iter = RegexIter::new(start, end, Direction::Right, self, regex).peekable();
// Check if there's any match at all.
let first_match = regex_iter.peek()?.clone();
let regex_match = regex_iter
.find(|regex_match| {
let match_point = Self::match_side(regex_match, side);
// If the match's point is beyond the origin, we're done.
match_point.line < start.line
|| match_point.line > origin.line
|| (match_point.line == origin.line && match_point.column >= origin.column)
})
.unwrap_or(first_match);
Some(regex_match)
}
/// Find the next match to the left of the origin.
fn next_match_left(
&self,
regex: &mut RegexSearch,
origin: Point,
side: Side,
max_lines: Option<usize>,
) -> Option<Match> {
let start = self.line_search_right(origin);
let mut end = start;
// Limit maximum number of lines searched.
end = match max_lines {
Some(max_lines) => {
let line = (start.line - max_lines).grid_clamp(self, Boundary::None);
Point::new(line, Column(0))
},
_ => end.add(self, Boundary::None, 1),
};
let mut regex_iter = RegexIter::new(start, end, Direction::Left, self, regex).peekable();
// Check if there's any match at all.
let first_match = regex_iter.peek()?.clone();
let regex_match = regex_iter
.find(|regex_match| {
let match_point = Self::match_side(regex_match, side);
// If the match's point is beyond the origin, we're done.
match_point.line > start.line
|| match_point.line < origin.line
|| (match_point.line == origin.line && match_point.column <= origin.column)
})
.unwrap_or(first_match);
Some(regex_match)
}
/// Get the side of a match.
fn match_side(regex_match: &Match, side: Side) -> Point {
match side {
Side::Right => *regex_match.end(),
Side::Left => *regex_match.start(),
}
}
/// Find the next regex match to the left of the origin point.
///
/// The origin is always included in the regex.
pub fn regex_search_left(
&self,
regex: &mut RegexSearch,
start: Point,
end: Point,
) -> Option<Match> {
// Find start and end of match.
let match_start = self.regex_search(start, end, &mut regex.left_fdfa)?;
let match_end = self.regex_search(match_start, start, &mut regex.left_rdfa)?;
Some(match_start..=match_end)
}
/// Find the next regex match to the right of the origin point.
///
/// The origin is always included in the regex.
pub fn regex_search_right(
&self,
regex: &mut RegexSearch,
start: Point,
end: Point,
) -> Option<Match> {
// Find start and end of match.
let match_end = self.regex_search(start, end, &mut regex.right_fdfa)?;
let match_start = self.regex_search(match_end, start, &mut regex.right_rdfa)?;
Some(match_start..=match_end)
}
/// Find the next regex match.
///
/// This will always return the side of the first match which is farthest from the start point.
fn regex_search(&self, start: Point, end: Point, regex: &mut LazyDfa) -> Option<Point> {
match self.regex_search_internal(start, end, regex) {
Ok(regex_match) => regex_match,
Err(err) => {
warn!("Regex exceeded complexity limit");
debug!(" {err}");
None
},
}
}
    /// Find the next regex match.
    ///
    /// Feeds grid content into the lazy DFA byte by byte, starting at `start` and advancing
    /// in the DFA's direction until `end` is reached, wrapping around the scrollback buffer
    /// when the grid iterator runs out early. Returns the match side farthest from `start`,
    /// or `Ok(None)` when nothing matches; `Err` indicates the DFA exceeded its complexity
    /// limits.
    ///
    /// To automatically log regex complexity errors, use [`Self::regex_search`] instead.
    fn regex_search_internal(
        &self,
        start: Point,
        end: Point,
        regex: &mut LazyDfa,
    ) -> Result<Option<Point>, Box<dyn Error>> {
        let topmost_line = self.topmost_line();
        let screen_lines = self.screen_lines() as i32;
        let last_column = self.last_column();
        // Advance the iterator.
        let next = match regex.direction {
            Direction::Right => GridIterator::next,
            Direction::Left => GridIterator::prev,
        };
        // Get start state for the DFA.
        let regex_anchored = if regex.match_all { Anchored::Yes } else { Anchored::No };
        let input = Input::new(&[]).anchored(regex_anchored);
        // NOTE(review): unwrap assumes the initial start state cannot fail for an empty
        // input — confirm against the regex-automata docs.
        let mut state = regex.dfa.start_state_forward(&mut regex.cache, &input).unwrap();
        let mut iter = self.grid.iter_from(start);
        let mut regex_match = None;
        let mut done = false;
        let mut cell = iter.cell();
        self.skip_fullwidth(&mut iter, &mut cell, regex.direction);
        let mut c = cell.c;
        // Whether the previously processed cell wrapped onto the next line.
        let mut last_wrapped = iter.cell().flags.contains(Flags::WRAPLINE);
        let mut point = iter.point();
        let mut last_point = point;
        // Bytes fed to the DFA since the last reset; used to detect empty matches.
        let mut consumed_bytes = 0;
        // Reset the regex state to restart the search.
        macro_rules! reset_state {
            () => {{
                state = regex.dfa.start_state_forward(&mut regex.cache, &input)?;
                consumed_bytes = 0;
                regex_match = None;
            }};
        }
        'outer: loop {
            // Convert char to array of bytes.
            let mut buf = [0; 4];
            let utf8_len = c.encode_utf8(&mut buf).len();
            // Pass char to DFA as individual bytes.
            for i in 0..utf8_len {
                // Inverse byte order when going left.
                let byte = match regex.direction {
                    Direction::Right => buf[i],
                    Direction::Left => buf[utf8_len - i - 1],
                };
                state = regex.dfa.next_state(&mut regex.cache, state, byte)?;
                consumed_bytes += 1;
                if i == 0 && state.is_match() {
                    // Matches require one additional BYTE of lookahead, so we check the match state
                    // for the first byte of every new character to determine if the last character
                    // was a match.
                    regex_match = Some(last_point);
                } else if state.is_dead() {
                    if consumed_bytes == 2 {
                        // Reset search if we found an empty match.
                        //
                        // With an unanchored search, a dead state only occurs after the end of a
                        // match has been found. While we want to abort after the first match has
                        // ended, we don't want empty matches since we cannot highlight them.
                        //
                        // So once we encounter an empty match, we reset our parser state and clear
                        // the match, effectively starting a new search one character farther than
                        // before.
                        //
                        // An empty match requires consuming `2` bytes, since the first byte will
                        // report the match for the empty string, while the second byte then
                        // reports the dead state indicating the first character isn't part of the
                        // match.
                        reset_state!();
                        // Retry this character if first byte caused failure.
                        //
                        // After finding an empty match, we want to advance the search start by one
                        // character. So if the first character has multiple bytes and the dead
                        // state isn't reached at `i == 0`, then we continue with the rest of the
                        // loop to advance the parser by one character.
                        if i == 0 {
                            continue 'outer;
                        }
                    } else {
                        // Abort on dead state.
                        break 'outer;
                    }
                }
            }
            // Stop once we've reached the target point.
            if point == end || done {
                // When reaching the end-of-input, we need to notify the parser that no look-ahead
                // is possible and check for state changes.
                state = regex.dfa.next_eoi_state(&mut regex.cache, state)?;
                if state.is_match() {
                    regex_match = Some(point);
                } else if state.is_dead() && consumed_bytes == 1 {
                    // Ignore empty matches.
                    regex_match = None;
                }
                break;
            }
            // Advance grid cell iterator.
            let mut cell = match next(&mut iter) {
                Some(Indexed { cell, .. }) => cell,
                None => {
                    // Wrap around to other end of the scrollback buffer.
                    let line = topmost_line - point.line + screen_lines - 1;
                    let start = Point::new(line, last_column - point.column);
                    iter = self.grid.iter_from(start);
                    iter.cell()
                },
            };
            // Check for completion before potentially skipping over fullwidth characters.
            done = iter.point() == end;
            self.skip_fullwidth(&mut iter, &mut cell, regex.direction);
            c = cell.c;
            let wrapped = iter.cell().flags.contains(Flags::WRAPLINE);
            last_point = mem::replace(&mut point, iter.point());
            // Handle linebreaks.
            if (last_point.column == last_column && point.column == Column(0) && !last_wrapped)
                || (last_point.column == Column(0) && point.column == last_column && !wrapped)
            {
                // When reaching the end-of-input, we need to notify the parser that no
                // look-ahead is possible and check if the current state is still a match.
                state = regex.dfa.next_eoi_state(&mut regex.cache, state)?;
                if state.is_match() {
                    regex_match = Some(last_point);
                }
                match regex_match {
                    // Stop if we found a non-empty match before the linebreak.
                    Some(_) if (!state.is_dead() || consumed_bytes > 1) && consumed_bytes != 0 => {
                        break;
                    },
                    _ => reset_state!(),
                }
            }
            last_wrapped = wrapped;
        }
        Ok(regex_match)
    }
    /// Advance a grid iterator over fullwidth characters.
    ///
    /// Positions the iterator so that spacer cells of fullwidth characters are not fed to the
    /// regex DFA. `cell` is updated in place whenever the iterator moves onto a different cell.
    fn skip_fullwidth<'a>(
        &self,
        iter: &'a mut GridIterator<'_, Cell>,
        cell: &mut &'a Cell,
        direction: Direction,
    ) {
        match direction {
            // In the alternate screen buffer there might not be a wide char spacer after a wide
            // char, so we only advance the iterator when the wide char is not in the last column.
            Direction::Right
                if cell.flags.contains(Flags::WIDE_CHAR)
                    && iter.point().column < self.last_column() =>
            {
                iter.next();
            },
            // A leading spacer sits at the end of a line; the wide char itself starts on the
            // next line, so step onto it and then past its trailing spacer.
            Direction::Right if cell.flags.contains(Flags::LEADING_WIDE_CHAR_SPACER) => {
                if let Some(Indexed { cell: new_cell, .. }) = iter.next() {
                    *cell = new_cell;
                }
                iter.next();
            },
            // Step back onto the wide char itself; if it is preceded by a leading spacer on the
            // previous line, skip over that too.
            Direction::Left if cell.flags.contains(Flags::WIDE_CHAR_SPACER) => {
                if let Some(Indexed { cell: new_cell, .. }) = iter.prev() {
                    *cell = new_cell;
                }
                let prev = iter.point().sub(self, Boundary::Grid, 1);
                if self.grid[prev].flags.contains(Flags::LEADING_WIDE_CHAR_SPACER) {
                    iter.prev();
                }
            },
            _ => (),
        }
    }
/// Find next matching bracket.
pub fn bracket_search(&self, point: Point) -> Option<Point> {
let start_char = self.grid[point].c;
// Find the matching bracket we're looking for
let (forward, end_char) = BRACKET_PAIRS.iter().find_map(|(open, close)| {
if open == &start_char {
Some((true, *close))
} else if close == &start_char {
Some((false, *open))
} else {
None
}
})?;
let mut iter = self.grid.iter_from(point);
// For every character match that equals the starting bracket, we
// ignore one bracket of the opposite type.
let mut skip_pairs = 0;
loop {
// Check the next cell
let cell = if forward { iter.next() } else { iter.prev() };
// Break if there are no more cells
let cell = match cell {
Some(cell) => cell,
None => break,
};
// Check if the bracket matches
if cell.c == end_char && skip_pairs == 0 {
return Some(cell.point);
} else if cell.c == start_char {
skip_pairs += 1;
} else if cell.c == end_char {
skip_pairs -= 1;
}
}
None
}
/// Find left end of semantic block.
#[must_use]
pub fn semantic_search_left(&self, point: Point) -> Point {
match self.inline_search_left(point, self.semantic_escape_chars()) {
// If we found a match, reverse for at least one cell, skipping over wide cell spacers.
Ok(point) => {
let wide_spacer = Flags::WIDE_CHAR_SPACER | Flags::LEADING_WIDE_CHAR_SPACER;
self.grid
.iter_from(point)
.find(|cell| !cell.flags.intersects(wide_spacer))
.map_or(point, |cell| cell.point)
},
Err(point) => point,
}
}
/// Find right end of semantic block.
#[must_use]
pub fn semantic_search_right(&self, point: Point) -> Point {
match self.inline_search_right(point, self.semantic_escape_chars()) {
Ok(point) => self.grid.iter_from(point).prev().map_or(point, |cell| cell.point),
Err(point) => point,
}
}
/// Searching to the left, find the next character contained in `needles`.
pub fn inline_search_left(&self, mut point: Point, needles: &str) -> Result<Point, Point> {
// Limit the starting point to the last line in the history
point.line = max(point.line, self.topmost_line());
let mut iter = self.grid.iter_from(point);
let last_column = self.columns() - 1;
let wide_spacer = Flags::WIDE_CHAR_SPACER | Flags::LEADING_WIDE_CHAR_SPACER;
while let Some(cell) = iter.prev() {
if cell.point.column == last_column && !cell.flags.contains(Flags::WRAPLINE) {
break;
}
point = cell.point;
if !cell.flags.intersects(wide_spacer) && needles.contains(cell.c) {
return Ok(point);
}
}
Err(point)
}
/// Searching to the right, find the next character contained in `needles`.
pub fn inline_search_right(&self, mut point: Point, needles: &str) -> Result<Point, Point> {
// Limit the starting point to the last line in the history
point.line = max(point.line, self.topmost_line());
let wide_spacer = Flags::WIDE_CHAR_SPACER | Flags::LEADING_WIDE_CHAR_SPACER;
let last_column = self.columns() - 1;
// Immediately stop if start point in on line break.
if point.column == last_column && !self.grid[point].flags.contains(Flags::WRAPLINE) {
return Err(point);
}
for cell in self.grid.iter_from(point) {
point = cell.point;
if !cell.flags.intersects(wide_spacer) && needles.contains(cell.c) {
return Ok(point);
}
if point.column == last_column && !cell.flags.contains(Flags::WRAPLINE) {
break;
}
}
Err(point)
}
/// Find the beginning of the current line across linewraps.
pub fn line_search_left(&self, mut point: Point) -> Point {
while point.line > self.topmost_line()
&& self.grid[point.line - 1i32][self.last_column()].flags.contains(Flags::WRAPLINE)
{
point.line -= 1;
}
point.column = Column(0);
point
}
/// Find the end of the current line across linewraps.
pub fn line_search_right(&self, mut point: Point) -> Point {
while point.line + 1 < self.screen_lines()
&& self.grid[point.line][self.last_column()].flags.contains(Flags::WRAPLINE)
{
point.line += 1;
}
point.column = self.last_column();
point
}
}
/// Iterator over regex matches.
pub struct RegexIter<'a, T> {
    // Origin of the next search; advanced past each returned match.
    point: Point,
    // Last point included in the search.
    end: Point,
    // Direction the iterator advances in.
    direction: Direction,
    regex: &'a mut RegexSearch,
    term: &'a Term<T>,
    // Set once `end` has been reached; terminates the iterator.
    done: bool,
}
impl<'a, T> RegexIter<'a, T> {
    /// Create an iterator yielding all matches between `start` and `end`.
    pub fn new(
        start: Point,
        end: Point,
        direction: Direction,
        term: &'a Term<T>,
        regex: &'a mut RegexSearch,
    ) -> Self {
        Self { point: start, end, direction, term, regex, done: false }
    }

    /// Skip one cell, advancing the origin point to the next one.
    fn skip(&mut self) {
        // Never leave the origin inside a fullwidth character.
        self.point = self.term.expand_wide(self.point, self.direction);

        self.point = match self.direction {
            Direction::Left => self.point.sub(self.term, Boundary::None, 1),
            Direction::Right => self.point.add(self.term, Boundary::None, 1),
        };
    }

    /// Get the next match in the specified direction.
    fn next_match(&mut self) -> Option<Match> {
        match self.direction {
            Direction::Left => self.term.regex_search_left(self.regex, self.point, self.end),
            Direction::Right => self.term.regex_search_right(self.regex, self.point, self.end),
        }
    }
}
impl<T> Iterator for RegexIter<'_, T> {
    type Item = Match;

    fn next(&mut self) -> Option<Self::Item> {
        if self.done {
            return None;
        }

        // The end cell itself could still hold a single-cell match, so allow one last search.
        self.done = self.point == self.end;

        let regex_match = self.next_match()?;

        self.point = *regex_match.end();
        if self.point == self.end {
            // Stop when the match terminates right on the end limit.
            self.done = true;
        } else {
            // Move the new search origin past the match.
            self.skip();
        }

        Some(regex_match)
    }
}
#[cfg(test)]
mod tests {
use super::*;
use crate::index::{Column, Line};
use crate::term::test::{mock_term, TermSize};
use crate::term::Config;
#[test]
fn regex_right() {
#[rustfmt::skip]
let term = mock_term("\
testing66\r\n\
Alacritty\n\
123\r\n\
Alacritty\r\n\
123\
");
// Check regex across wrapped and unwrapped lines.
let mut regex = RegexSearch::new("Ala.*123").unwrap();
let start = Point::new(Line(1), Column(0));
let end = Point::new(Line(4), Column(2));
let match_start = Point::new(Line(1), Column(0));
let match_end = Point::new(Line(2), Column(2));
assert_eq!(term.regex_search_right(&mut regex, start, end), Some(match_start..=match_end));
}
#[test]
fn regex_left() {
#[rustfmt::skip]
let term = mock_term("\
testing66\r\n\
Alacritty\n\
123\r\n\
Alacritty\r\n\
123\
");
// Check regex across wrapped and unwrapped lines.
let mut regex = RegexSearch::new("Ala.*123").unwrap();
let start = Point::new(Line(4), Column(2));
let end = Point::new(Line(1), Column(0));
let match_start = Point::new(Line(1), Column(0));
let match_end = Point::new(Line(2), Column(2));
assert_eq!(term.regex_search_left(&mut regex, start, end), Some(match_start..=match_end));
}
#[test]
fn nested_regex() {
#[rustfmt::skip]
let term = mock_term("\
Ala -> Alacritty -> critty\r\n\
critty\
");
// Greedy stopped at linebreak.
let mut regex = RegexSearch::new("Ala.*critty").unwrap();
let start = Point::new(Line(0), Column(0));
let end = Point::new(Line(0), Column(25));
assert_eq!(term.regex_search_right(&mut regex, start, end), Some(start..=end));
// Greedy stopped at dead state.
let mut regex = RegexSearch::new("Ala[^y]*critty").unwrap();
let start = Point::new(Line(0), Column(0));
let end = Point::new(Line(0), Column(15));
assert_eq!(term.regex_search_right(&mut regex, start, end), Some(start..=end));
}
#[test]
fn no_match_right() {
#[rustfmt::skip]
let term = mock_term("\
first line\n\
broken second\r\n\
third\
");
let mut regex = RegexSearch::new("nothing").unwrap();
let start = Point::new(Line(0), Column(0));
let end = Point::new(Line(2), Column(4));
assert_eq!(term.regex_search_right(&mut regex, start, end), None);
}
#[test]
fn no_match_left() {
#[rustfmt::skip]
let term = mock_term("\
first line\n\
broken second\r\n\
third\
");
let mut regex = RegexSearch::new("nothing").unwrap();
let start = Point::new(Line(2), Column(4));
let end = Point::new(Line(0), Column(0));
assert_eq!(term.regex_search_left(&mut regex, start, end), None);
}
#[test]
fn include_linebreak_left() {
#[rustfmt::skip]
let term = mock_term("\
testing123\r\n\
xxx\
");
// Make sure the cell containing the linebreak is not skipped.
let mut regex = RegexSearch::new("te.*123").unwrap();
let start = Point::new(Line(1), Column(0));
let end = Point::new(Line(0), Column(0));
let match_start = Point::new(Line(0), Column(0));
let match_end = Point::new(Line(0), Column(9));
assert_eq!(term.regex_search_left(&mut regex, start, end), Some(match_start..=match_end));
}
#[test]
fn include_linebreak_right() {
#[rustfmt::skip]
let term = mock_term("\
xxx\r\n\
testing123\
");
// Make sure the cell containing the linebreak is not skipped.
let mut regex = RegexSearch::new("te.*123").unwrap();
let start = Point::new(Line(0), Column(2));
let end = Point::new(Line(1), Column(9));
let match_start = Point::new(Line(1), Column(0));
assert_eq!(term.regex_search_right(&mut regex, start, end), Some(match_start..=end));
}
#[test]
fn skip_dead_cell() {
let term = mock_term("alacritty");
// Make sure dead state cell is skipped when reversing.
let mut regex = RegexSearch::new("alacrit").unwrap();
let start = Point::new(Line(0), Column(0));
let end = Point::new(Line(0), Column(6));
assert_eq!(term.regex_search_right(&mut regex, start, end), Some(start..=end));
}
#[test]
fn reverse_search_dead_recovery() {
let term = mock_term("zooo lense");
// Make sure the reverse DFA operates the same as a forward DFA.
let mut regex = RegexSearch::new("zoo").unwrap();
let start = Point::new(Line(0), Column(9));
let end = Point::new(Line(0), Column(0));
let match_start = Point::new(Line(0), Column(0));
let match_end = Point::new(Line(0), Column(2));
assert_eq!(term.regex_search_left(&mut regex, start, end), Some(match_start..=match_end));
}
#[test]
fn multibyte_unicode() {
let term = mock_term("testвосибing");
let mut regex = RegexSearch::new("te.*ing").unwrap();
let start = Point::new(Line(0), Column(0));
let end = Point::new(Line(0), Column(11));
assert_eq!(term.regex_search_right(&mut regex, start, end), Some(start..=end));
let mut regex = RegexSearch::new("te.*ing").unwrap();
let start = Point::new(Line(0), Column(11));
let end = Point::new(Line(0), Column(0));
assert_eq!(term.regex_search_left(&mut regex, start, end), Some(end..=start));
}
#[test]
fn end_on_multibyte_unicode() {
let term = mock_term("testвосиб");
let mut regex = RegexSearch::new("te.*и").unwrap();
let start = Point::new(Line(0), Column(0));
let end = Point::new(Line(0), Column(8));
let match_end = Point::new(Line(0), Column(7));
assert_eq!(term.regex_search_right(&mut regex, start, end), Some(start..=match_end));
}
#[test]
fn fullwidth() {
let term = mock_term("a🦇x🦇");
let mut regex = RegexSearch::new("[^ ]*").unwrap();
let start = Point::new(Line(0), Column(0));
let end = Point::new(Line(0), Column(5));
assert_eq!(term.regex_search_right(&mut regex, start, end), Some(start..=end));
let mut regex = RegexSearch::new("[^ ]*").unwrap();
let start = Point::new(Line(0), Column(5));
let end = Point::new(Line(0), Column(0));
assert_eq!(term.regex_search_left(&mut regex, start, end), Some(end..=start));
}
#[test]
fn singlecell_fullwidth() {
let term = mock_term("🦇");
let mut regex = RegexSearch::new("🦇").unwrap();
let start = Point::new(Line(0), Column(0));
let end = Point::new(Line(0), Column(1));
assert_eq!(term.regex_search_right(&mut regex, start, end), Some(start..=end));
let mut regex = RegexSearch::new("🦇").unwrap();
let start = Point::new(Line(0), Column(1));
let end = Point::new(Line(0), Column(0));
assert_eq!(term.regex_search_left(&mut regex, start, end), Some(end..=start));
}
#[test]
fn end_on_fullwidth() {
let term = mock_term("jarr🦇");
let start = Point::new(Line(0), Column(0));
let end = Point::new(Line(0), Column(4));
// Ensure ending without a match doesn't loop indefinitely.
let mut regex = RegexSearch::new("x").unwrap();
assert_eq!(term.regex_search_right(&mut regex, start, end), None);
let mut regex = RegexSearch::new("x").unwrap();
let match_end = Point::new(Line(0), Column(5));
assert_eq!(term.regex_search_right(&mut regex, start, match_end), None);
// Ensure match is captured when only partially inside range.
let mut regex = RegexSearch::new("jarr🦇").unwrap();
assert_eq!(term.regex_search_right(&mut regex, start, end), Some(start..=match_end));
}
#[test]
fn wrapping() {
#[rustfmt::skip]
let term = mock_term("\
xxx\r\n\
xxx\
");
let mut regex = RegexSearch::new("xxx").unwrap();
let start = Point::new(Line(0), Column(2));
let end = Point::new(Line(1), Column(2));
let match_start = Point::new(Line(1), Column(0));
assert_eq!(term.regex_search_right(&mut regex, start, end), Some(match_start..=end));
let mut regex = RegexSearch::new("xxx").unwrap();
let start = Point::new(Line(1), Column(0));
let end = Point::new(Line(0), Column(0));
let match_end = Point::new(Line(0), Column(2));
assert_eq!(term.regex_search_left(&mut regex, start, end), Some(end..=match_end));
}
#[test]
fn wrapping_into_fullwidth() {
#[rustfmt::skip]
let term = mock_term("\
🦇xx\r\n\
xx🦇\
");
let mut regex = RegexSearch::new("🦇x").unwrap();
let start = Point::new(Line(0), Column(0));
let end = Point::new(Line(1), Column(3));
let match_start = Point::new(Line(0), Column(0));
let match_end = Point::new(Line(0), Column(2));
assert_eq!(term.regex_search_right(&mut regex, start, end), Some(match_start..=match_end));
let mut regex = RegexSearch::new("x🦇").unwrap();
let start = Point::new(Line(1), Column(2));
let end = Point::new(Line(0), Column(0));
let match_start = Point::new(Line(1), Column(1));
let match_end = Point::new(Line(1), Column(3));
assert_eq!(term.regex_search_left(&mut regex, start, end), Some(match_start..=match_end));
}
#[test]
fn multiline() {
    #[rustfmt::skip]
    let term = mock_term("\
        test \r\n\
        test\
    ");
    const PATTERN: &str = "[a-z]*";

    // The match is clipped to the requested range on the first line.
    let mut search = RegexSearch::new(PATTERN).unwrap();
    let origin = Point::new(Line(0), Column(0));
    let limit = Point::new(Line(0), Column(3));
    assert_eq!(
        term.regex_search_right(&mut search, origin, limit),
        Some(Point::new(Line(0), Column(0))..=limit)
    );

    // Starting past the first word wraps around to the second line's match.
    let mut search = RegexSearch::new(PATTERN).unwrap();
    let origin = Point::new(Line(0), Column(4));
    let limit = Point::new(Line(0), Column(0));
    assert_eq!(
        term.regex_search_right(&mut search, origin, limit),
        Some(Point::new(Line(1), Column(0))..=Point::new(Line(1), Column(3)))
    );
}
#[test]
fn empty_match() {
    #[rustfmt::skip]
    let term = mock_term(" abc ");
    const PATTERN: &str = "[a-z]*";

    // The zero-width match at the leading space is skipped in favor of "abc".
    let mut search = RegexSearch::new(PATTERN).unwrap();
    let origin = Point::new(Line(0), Column(0));
    let limit = Point::new(Line(0), Column(4));
    assert_eq!(
        term.regex_search_right(&mut search, origin, limit),
        Some(Point::new(Line(0), Column(1))..=Point::new(Line(0), Column(3)))
    );
}
#[test]
fn empty_match_multibyte() {
    #[rustfmt::skip]
    let term = mock_term(" ↑");
    const PATTERN: &str = "[a-z]*";

    // Only zero-width matches are possible, so nothing should be reported.
    let mut search = RegexSearch::new(PATTERN).unwrap();
    let origin = Point::new(Line(0), Column(0));
    let limit = Point::new(Line(0), Column(1));
    assert_eq!(term.regex_search_right(&mut search, origin, limit), None);
}
#[test]
fn empty_match_multiline() {
    #[rustfmt::skip]
    let term = mock_term("abc \nxxx");
    const PATTERN: &str = "[a-z]*";

    // The empty match after "abc" is discarded; "xxx" on the next line is returned.
    let mut search = RegexSearch::new(PATTERN).unwrap();
    let origin = Point::new(Line(0), Column(3));
    let limit = Point::new(Line(1), Column(2));
    assert_eq!(
        term.regex_search_right(&mut search, origin, limit),
        Some(Point::new(Line(1), Column(0))..=limit)
    );
}
#[test]
fn leading_spacer() {
    #[rustfmt::skip]
    let mut term = mock_term("\
        xxx \n\
        🦇xx\
    ");
    // Flag the last cell of line 0 as the spacer before the wrapped fullwidth char.
    term.grid[Line(0)][Column(3)].flags.insert(Flags::LEADING_WIDE_CHAR_SPACER);

    let top = Point::new(Line(0), Column(0));
    let bottom = Point::new(Line(1), Column(3));

    // "🦇x" spans the line break; both search directions must agree.
    let hit = Point::new(Line(0), Column(3))..=Point::new(Line(1), Column(2));
    let mut search = RegexSearch::new("🦇x").unwrap();
    assert_eq!(term.regex_search_right(&mut search, top, bottom), Some(hit.clone()));
    let mut search = RegexSearch::new("🦇x").unwrap();
    assert_eq!(term.regex_search_left(&mut search, bottom, top), Some(hit));

    // "x🦇" starts one cell earlier and also spans the break in both directions.
    let hit = Point::new(Line(0), Column(2))..=Point::new(Line(1), Column(1));
    let mut search = RegexSearch::new("x🦇").unwrap();
    assert_eq!(term.regex_search_right(&mut search, top, bottom), Some(hit.clone()));
    let mut search = RegexSearch::new("x🦇").unwrap();
    assert_eq!(term.regex_search_left(&mut search, bottom, top), Some(hit));
}
#[test]
fn wide_without_spacer() {
    // Alternate-screen-style grid: a wide char sits in the last column with no
    // trailing spacer cell after it.
    let size = TermSize::new(2, 2);
    let mut term = Term::new(Config::default(), &size, ());
    term.grid[Line(0)][Column(0)].c = 'x';
    term.grid[Line(0)][Column(1)].c = '字';
    term.grid[Line(0)][Column(1)].flags = Flags::WIDE_CHAR;

    // Iteration must terminate cleanly instead of hanging on the wide cell.
    let mut search = RegexSearch::new("test").unwrap();
    let origin = Point::new(Line(0), Column(0));
    let limit = Point::new(Line(0), Column(1));
    let mut matches = RegexIter::new(origin, limit, Direction::Right, &term, &mut search);
    assert_eq!(matches.next(), None);
}
#[test]
fn wrap_around_to_another_end() {
    #[rustfmt::skip]
    let term = mock_term("\
        abc\r\n\
        def\
    ");

    // Searching right from the bottom wraps around to the match at the top.
    let mut search = RegexSearch::new("abc").unwrap();
    let origin = Point::new(Line(1), Column(0));
    let limit = Point::new(Line(0), Column(2));
    assert_eq!(
        term.regex_search_right(&mut search, origin, limit),
        Some(Point::new(Line(0), Column(0))..=Point::new(Line(0), Column(2)))
    );

    // Searching left from the top wraps around to the match at the bottom.
    let mut search = RegexSearch::new("def").unwrap();
    let origin = Point::new(Line(0), Column(2));
    let limit = Point::new(Line(1), Column(0));
    assert_eq!(
        term.regex_search_left(&mut search, origin, limit),
        Some(Point::new(Line(1), Column(0))..=Point::new(Line(1), Column(2)))
    );
}
#[test]
fn nfa_compile_error() {
    // A bounded repetition this large exceeds the NFA size limit at compile time.
    let result = RegexSearch::new("[0-9A-Za-z]{9999999}");
    assert!(result.is_err());
}
#[test]
fn runtime_cache_error() {
    // This repetition compiles, but exhausts the lazy DFA's cache while searching;
    // the failure must surface as "no match" rather than a panic.
    let term = mock_term(&str::repeat("i", 9999));
    let mut search = RegexSearch::new("[0-9A-Za-z]{9999}").unwrap();
    let origin = Point::new(Line(0), Column(0));
    let limit = Point::new(Line(0), Column(9999));
    assert_eq!(term.regex_search_right(&mut search, origin, limit), None);
}
#[test]
fn greed_is_good() {
    #[rustfmt::skip]
    let term = mock_term("https://github.com");

    // The longer alternative must win even though it is listed second.
    let mut search = RegexSearch::new("/github.com|https://github.com").unwrap();
    let origin = Point::new(Line(0), Column(0));
    let limit = Point::new(Line(0), Column(17));
    assert_eq!(term.regex_search_right(&mut search, origin, limit), Some(origin..=limit));
}
#[test]
fn anchored_empty() {
    #[rustfmt::skip]
    let term = mock_term("rust");

    // The ";*" branch only yields empty matches; "rust" must still be found.
    let mut search = RegexSearch::new(";*|rust").unwrap();
    let origin = Point::new(Line(0), Column(0));
    let limit = Point::new(Line(0), Column(3));
    assert_eq!(term.regex_search_right(&mut search, origin, limit), Some(origin..=limit));
}
#[test]
fn newline_breaking_semantic() {
    #[rustfmt::skip]
    let term = mock_term("\
        test abc\r\n\
        def test\
    ");

    // The word at the end of the first line stops at the hard newline.
    let word_start = term.semantic_search_left(Point::new(Line(0), Column(7)));
    let word_end = term.semantic_search_right(Point::new(Line(0), Column(7)));
    assert_eq!(word_start, Point::new(Line(0), Column(5)));
    assert_eq!(word_end, Point::new(Line(0), Column(7)));

    // The word at the start of the second line does not reach back across it either.
    let word_start = term.semantic_search_left(Point::new(Line(1), Column(0)));
    let word_end = term.semantic_search_right(Point::new(Line(1), Column(0)));
    assert_eq!(word_start, Point::new(Line(1), Column(0)));
    assert_eq!(word_end, Point::new(Line(1), Column(2)));
}
#[test]
fn inline_word_search() {
    #[rustfmt::skip]
    let term = mock_term("\
        word word word word w\n\
        ord word word word\
    ");

    // Leftward search finds the word split across the soft-wrapped boundary.
    let mut search = RegexSearch::new("word").unwrap();
    let origin = Point::new(Line(1), Column(4));
    let limit = Point::new(Line(0), Column(0));
    assert_eq!(
        term.regex_search_left(&mut search, origin, limit),
        Some(Point::new(Line(0), Column(20))..=Point::new(Line(1), Column(2)))
    );
}
#[test]
fn fullwidth_semantic() {
    #[rustfmt::skip]
    let mut term = mock_term("test-x-test");
    term.config.semantic_escape_chars = "-".into();

    // Both semantic ends resolve to the probe cell itself.
    let word_start = term.semantic_search_left(Point::new(Line(0), Column(6)));
    let word_end = term.semantic_search_right(Point::new(Line(0), Column(6)));
    assert_eq!(word_start, Point::new(Line(0), Column(6)));
    assert_eq!(word_end, Point::new(Line(0), Column(6)));
}
#[test]
fn fullwidth_across_lines() {
    let term = mock_term("a🦇\n🦇b");

    // The two fullwidth characters spanning the linebreak form a single match.
    let hit_start = Point::new(Line(0), Column(1));
    let hit_end = Point::new(Line(1), Column(1));

    // Rightward.
    let mut search = RegexSearch::new("🦇🦇").unwrap();
    let origin = Point::new(Line(0), Column(0));
    let limit = Point::new(Line(1), Column(2));
    assert_eq!(term.regex_search_right(&mut search, origin, limit), Some(hit_start..=hit_end));

    // Leftward finds the same range from the opposite end.
    let mut search = RegexSearch::new("🦇🦇").unwrap();
    assert_eq!(term.regex_search_left(&mut search, limit, origin), Some(hit_start..=hit_end));
}
#[test]
fn fullwidth_into_halfwidth_across_lines() {
    let term = mock_term("a🦇\nxab");

    // A fullwidth char followed by a halfwidth char matches across the linebreak.
    let hit = Point::new(Line(0), Column(1))..=Point::new(Line(1), Column(0));
    let top = Point::new(Line(0), Column(0));
    let bottom = Point::new(Line(1), Column(2));

    let mut search = RegexSearch::new("🦇x").unwrap();
    assert_eq!(term.regex_search_right(&mut search, top, bottom), Some(hit.clone()));

    let mut search = RegexSearch::new("🦇x").unwrap();
    assert_eq!(term.regex_search_left(&mut search, bottom, top), Some(hit));
}
#[test]
fn no_spacer_fullwidth_linewrap() {
    // Overwrite the last cell with a fullwidth char without adding spacer flags.
    let mut term = mock_term("abY\nxab");
    term.grid_mut()[Line(0)][Column(2)].c = '🦇';

    // The match across the linebreak is found from both directions.
    let hit = Point::new(Line(0), Column(2))..=Point::new(Line(1), Column(0));
    let top = Point::new(Line(0), Column(0));
    let bottom = Point::new(Line(1), Column(2));

    let mut search = RegexSearch::new("🦇x").unwrap();
    assert_eq!(term.regex_search_right(&mut search, top, bottom), Some(hit.clone()));

    let mut search = RegexSearch::new("🦇x").unwrap();
    assert_eq!(term.regex_search_left(&mut search, bottom, top), Some(hit));
}
}
| rust | {
"argument_definitions": [
{
"definitions": [
"pub struct RegexSearch {\n left_fdfa: LazyDfa,\n left_rdfa: LazyDfa,\n right_rdfa: LazyDfa,\n right_fdfa: LazyDfa,\n}"
],
"name": "regex",
"type": "&mut RegexSearch"
},
{
"definitions": [
"pub struct Point<L = Line, C = Column> {\n pub line: L,\n pub column: C,\n}"
],
"name": "origin",
"type": "Point"
},
{
"definitions": [
"pub enum Direction {\n Left,\n Right,\n}"
],
"name": "side",
"type": "Side"
},
{
"definitions": [
"pub enum Option<T> {\n /// No value.\n #[lang = \"None\"]\n #[stable(feature = \"rust1\", since = \"1.0.0\")]\n None,\n /// Some value of type `T`.\n #[lang = \"Some\"]\n #[stable(feature = \"rust1\", since = \"1.0.0\")]\n Some(#[stable(feature = \"rust1\", since = \"1.0.0\")] T),\n}"
],
"name": "max_lines",
"type": "Option<usize>"
}
],
"end_line": 176,
"name": "next_match_right",
"signature": "fn next_match_right(\n &self,\n regex: &mut RegexSearch,\n origin: Point,\n side: Side,\n max_lines: Option<usize>,\n ) -> Option<Match>",
"start_line": 140
} | {
"class_name": "impl<T> Term<T> {\n /// Get next search match in the specified direction.\n pub fn search_next(\n &self,\n regex: &mut RegexSearch,\n mut origin: Point,\n direction: Direction,\n side: Side,\n mut max_lines: Option<usize>,\n ) -> Option<Match> {\n origin = self.expand_wide(origin, direction);\n\n max_lines = max_lines.filter(|max_lines| max_lines + 1 < self.total_lines());\n\n match direction {\n Direction::Right => self.next_match_right(regex, origin, side, max_lines),\n Direction::Left => self.next_match_left(regex, origin, side, max_lines),\n }\n }\n\n /// Find the next match to the right of the origin.\n fn next_match_right(\n &self,\n regex: &mut RegexSearch,\n origin: Point,\n side: Side,\n max_lines: Option<usize>,\n ) -> Option<Match> {\n let start = self.line_search_left(origin);\n let mut end = start;\n\n // Limit maximum number of lines searched.\n end = match max_lines {\n Some(max_lines) => {\n let line = (start.line + max_lines).grid_clamp(self, Boundary::None);\n Point::new(line, self.last_column())\n },\n _ => end.sub(self, Boundary::None, 1),\n };\n\n let mut regex_iter = RegexIter::new(start, end, Direction::Right, self, regex).peekable();\n\n // Check if there's any match at all.\n let first_match = regex_iter.peek()?.clone();\n\n let regex_match = regex_iter\n .find(|regex_match| {\n let match_point = Self::match_side(regex_match, side);\n\n // If the match's point is beyond the origin, we're done.\n match_point.line < start.line\n || match_point.line > origin.line\n || (match_point.line == origin.line && match_point.column >= origin.column)\n })\n .unwrap_or(first_match);\n\n Some(regex_match)\n }\n\n /// Find the next match to the left of the origin.\n fn next_match_left(\n &self,\n regex: &mut RegexSearch,\n origin: Point,\n side: Side,\n max_lines: Option<usize>,\n ) -> Option<Match> {\n let start = self.line_search_right(origin);\n let mut end = start;\n\n // Limit maximum number of lines searched.\n end = match max_lines {\n 
Some(max_lines) => {\n let line = (start.line - max_lines).grid_clamp(self, Boundary::None);\n Point::new(line, Column(0))\n },\n _ => end.add(self, Boundary::None, 1),\n };\n\n let mut regex_iter = RegexIter::new(start, end, Direction::Left, self, regex).peekable();\n\n // Check if there's any match at all.\n let first_match = regex_iter.peek()?.clone();\n\n let regex_match = regex_iter\n .find(|regex_match| {\n let match_point = Self::match_side(regex_match, side);\n\n // If the match's point is beyond the origin, we're done.\n match_point.line > start.line\n || match_point.line < origin.line\n || (match_point.line == origin.line && match_point.column <= origin.column)\n })\n .unwrap_or(first_match);\n\n Some(regex_match)\n }\n\n /// Get the side of a match.\n fn match_side(regex_match: &Match, side: Side) -> Point {\n match side {\n Side::Right => *regex_match.end(),\n Side::Left => *regex_match.start(),\n }\n }\n\n /// Find the next regex match to the left of the origin point.\n ///\n /// The origin is always included in the regex.\n pub fn regex_search_left(\n &self,\n regex: &mut RegexSearch,\n start: Point,\n end: Point,\n ) -> Option<Match> {\n // Find start and end of match.\n let match_start = self.regex_search(start, end, &mut regex.left_fdfa)?;\n let match_end = self.regex_search(match_start, start, &mut regex.left_rdfa)?;\n\n Some(match_start..=match_end)\n }\n\n /// Find the next regex match to the right of the origin point.\n ///\n /// The origin is always included in the regex.\n pub fn regex_search_right(\n &self,\n regex: &mut RegexSearch,\n start: Point,\n end: Point,\n ) -> Option<Match> {\n // Find start and end of match.\n let match_end = self.regex_search(start, end, &mut regex.right_fdfa)?;\n let match_start = self.regex_search(match_end, start, &mut regex.right_rdfa)?;\n\n Some(match_start..=match_end)\n }\n\n /// Find the next regex match.\n ///\n /// This will always return the side of the first match which is farthest from the start 
point.\n fn regex_search(&self, start: Point, end: Point, regex: &mut LazyDfa) -> Option<Point> {\n match self.regex_search_internal(start, end, regex) {\n Ok(regex_match) => regex_match,\n Err(err) => {\n warn!(\"Regex exceeded complexity limit\");\n debug!(\" {err}\");\n None\n },\n }\n }\n\n /// Find the next regex match.\n ///\n /// To automatically log regex complexity errors, use [`Self::regex_search`] instead.\n fn regex_search_internal(\n &self,\n start: Point,\n end: Point,\n regex: &mut LazyDfa,\n ) -> Result<Option<Point>, Box<dyn Error>> {\n let topmost_line = self.topmost_line();\n let screen_lines = self.screen_lines() as i32;\n let last_column = self.last_column();\n\n // Advance the iterator.\n let next = match regex.direction {\n Direction::Right => GridIterator::next,\n Direction::Left => GridIterator::prev,\n };\n\n // Get start state for the DFA.\n let regex_anchored = if regex.match_all { Anchored::Yes } else { Anchored::No };\n let input = Input::new(&[]).anchored(regex_anchored);\n let mut state = regex.dfa.start_state_forward(&mut regex.cache, &input).unwrap();\n\n let mut iter = self.grid.iter_from(start);\n let mut regex_match = None;\n let mut done = false;\n\n let mut cell = iter.cell();\n self.skip_fullwidth(&mut iter, &mut cell, regex.direction);\n let mut c = cell.c;\n let mut last_wrapped = iter.cell().flags.contains(Flags::WRAPLINE);\n\n let mut point = iter.point();\n let mut last_point = point;\n let mut consumed_bytes = 0;\n\n // Reset the regex state to restart the search.\n macro_rules! 
reset_state {\n () => {{\n state = regex.dfa.start_state_forward(&mut regex.cache, &input)?;\n consumed_bytes = 0;\n regex_match = None;\n }};\n }\n\n 'outer: loop {\n // Convert char to array of bytes.\n let mut buf = [0; 4];\n let utf8_len = c.encode_utf8(&mut buf).len();\n\n // Pass char to DFA as individual bytes.\n for i in 0..utf8_len {\n // Inverse byte order when going left.\n let byte = match regex.direction {\n Direction::Right => buf[i],\n Direction::Left => buf[utf8_len - i - 1],\n };\n\n state = regex.dfa.next_state(&mut regex.cache, state, byte)?;\n consumed_bytes += 1;\n\n if i == 0 && state.is_match() {\n // Matches require one additional BYTE of lookahead, so we check the match state\n // for the first byte of every new character to determine if the last character\n // was a match.\n regex_match = Some(last_point);\n } else if state.is_dead() {\n if consumed_bytes == 2 {\n // Reset search if we found an empty match.\n //\n // With an unanchored search, a dead state only occurs after the end of a\n // match has been found. While we want to abort after the first match has\n // ended, we don't want empty matches since we cannot highlight them.\n //\n // So once we encounter an empty match, we reset our parser state and clear\n // the match, effectively starting a new search one character farther than\n // before.\n //\n // An empty match requires consuming `2` bytes, since the first byte will\n // report the match for the empty string, while the second byte then\n // reports the dead state indicating the first character isn't part of the\n // match.\n reset_state!();\n\n // Retry this character if first byte caused failure.\n //\n // After finding an empty match, we want to advance the search start by one\n // character. 
So if the first character has multiple bytes and the dead\n // state isn't reached at `i == 0`, then we continue with the rest of the\n // loop to advance the parser by one character.\n if i == 0 {\n continue 'outer;\n }\n } else {\n // Abort on dead state.\n break 'outer;\n }\n }\n }\n\n // Stop once we've reached the target point.\n if point == end || done {\n // When reaching the end-of-input, we need to notify the parser that no look-ahead\n // is possible and check for state changes.\n state = regex.dfa.next_eoi_state(&mut regex.cache, state)?;\n if state.is_match() {\n regex_match = Some(point);\n } else if state.is_dead() && consumed_bytes == 1 {\n // Ignore empty matches.\n regex_match = None;\n }\n\n break;\n }\n\n // Advance grid cell iterator.\n let mut cell = match next(&mut iter) {\n Some(Indexed { cell, .. }) => cell,\n None => {\n // Wrap around to other end of the scrollback buffer.\n let line = topmost_line - point.line + screen_lines - 1;\n let start = Point::new(line, last_column - point.column);\n iter = self.grid.iter_from(start);\n iter.cell()\n },\n };\n\n // Check for completion before potentially skipping over fullwidth characters.\n done = iter.point() == end;\n\n self.skip_fullwidth(&mut iter, &mut cell, regex.direction);\n\n c = cell.c;\n let wrapped = iter.cell().flags.contains(Flags::WRAPLINE);\n\n last_point = mem::replace(&mut point, iter.point());\n\n // Handle linebreaks.\n if (last_point.column == last_column && point.column == Column(0) && !last_wrapped)\n || (last_point.column == Column(0) && point.column == last_column && !wrapped)\n {\n // When reaching the end-of-input, we need to notify the parser that no\n // look-ahead is possible and check if the current state is still a match.\n state = regex.dfa.next_eoi_state(&mut regex.cache, state)?;\n if state.is_match() {\n regex_match = Some(last_point);\n }\n\n match regex_match {\n // Stop if we found a non-empty match before the linebreak.\n Some(_) if (!state.is_dead() || 
consumed_bytes > 1) && consumed_bytes != 0 => {\n break;\n },\n _ => reset_state!(),\n }\n }\n\n last_wrapped = wrapped;\n }\n\n Ok(regex_match)\n }\n\n /// Advance a grid iterator over fullwidth characters.\n fn skip_fullwidth<'a>(\n &self,\n iter: &'a mut GridIterator<'_, Cell>,\n cell: &mut &'a Cell,\n direction: Direction,\n ) {\n match direction {\n // In the alternate screen buffer there might not be a wide char spacer after a wide\n // char, so we only advance the iterator when the wide char is not in the last column.\n Direction::Right\n if cell.flags.contains(Flags::WIDE_CHAR)\n && iter.point().column < self.last_column() =>\n {\n iter.next();\n },\n Direction::Right if cell.flags.contains(Flags::LEADING_WIDE_CHAR_SPACER) => {\n if let Some(Indexed { cell: new_cell, .. }) = iter.next() {\n *cell = new_cell;\n }\n iter.next();\n },\n Direction::Left if cell.flags.contains(Flags::WIDE_CHAR_SPACER) => {\n if let Some(Indexed { cell: new_cell, .. }) = iter.prev() {\n *cell = new_cell;\n }\n\n let prev = iter.point().sub(self, Boundary::Grid, 1);\n if self.grid[prev].flags.contains(Flags::LEADING_WIDE_CHAR_SPACER) {\n iter.prev();\n }\n },\n _ => (),\n }\n }\n\n /// Find next matching bracket.\n pub fn bracket_search(&self, point: Point) -> Option<Point> {\n let start_char = self.grid[point].c;\n\n // Find the matching bracket we're looking for\n let (forward, end_char) = BRACKET_PAIRS.iter().find_map(|(open, close)| {\n if open == &start_char {\n Some((true, *close))\n } else if close == &start_char {\n Some((false, *open))\n } else {\n None\n }\n })?;\n\n let mut iter = self.grid.iter_from(point);\n\n // For every character match that equals the starting bracket, we\n // ignore one bracket of the opposite type.\n let mut skip_pairs = 0;\n\n loop {\n // Check the next cell\n let cell = if forward { iter.next() } else { iter.prev() };\n\n // Break if there are no more cells\n let cell = match cell {\n Some(cell) => cell,\n None => break,\n };\n\n // Check if 
the bracket matches\n if cell.c == end_char && skip_pairs == 0 {\n return Some(cell.point);\n } else if cell.c == start_char {\n skip_pairs += 1;\n } else if cell.c == end_char {\n skip_pairs -= 1;\n }\n }\n\n None\n }\n\n /// Find left end of semantic block.\n #[must_use]\n pub fn semantic_search_left(&self, point: Point) -> Point {\n match self.inline_search_left(point, self.semantic_escape_chars()) {\n // If we found a match, reverse for at least one cell, skipping over wide cell spacers.\n Ok(point) => {\n let wide_spacer = Flags::WIDE_CHAR_SPACER | Flags::LEADING_WIDE_CHAR_SPACER;\n self.grid\n .iter_from(point)\n .find(|cell| !cell.flags.intersects(wide_spacer))\n .map_or(point, |cell| cell.point)\n },\n Err(point) => point,\n }\n }\n\n /// Find right end of semantic block.\n #[must_use]\n pub fn semantic_search_right(&self, point: Point) -> Point {\n match self.inline_search_right(point, self.semantic_escape_chars()) {\n Ok(point) => self.grid.iter_from(point).prev().map_or(point, |cell| cell.point),\n Err(point) => point,\n }\n }\n\n /// Searching to the left, find the next character contained in `needles`.\n pub fn inline_search_left(&self, mut point: Point, needles: &str) -> Result<Point, Point> {\n // Limit the starting point to the last line in the history\n point.line = max(point.line, self.topmost_line());\n\n let mut iter = self.grid.iter_from(point);\n let last_column = self.columns() - 1;\n\n let wide_spacer = Flags::WIDE_CHAR_SPACER | Flags::LEADING_WIDE_CHAR_SPACER;\n while let Some(cell) = iter.prev() {\n if cell.point.column == last_column && !cell.flags.contains(Flags::WRAPLINE) {\n break;\n }\n\n point = cell.point;\n\n if !cell.flags.intersects(wide_spacer) && needles.contains(cell.c) {\n return Ok(point);\n }\n }\n\n Err(point)\n }\n\n /// Searching to the right, find the next character contained in `needles`.\n pub fn inline_search_right(&self, mut point: Point, needles: &str) -> Result<Point, Point> {\n // Limit the starting point to the 
last line in the history\n point.line = max(point.line, self.topmost_line());\n\n let wide_spacer = Flags::WIDE_CHAR_SPACER | Flags::LEADING_WIDE_CHAR_SPACER;\n let last_column = self.columns() - 1;\n\n // Immediately stop if start point in on line break.\n if point.column == last_column && !self.grid[point].flags.contains(Flags::WRAPLINE) {\n return Err(point);\n }\n\n for cell in self.grid.iter_from(point) {\n point = cell.point;\n\n if !cell.flags.intersects(wide_spacer) && needles.contains(cell.c) {\n return Ok(point);\n }\n\n if point.column == last_column && !cell.flags.contains(Flags::WRAPLINE) {\n break;\n }\n }\n\n Err(point)\n }\n\n /// Find the beginning of the current line across linewraps.\n pub fn line_search_left(&self, mut point: Point) -> Point {\n while point.line > self.topmost_line()\n && self.grid[point.line - 1i32][self.last_column()].flags.contains(Flags::WRAPLINE)\n {\n point.line -= 1;\n }\n\n point.column = Column(0);\n\n point\n }\n\n /// Find the end of the current line across linewraps.\n pub fn line_search_right(&self, mut point: Point) -> Point {\n while point.line + 1 < self.screen_lines()\n && self.grid[point.line][self.last_column()].flags.contains(Flags::WRAPLINE)\n {\n point.line += 1;\n }\n\n point.column = self.last_column();\n\n point\n }\n}",
"class_signature": "impl<T> Term<T>"
} |
next_match_left | alacritty-master/alacritty_terminal/src/term/search.rs | fn next_match_left(
&self,
regex: &mut RegexSearch,
origin: Point,
side: Side,
max_lines: Option<usize>,
) -> Option<Match> {
let start = self.line_search_right(origin);
let mut end = start;
// Limit maximum number of lines searched.
end = match max_lines {
Some(max_lines) => {
let line = (start.line - max_lines).grid_clamp(self, Boundary::None);
Point::new(line, Column(0))
},
_ => end.add(self, Boundary::None, 1),
};
let mut regex_iter = RegexIter::new(start, end, Direction::Left, self, regex).peekable();
// Check if there's any match at all.
let first_match = regex_iter.peek()?.clone();
let regex_match = regex_iter
.find(|regex_match| {
let match_point = Self::match_side(regex_match, side);
// If the match's point is beyond the origin, we're done.
match_point.line > start.line
|| match_point.line < origin.line
|| (match_point.line == origin.line && match_point.column <= origin.column)
})
.unwrap_or(first_match);
Some(regex_match)
} | use std::cmp::max;
use std::error::Error;
use std::mem;
use std::ops::RangeInclusive;
use log::{debug, warn};
use regex_automata::hybrid::dfa::{Builder, Cache, Config, DFA};
pub use regex_automata::hybrid::BuildError;
use regex_automata::nfa::thompson::Config as ThompsonConfig;
use regex_automata::util::syntax::Config as SyntaxConfig;
use regex_automata::{Anchored, Input, MatchKind};
use crate::grid::{BidirectionalIterator, Dimensions, GridIterator, Indexed};
use crate::index::{Boundary, Column, Direction, Point, Side};
use crate::term::cell::{Cell, Flags};
use crate::term::Term;
/// Used to match equal brackets, when performing a bracket-pair selection.
const BRACKET_PAIRS: [(char, char); 4] = [('(', ')'), ('[', ']'), ('{', '}'), ('<', '>')];
/// Inclusive range of grid points covered by a single search match.
pub type Match = RangeInclusive<Point>;
/// Terminal regex search state.
///
/// Each search direction uses a pair of DFAs: a first-pass DFA to locate one
/// boundary of the match and a second-pass DFA scanning back from there to
/// locate the other (see `regex_search_left`/`regex_search_right`).
#[derive(Clone, Debug)]
pub struct RegexSearch {
// First pass for leftward search: finds the match's start.
left_fdfa: LazyDfa,
// Second pass for leftward search: scans back right to find the match's end.
left_rdfa: LazyDfa,
// Second pass for rightward search: scans back left to find the match's start.
right_rdfa: LazyDfa,
// First pass for rightward search: finds the match's end.
right_fdfa: LazyDfa,
}
impl RegexSearch {
/// Build the forward and backward search DFAs.
pub fn new(search: &str) -> Result<RegexSearch, Box<BuildError>> {
// Setup configs for both DFA directions.
//
// Bounds are based on Regex's meta engine:
// https://github.com/rust-lang/regex/blob/061ee815ef2c44101dba7b0b124600fcb03c1912/regex-automata/src/meta/wrappers.rs#L581-L599
// Smart case: patterns without any uppercase letter match case-insensitively.
let has_uppercase = search.chars().any(|c| c.is_uppercase());
let syntax_config = SyntaxConfig::new().case_insensitive(!has_uppercase);
let config =
Config::new().minimum_cache_clear_count(Some(3)).minimum_bytes_per_state(Some(10));
// Limit the NFA's size to the lazy DFA's cache capacity.
let max_size = config.get_cache_capacity();
let thompson_config = ThompsonConfig::new().nfa_size_limit(Some(max_size));
// Create DFAs to find start/end in right-to-left search.
// The second-pass DFA always uses `match_all` so the farthest boundary is reported.
let left_rdfa = LazyDfa::new(
search,
config.clone(),
syntax_config,
thompson_config.clone(),
Direction::Right,
true,
)?;
// Patterns that can match the empty string also get `match_all` in the first pass.
let has_empty = left_rdfa.dfa.get_nfa().has_empty();
let left_fdfa = LazyDfa::new(
search,
config.clone(),
syntax_config,
thompson_config.clone(),
Direction::Left,
has_empty,
)?;
// Create DFAs to find start/end in left-to-right search.
let right_fdfa = LazyDfa::new(
search,
config.clone(),
syntax_config,
thompson_config.clone(),
Direction::Right,
has_empty,
)?;
let right_rdfa =
LazyDfa::new(search, config, syntax_config, thompson_config, Direction::Left, true)?;
Ok(RegexSearch { left_fdfa, left_rdfa, right_fdfa, right_rdfa })
}
}
/// Runtime-evaluated DFA.
#[derive(Clone, Debug)]
struct LazyDfa {
// Lazily-determinized hybrid NFA/DFA for this scan direction.
dfa: DFA,
// Scratch cache holding the DFA's lazily-built transition table.
cache: Cache,
// Direction the grid is traversed while feeding bytes to this DFA.
direction: Direction,
// When set, the DFA is built with `MatchKind::All` and searches are anchored.
match_all: bool,
}
impl LazyDfa {
    /// Build a lazy DFA scanning in `direction`.
    ///
    /// With `match_all`, the DFA reports every match state instead of using
    /// leftmost-first priority.
    fn new(
        search: &str,
        config: Config,
        syntax: SyntaxConfig,
        thompson: ThompsonConfig,
        direction: Direction,
        match_all: bool,
    ) -> Result<Self, Box<BuildError>> {
        // A leftward scan feeds bytes in reverse, so the NFA must be reversed too.
        let thompson = thompson.reverse(matches!(direction, Direction::Left));

        let kind = if match_all { MatchKind::All } else { MatchKind::LeftmostFirst };
        let config = config.match_kind(kind);

        // Compile the DFA and allocate its scratch cache.
        let dfa =
            Builder::new().configure(config).syntax(syntax).thompson(thompson).build(search)?;
        let cache = dfa.create_cache();

        Ok(Self { direction, cache, dfa, match_all })
    }
}
impl<T> Term<T> {
/// Get next search match in the specified direction.
pub fn search_next(
    &self,
    regex: &mut RegexSearch,
    mut origin: Point,
    direction: Direction,
    side: Side,
    mut max_lines: Option<usize>,
) -> Option<Match> {
    // Make sure searches starting on either cell of a wide char behave the same.
    origin = self.expand_wide(origin, direction);

    // Drop the line limit when it would cover the entire buffer anyway.
    max_lines = max_lines.filter(|max_lines| max_lines + 1 < self.total_lines());

    if let Direction::Left = direction {
        self.next_match_left(regex, origin, side, max_lines)
    } else {
        self.next_match_right(regex, origin, side, max_lines)
    }
}
/// Find the next match to the right of the origin.
fn next_match_right(
&self,
regex: &mut RegexSearch,
origin: Point,
side: Side,
max_lines: Option<usize>,
) -> Option<Match> {
// Start from the visual beginning of the origin's (possibly wrapped) line, so
// matches starting left of the origin on the same line are still produced and can
// serve as the wrap-around fallback below.
let start = self.line_search_left(origin);
let mut end = start;
// Limit maximum number of lines searched.
end = match max_lines {
Some(max_lines) => {
let line = (start.line + max_lines).grid_clamp(self, Boundary::None);
Point::new(line, self.last_column())
},
// Without a limit, stop just before `start`, covering the whole buffer.
_ => end.sub(self, Boundary::None, 1),
};
let mut regex_iter = RegexIter::new(start, end, Direction::Right, self, regex).peekable();
// Check if there's any match at all.
let first_match = regex_iter.peek()?.clone();
let regex_match = regex_iter
.find(|regex_match| {
let match_point = Self::match_side(regex_match, side);
// If the match's point is beyond the origin, we're done.
match_point.line < start.line
|| match_point.line > origin.line
|| (match_point.line == origin.line && match_point.column >= origin.column)
})
// No match right of the origin: wrap around to the first match found.
.unwrap_or(first_match);
Some(regex_match)
}
/// Find the next match to the left of the origin.
fn next_match_left(
&self,
regex: &mut RegexSearch,
origin: Point,
side: Side,
max_lines: Option<usize>,
) -> Option<Match> {
// Start from the visual end of the origin's (possibly wrapped) line, so matches
// ending right of the origin on the same line are still produced and can serve as
// the wrap-around fallback below.
let start = self.line_search_right(origin);
let mut end = start;
// Limit maximum number of lines searched.
end = match max_lines {
Some(max_lines) => {
let line = (start.line - max_lines).grid_clamp(self, Boundary::None);
Point::new(line, Column(0))
},
// Without a limit, stop just after `start`, covering the whole buffer.
_ => end.add(self, Boundary::None, 1),
};
let mut regex_iter = RegexIter::new(start, end, Direction::Left, self, regex).peekable();
// Check if there's any match at all.
let first_match = regex_iter.peek()?.clone();
let regex_match = regex_iter
.find(|regex_match| {
let match_point = Self::match_side(regex_match, side);
// If the match's point is beyond the origin, we're done.
match_point.line > start.line
|| match_point.line < origin.line
|| (match_point.line == origin.line && match_point.column <= origin.column)
})
// No match left of the origin: wrap around to the first match found.
.unwrap_or(first_match);
Some(regex_match)
}
/// Get the side of a match.
fn match_side(regex_match: &Match, side: Side) -> Point {
    if let Side::Left = side {
        *regex_match.start()
    } else {
        *regex_match.end()
    }
}
/// Find the next regex match to the left of the origin point.
///
/// The origin is always included in the regex.
pub fn regex_search_left(
&self,
regex: &mut RegexSearch,
start: Point,
end: Point,
) -> Option<Match> {
// Find start and end of match.
// First pass scans leftwards to locate the match's start; the second pass scans
// back right from there to locate its end.
let match_start = self.regex_search(start, end, &mut regex.left_fdfa)?;
let match_end = self.regex_search(match_start, start, &mut regex.left_rdfa)?;
Some(match_start..=match_end)
}
/// Find the next regex match to the right of the origin point.
///
/// The origin is always included in the regex.
pub fn regex_search_right(
    &self,
    regex: &mut RegexSearch,
    start: Point,
    end: Point,
) -> Option<Match> {
    // Run the forward DFA to locate where the match ends, then the reverse
    // DFA from that point back towards the origin to find where it starts.
    self.regex_search(start, end, &mut regex.right_fdfa).and_then(|match_end| {
        let match_start = self.regex_search(match_end, start, &mut regex.right_rdfa)?;
        Some(match_start..=match_end)
    })
}
/// Find the next regex match.
///
/// This will always return the side of the first match which is farthest from the start point.
fn regex_search(&self, start: Point, end: Point, regex: &mut LazyDfa) -> Option<Point> {
    // Downgrade DFA complexity errors to a logged "no match" instead of
    // propagating them to the caller.
    self.regex_search_internal(start, end, regex).unwrap_or_else(|err| {
        warn!("Regex exceeded complexity limit");
        debug!("  {err}");
        None
    })
}
/// Find the next regex match.
///
/// To automatically log regex complexity errors, use [`Self::regex_search`] instead.
fn regex_search_internal(
    &self,
    start: Point,
    end: Point,
    regex: &mut LazyDfa,
) -> Result<Option<Point>, Box<dyn Error>> {
    // Grid dimensions, cached for the scrollback wrap-around computation below.
    let topmost_line = self.topmost_line();
    let screen_lines = self.screen_lines() as i32;
    let last_column = self.last_column();
    // Advance the iterator.
    let next = match regex.direction {
        Direction::Right => GridIterator::next,
        Direction::Left => GridIterator::prev,
    };
    // Get start state for the DFA.
    let regex_anchored = if regex.match_all { Anchored::Yes } else { Anchored::No };
    let input = Input::new(&[]).anchored(regex_anchored);
    // NOTE(review): this initial start-state lookup is unwrapped, while the
    // identical lookup in `reset_state!` propagates the error with `?` —
    // confirm the first lookup cannot fail.
    let mut state = regex.dfa.start_state_forward(&mut regex.cache, &input).unwrap();
    let mut iter = self.grid.iter_from(start);
    let mut regex_match = None;
    // Set once the iterator has stepped onto `end`; acted upon one iteration later.
    let mut done = false;
    // Make sure we start on the printable char of any fullwidth cell.
    let mut cell = iter.cell();
    self.skip_fullwidth(&mut iter, &mut cell, regex.direction);
    let mut c = cell.c;
    let mut last_wrapped = iter.cell().flags.contains(Flags::WRAPLINE);
    let mut point = iter.point();
    let mut last_point = point;
    // Number of bytes fed to the DFA since the last (re)start of the search.
    let mut consumed_bytes = 0;
    // Reset the regex state to restart the search.
    macro_rules! reset_state {
        () => {{
            state = regex.dfa.start_state_forward(&mut regex.cache, &input)?;
            consumed_bytes = 0;
            regex_match = None;
        }};
    }
    'outer: loop {
        // Convert char to array of bytes.
        let mut buf = [0; 4];
        let utf8_len = c.encode_utf8(&mut buf).len();
        // Pass char to DFA as individual bytes.
        for i in 0..utf8_len {
            // Inverse byte order when going left.
            let byte = match regex.direction {
                Direction::Right => buf[i],
                Direction::Left => buf[utf8_len - i - 1],
            };
            state = regex.dfa.next_state(&mut regex.cache, state, byte)?;
            consumed_bytes += 1;
            if i == 0 && state.is_match() {
                // Matches require one additional BYTE of lookahead, so we check the match state
                // for the first byte of every new character to determine if the last character
                // was a match.
                regex_match = Some(last_point);
            } else if state.is_dead() {
                if consumed_bytes == 2 {
                    // Reset search if we found an empty match.
                    //
                    // With an unanchored search, a dead state only occurs after the end of a
                    // match has been found. While we want to abort after the first match has
                    // ended, we don't want empty matches since we cannot highlight them.
                    //
                    // So once we encounter an empty match, we reset our parser state and clear
                    // the match, effectively starting a new search one character farther than
                    // before.
                    //
                    // An empty match requires consuming `2` bytes, since the first byte will
                    // report the match for the empty string, while the second byte then
                    // reports the dead state indicating the first character isn't part of the
                    // match.
                    reset_state!();
                    // Retry this character if first byte caused failure.
                    //
                    // After finding an empty match, we want to advance the search start by one
                    // character. So if the first character has multiple bytes and the dead
                    // state isn't reached at `i == 0`, then we continue with the rest of the
                    // loop to advance the parser by one character.
                    if i == 0 {
                        continue 'outer;
                    }
                } else {
                    // Abort on dead state.
                    break 'outer;
                }
            }
        }
        // Stop once we've reached the target point.
        if point == end || done {
            // When reaching the end-of-input, we need to notify the parser that no look-ahead
            // is possible and check for state changes.
            state = regex.dfa.next_eoi_state(&mut regex.cache, state)?;
            if state.is_match() {
                regex_match = Some(point);
            } else if state.is_dead() && consumed_bytes == 1 {
                // Ignore empty matches.
                regex_match = None;
            }
            break;
        }
        // Advance grid cell iterator.
        let mut cell = match next(&mut iter) {
            Some(Indexed { cell, .. }) => cell,
            None => {
                // Wrap around to other end of the scrollback buffer.
                let line = topmost_line - point.line + screen_lines - 1;
                let start = Point::new(line, last_column - point.column);
                iter = self.grid.iter_from(start);
                iter.cell()
            },
        };
        // Check for completion before potentially skipping over fullwidth characters.
        done = iter.point() == end;
        self.skip_fullwidth(&mut iter, &mut cell, regex.direction);
        c = cell.c;
        let wrapped = iter.cell().flags.contains(Flags::WRAPLINE);
        last_point = mem::replace(&mut point, iter.point());
        // Handle linebreaks.
        if (last_point.column == last_column && point.column == Column(0) && !last_wrapped)
            || (last_point.column == Column(0) && point.column == last_column && !wrapped)
        {
            // When reaching the end-of-input, we need to notify the parser that no
            // look-ahead is possible and check if the current state is still a match.
            state = regex.dfa.next_eoi_state(&mut regex.cache, state)?;
            if state.is_match() {
                regex_match = Some(last_point);
            }
            match regex_match {
                // Stop if we found a non-empty match before the linebreak.
                Some(_) if (!state.is_dead() || consumed_bytes > 1) && consumed_bytes != 0 => {
                    break;
                },
                _ => reset_state!(),
            }
        }
        last_wrapped = wrapped;
    }
    Ok(regex_match)
}
/// Advance a grid iterator over fullwidth characters.
///
/// Ensures `iter` and `cell` end up on the cell holding the printable wide
/// character, stepping over spacer cells in the given `direction`.
fn skip_fullwidth<'a>(
    &self,
    iter: &'a mut GridIterator<'_, Cell>,
    cell: &mut &'a Cell,
    direction: Direction,
) {
    match direction {
        // In the alternate screen buffer there might not be a wide char spacer after a wide
        // char, so we only advance the iterator when the wide char is not in the last column.
        Direction::Right
            if cell.flags.contains(Flags::WIDE_CHAR)
                && iter.point().column < self.last_column() =>
        {
            iter.next();
        },
        // A leading spacer sits at the end of a line before a wrapped wide char:
        // step onto the wide char itself, then past its trailing spacer.
        Direction::Right if cell.flags.contains(Flags::LEADING_WIDE_CHAR_SPACER) => {
            if let Some(Indexed { cell: new_cell, .. }) = iter.next() {
                *cell = new_cell;
            }
            iter.next();
        },
        // Moving left from a trailing spacer: step back onto the wide char, and
        // once more if it was preceded by a leading spacer (wrapped wide char).
        Direction::Left if cell.flags.contains(Flags::WIDE_CHAR_SPACER) => {
            if let Some(Indexed { cell: new_cell, .. }) = iter.prev() {
                *cell = new_cell;
            }
            let prev = iter.point().sub(self, Boundary::Grid, 1);
            if self.grid[prev].flags.contains(Flags::LEADING_WIDE_CHAR_SPACER) {
                iter.prev();
            }
        },
        _ => (),
    }
}
/// Find next matching bracket.
pub fn bracket_search(&self, point: Point) -> Option<Point> {
    let start_char = self.grid[point].c;

    // Determine the counterpart bracket and which way to scan for it.
    let (forward, end_char) = BRACKET_PAIRS.iter().find_map(|&(open, close)| {
        if open == start_char {
            Some((true, close))
        } else if close == start_char {
            Some((false, open))
        } else {
            None
        }
    })?;

    let mut iter = self.grid.iter_from(point);

    // Nesting depth: every additional occurrence of the starting bracket
    // requires skipping one counterpart of the opposite type.
    let mut depth = 0;
    loop {
        // Advance towards the counterpart; `None` means the buffer is exhausted.
        let cell = (if forward { iter.next() } else { iter.prev() })?;

        if cell.c == end_char && depth == 0 {
            // Counterpart bracket outside of any nested pair.
            return Some(cell.point);
        } else if cell.c == start_char {
            depth += 1;
        } else if cell.c == end_char {
            depth -= 1;
        }
    }
}
/// Find left end of semantic block.
#[must_use]
pub fn semantic_search_left(&self, point: Point) -> Point {
    let search = self.inline_search_left(point, self.semantic_escape_chars());
    match search {
        // An escape char was found; move back towards the block by at least one
        // cell, stepping over wide-char spacer cells.
        Ok(hit) => {
            let spacers = Flags::WIDE_CHAR_SPACER | Flags::LEADING_WIDE_CHAR_SPACER;
            self.grid
                .iter_from(hit)
                .find(|cell| !cell.flags.intersects(spacers))
                .map_or(hit, |cell| cell.point)
        },
        // No escape char before the search stopped; that boundary is the answer.
        Err(boundary) => boundary,
    }
}
/// Find right end of semantic block.
#[must_use]
pub fn semantic_search_right(&self, point: Point) -> Point {
    let search = self.inline_search_right(point, self.semantic_escape_chars());
    match search {
        // Escape char found; the block ends one cell before it.
        Ok(hit) => self.grid.iter_from(hit).prev().map_or(hit, |cell| cell.point),
        // Search stopped without a hit; use the stop point directly.
        Err(boundary) => boundary,
    }
}
/// Searching to the left, find the next character contained in `needles`.
///
/// Returns `Ok` with the position of the found character, or `Err` with the
/// point where the search stopped (hard line break or start of the buffer).
pub fn inline_search_left(&self, mut point: Point, needles: &str) -> Result<Point, Point> {
    // Limit the starting point to the last line in the history
    point.line = max(point.line, self.topmost_line());
    let mut iter = self.grid.iter_from(point);
    let last_column = self.columns() - 1;
    let wide_spacer = Flags::WIDE_CHAR_SPACER | Flags::LEADING_WIDE_CHAR_SPACER;
    while let Some(cell) = iter.prev() {
        // Stepping into the last column of the previous line without WRAPLINE
        // means we crossed a hard line break; stop before updating `point`.
        if cell.point.column == last_column && !cell.flags.contains(Flags::WRAPLINE) {
            break;
        }
        point = cell.point;
        // Spacer cells never hold the printable char, so they cannot match.
        if !cell.flags.intersects(wide_spacer) && needles.contains(cell.c) {
            return Ok(point);
        }
    }
    Err(point)
}
/// Searching to the right, find the next character contained in `needles`.
///
/// Returns `Ok` with the position of the found character, or `Err` with the
/// point where the search stopped (hard line break or end of the buffer).
pub fn inline_search_right(&self, mut point: Point, needles: &str) -> Result<Point, Point> {
    // Limit the starting point to the last line in the history
    point.line = max(point.line, self.topmost_line());
    let wide_spacer = Flags::WIDE_CHAR_SPACER | Flags::LEADING_WIDE_CHAR_SPACER;
    let last_column = self.columns() - 1;
    // Immediately stop if start point in on line break.
    if point.column == last_column && !self.grid[point].flags.contains(Flags::WRAPLINE) {
        return Err(point);
    }
    for cell in self.grid.iter_from(point) {
        point = cell.point;
        // Spacer cells never hold the printable char, so they cannot match.
        if !cell.flags.intersects(wide_spacer) && needles.contains(cell.c) {
            return Ok(point);
        }
        // Stop at the end of a line which isn't soft-wrapped.
        if point.column == last_column && !cell.flags.contains(Flags::WRAPLINE) {
            break;
        }
    }
    Err(point)
}
/// Find the beginning of the current line across linewraps.
pub fn line_search_left(&self, mut point: Point) -> Point {
    let top = self.topmost_line();
    let last_column = self.last_column();

    // Walk upwards while the line above soft-wraps into this one.
    while point.line > top
        && self.grid[point.line - 1i32][last_column].flags.contains(Flags::WRAPLINE)
    {
        point.line -= 1;
    }

    point.column = Column(0);
    point
}
/// Find the end of the current line across linewraps.
pub fn line_search_right(&self, mut point: Point) -> Point {
    let last_column = self.last_column();

    // Walk downwards while this line soft-wraps into the next one.
    while point.line + 1 < self.screen_lines()
        && self.grid[point.line][last_column].flags.contains(Flags::WRAPLINE)
    {
        point.line += 1;
    }

    point.column = last_column;
    point
}
}
/// Iterator over regex matches.
pub struct RegexIter<'a, T> {
    // Origin for the next search; advanced past each returned match.
    point: Point,
    // Inclusive limit of the search range.
    end: Point,
    // Whether matches are produced left-to-right or right-to-left.
    direction: Direction,
    // Compiled search DFAs, reused across all searches of this iteration.
    regex: &'a mut RegexSearch,
    term: &'a Term<T>,
    // Set once the final search (with the origin at `end`) has been performed.
    done: bool,
}
impl<'a, T> RegexIter<'a, T> {
pub fn new(
start: Point,
end: Point,
direction: Direction,
term: &'a Term<T>,
regex: &'a mut RegexSearch,
) -> Self {
Self { point: start, done: false, end, direction, term, regex }
}
/// Skip one cell, advancing the origin point to the next one.
fn skip(&mut self) {
self.point = self.term.expand_wide(self.point, self.direction);
self.point = match self.direction {
Direction::Right => self.point.add(self.term, Boundary::None, 1),
Direction::Left => self.point.sub(self.term, Boundary::None, 1),
};
}
/// Get the next match in the specified direction.
fn next_match(&mut self) -> Option<Match> {
match self.direction {
Direction::Right => self.term.regex_search_right(self.regex, self.point, self.end),
Direction::Left => self.term.regex_search_left(self.regex, self.point, self.end),
}
}
}
impl<T> Iterator for RegexIter<'_, T> {
    type Item = Match;

    fn next(&mut self) -> Option<Self::Item> {
        if self.done {
            return None;
        }

        // The end point itself may still hold a single-cell match, so once the
        // origin reaches it we perform exactly one final search.
        if self.point == self.end {
            self.done = true;
        }

        let found = self.next_match()?;

        self.point = *found.end();
        if self.point == self.end {
            // A match terminating exactly on the limit exhausts the search.
            self.done = true;
        } else {
            // Continue searching just past this match.
            self.skip();
        }

        Some(found)
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    use crate::index::{Column, Line};
    use crate::term::test::{mock_term, TermSize};
    use crate::term::Config;

    #[test]
    fn regex_right() {
        #[rustfmt::skip]
        let term = mock_term("\
            testing66\r\n\
            Alacritty\n\
            123\r\n\
            Alacritty\r\n\
            123\
        ");

        // Check regex across wrapped and unwrapped lines.
        let mut regex = RegexSearch::new("Ala.*123").unwrap();
        let start = Point::new(Line(1), Column(0));
        let end = Point::new(Line(4), Column(2));
        let match_start = Point::new(Line(1), Column(0));
        let match_end = Point::new(Line(2), Column(2));
        assert_eq!(term.regex_search_right(&mut regex, start, end), Some(match_start..=match_end));
    }

    #[test]
    fn regex_left() {
        #[rustfmt::skip]
        let term = mock_term("\
            testing66\r\n\
            Alacritty\n\
            123\r\n\
            Alacritty\r\n\
            123\
        ");

        // Check regex across wrapped and unwrapped lines.
        let mut regex = RegexSearch::new("Ala.*123").unwrap();
        let start = Point::new(Line(4), Column(2));
        let end = Point::new(Line(1), Column(0));
        let match_start = Point::new(Line(1), Column(0));
        let match_end = Point::new(Line(2), Column(2));
        assert_eq!(term.regex_search_left(&mut regex, start, end), Some(match_start..=match_end));
    }

    #[test]
    fn nested_regex() {
        #[rustfmt::skip]
        let term = mock_term("\
            Ala -> Alacritty -> critty\r\n\
            critty\
        ");

        // Greedy stopped at linebreak.
        let mut regex = RegexSearch::new("Ala.*critty").unwrap();
        let start = Point::new(Line(0), Column(0));
        let end = Point::new(Line(0), Column(25));
        assert_eq!(term.regex_search_right(&mut regex, start, end), Some(start..=end));

        // Greedy stopped at dead state.
        let mut regex = RegexSearch::new("Ala[^y]*critty").unwrap();
        let start = Point::new(Line(0), Column(0));
        let end = Point::new(Line(0), Column(15));
        assert_eq!(term.regex_search_right(&mut regex, start, end), Some(start..=end));
    }

    #[test]
    fn no_match_right() {
        #[rustfmt::skip]
        let term = mock_term("\
            first line\n\
            broken second\r\n\
            third\
        ");

        // A pattern that appears nowhere must yield `None`.
        let mut regex = RegexSearch::new("nothing").unwrap();
        let start = Point::new(Line(0), Column(0));
        let end = Point::new(Line(2), Column(4));
        assert_eq!(term.regex_search_right(&mut regex, start, end), None);
    }

    #[test]
    fn no_match_left() {
        #[rustfmt::skip]
        let term = mock_term("\
            first line\n\
            broken second\r\n\
            third\
        ");

        // A pattern that appears nowhere must yield `None`.
        let mut regex = RegexSearch::new("nothing").unwrap();
        let start = Point::new(Line(2), Column(4));
        let end = Point::new(Line(0), Column(0));
        assert_eq!(term.regex_search_left(&mut regex, start, end), None);
    }

    #[test]
    fn include_linebreak_left() {
        #[rustfmt::skip]
        let term = mock_term("\
            testing123\r\n\
            xxx\
        ");

        // Make sure the cell containing the linebreak is not skipped.
        let mut regex = RegexSearch::new("te.*123").unwrap();
        let start = Point::new(Line(1), Column(0));
        let end = Point::new(Line(0), Column(0));
        let match_start = Point::new(Line(0), Column(0));
        let match_end = Point::new(Line(0), Column(9));
        assert_eq!(term.regex_search_left(&mut regex, start, end), Some(match_start..=match_end));
    }

    #[test]
    fn include_linebreak_right() {
        #[rustfmt::skip]
        let term = mock_term("\
            xxx\r\n\
            testing123\
        ");

        // Make sure the cell containing the linebreak is not skipped.
        let mut regex = RegexSearch::new("te.*123").unwrap();
        let start = Point::new(Line(0), Column(2));
        let end = Point::new(Line(1), Column(9));
        let match_start = Point::new(Line(1), Column(0));
        assert_eq!(term.regex_search_right(&mut regex, start, end), Some(match_start..=end));
    }

    #[test]
    fn skip_dead_cell() {
        let term = mock_term("alacritty");

        // Make sure dead state cell is skipped when reversing.
        let mut regex = RegexSearch::new("alacrit").unwrap();
        let start = Point::new(Line(0), Column(0));
        let end = Point::new(Line(0), Column(6));
        assert_eq!(term.regex_search_right(&mut regex, start, end), Some(start..=end));
    }

    #[test]
    fn reverse_search_dead_recovery() {
        let term = mock_term("zooo lense");

        // Make sure the reverse DFA operates the same as a forward DFA.
        let mut regex = RegexSearch::new("zoo").unwrap();
        let start = Point::new(Line(0), Column(9));
        let end = Point::new(Line(0), Column(0));
        let match_start = Point::new(Line(0), Column(0));
        let match_end = Point::new(Line(0), Column(2));
        assert_eq!(term.regex_search_left(&mut regex, start, end), Some(match_start..=match_end));
    }

    #[test]
    fn multibyte_unicode() {
        // Multi-byte UTF-8 characters must be fed to the DFA correctly in both
        // search directions.
        let term = mock_term("testвосибing");

        let mut regex = RegexSearch::new("te.*ing").unwrap();
        let start = Point::new(Line(0), Column(0));
        let end = Point::new(Line(0), Column(11));
        assert_eq!(term.regex_search_right(&mut regex, start, end), Some(start..=end));

        let mut regex = RegexSearch::new("te.*ing").unwrap();
        let start = Point::new(Line(0), Column(11));
        let end = Point::new(Line(0), Column(0));
        assert_eq!(term.regex_search_left(&mut regex, start, end), Some(end..=start));
    }

    #[test]
    fn end_on_multibyte_unicode() {
        // A match ending on a multi-byte character before the search limit.
        let term = mock_term("testвосиб");

        let mut regex = RegexSearch::new("te.*и").unwrap();
        let start = Point::new(Line(0), Column(0));
        let end = Point::new(Line(0), Column(8));
        let match_end = Point::new(Line(0), Column(7));
        assert_eq!(term.regex_search_right(&mut regex, start, end), Some(start..=match_end));
    }

    #[test]
    fn fullwidth() {
        // Fullwidth characters and their spacers inside a match.
        let term = mock_term("a🦇x🦇");

        let mut regex = RegexSearch::new("[^ ]*").unwrap();
        let start = Point::new(Line(0), Column(0));
        let end = Point::new(Line(0), Column(5));
        assert_eq!(term.regex_search_right(&mut regex, start, end), Some(start..=end));

        let mut regex = RegexSearch::new("[^ ]*").unwrap();
        let start = Point::new(Line(0), Column(5));
        let end = Point::new(Line(0), Column(0));
        assert_eq!(term.regex_search_left(&mut regex, start, end), Some(end..=start));
    }

    #[test]
    fn singlecell_fullwidth() {
        // A match consisting of nothing but one fullwidth character.
        let term = mock_term("🦇");

        let mut regex = RegexSearch::new("🦇").unwrap();
        let start = Point::new(Line(0), Column(0));
        let end = Point::new(Line(0), Column(1));
        assert_eq!(term.regex_search_right(&mut regex, start, end), Some(start..=end));

        let mut regex = RegexSearch::new("🦇").unwrap();
        let start = Point::new(Line(0), Column(1));
        let end = Point::new(Line(0), Column(0));
        assert_eq!(term.regex_search_left(&mut regex, start, end), Some(end..=start));
    }

    #[test]
    fn end_on_fullwidth() {
        let term = mock_term("jarr🦇");

        let start = Point::new(Line(0), Column(0));
        let end = Point::new(Line(0), Column(4));

        // Ensure ending without a match doesn't loop indefinitely.
        let mut regex = RegexSearch::new("x").unwrap();
        assert_eq!(term.regex_search_right(&mut regex, start, end), None);

        let mut regex = RegexSearch::new("x").unwrap();
        let match_end = Point::new(Line(0), Column(5));
        assert_eq!(term.regex_search_right(&mut regex, start, match_end), None);

        // Ensure match is captured when only partially inside range.
        let mut regex = RegexSearch::new("jarr🦇").unwrap();
        assert_eq!(term.regex_search_right(&mut regex, start, end), Some(start..=match_end));
    }

    #[test]
    fn wrapping() {
        // Searches whose range crosses the hard line break between two lines.
        #[rustfmt::skip]
        let term = mock_term("\
            xxx\r\n\
            xxx\
        ");

        let mut regex = RegexSearch::new("xxx").unwrap();
        let start = Point::new(Line(0), Column(2));
        let end = Point::new(Line(1), Column(2));
        let match_start = Point::new(Line(1), Column(0));
        assert_eq!(term.regex_search_right(&mut regex, start, end), Some(match_start..=end));

        let mut regex = RegexSearch::new("xxx").unwrap();
        let start = Point::new(Line(1), Column(0));
        let end = Point::new(Line(0), Column(0));
        let match_end = Point::new(Line(0), Column(2));
        assert_eq!(term.regex_search_left(&mut regex, start, end), Some(end..=match_end));
    }

    #[test]
    fn wrapping_into_fullwidth() {
        // Fullwidth characters at the boundary of wrapped lines.
        #[rustfmt::skip]
        let term = mock_term("\
            🦇xx\r\n\
            xx🦇\
        ");

        let mut regex = RegexSearch::new("🦇x").unwrap();
        let start = Point::new(Line(0), Column(0));
        let end = Point::new(Line(1), Column(3));
        let match_start = Point::new(Line(0), Column(0));
        let match_end = Point::new(Line(0), Column(2));
        assert_eq!(term.regex_search_right(&mut regex, start, end), Some(match_start..=match_end));

        let mut regex = RegexSearch::new("x🦇").unwrap();
        let start = Point::new(Line(1), Column(2));
        let end = Point::new(Line(0), Column(0));
        let match_start = Point::new(Line(1), Column(1));
        let match_end = Point::new(Line(1), Column(3));
        assert_eq!(term.regex_search_left(&mut regex, start, end), Some(match_start..=match_end));
    }

    #[test]
    fn multiline() {
        // Matches must not extend across a hard line break.
        #[rustfmt::skip]
        let term = mock_term("\
            test \r\n\
            test\
        ");

        const PATTERN: &str = "[a-z]*";
        let mut regex = RegexSearch::new(PATTERN).unwrap();
        let start = Point::new(Line(0), Column(0));
        let end = Point::new(Line(0), Column(3));
        let match_start = Point::new(Line(0), Column(0));
        assert_eq!(term.regex_search_right(&mut regex, start, end), Some(match_start..=end));

        let mut regex = RegexSearch::new(PATTERN).unwrap();
        let start = Point::new(Line(0), Column(4));
        let end = Point::new(Line(0), Column(0));
        let match_start = Point::new(Line(1), Column(0));
        let match_end = Point::new(Line(1), Column(3));
        assert_eq!(term.regex_search_right(&mut regex, start, end), Some(match_start..=match_end));
    }

    #[test]
    fn empty_match() {
        // Empty matches must be skipped in favor of the first non-empty one.
        #[rustfmt::skip]
        let term = mock_term(" abc ");

        const PATTERN: &str = "[a-z]*";
        let mut regex = RegexSearch::new(PATTERN).unwrap();
        let start = Point::new(Line(0), Column(0));
        let end = Point::new(Line(0), Column(4));
        let match_start = Point::new(Line(0), Column(1));
        let match_end = Point::new(Line(0), Column(3));
        assert_eq!(term.regex_search_right(&mut regex, start, end), Some(match_start..=match_end));
    }

    #[test]
    fn empty_match_multibyte() {
        // Only empty matches in range must produce no match at all.
        #[rustfmt::skip]
        let term = mock_term(" ↑");

        const PATTERN: &str = "[a-z]*";
        let mut regex = RegexSearch::new(PATTERN).unwrap();
        let start = Point::new(Line(0), Column(0));
        let end = Point::new(Line(0), Column(1));
        assert_eq!(term.regex_search_right(&mut regex, start, end), None);
    }

    #[test]
    fn empty_match_multiline() {
        // Empty match right before a linebreak must not mask the match on the
        // following line.
        #[rustfmt::skip]
        let term = mock_term("abc \nxxx");

        const PATTERN: &str = "[a-z]*";
        let mut regex = RegexSearch::new(PATTERN).unwrap();
        let start = Point::new(Line(0), Column(3));
        let end = Point::new(Line(1), Column(2));
        let match_start = Point::new(Line(1), Column(0));
        let match_end = Point::new(Line(1), Column(2));
        assert_eq!(term.regex_search_right(&mut regex, start, end), Some(match_start..=match_end));
    }

    #[test]
    fn leading_spacer() {
        // Wide char pushed to the next line, leaving a leading spacer behind.
        #[rustfmt::skip]
        let mut term = mock_term("\
            xxx \n\
            🦇xx\
        ");
        term.grid[Line(0)][Column(3)].flags.insert(Flags::LEADING_WIDE_CHAR_SPACER);

        let mut regex = RegexSearch::new("🦇x").unwrap();
        let start = Point::new(Line(0), Column(0));
        let end = Point::new(Line(1), Column(3));
        let match_start = Point::new(Line(0), Column(3));
        let match_end = Point::new(Line(1), Column(2));
        assert_eq!(term.regex_search_right(&mut regex, start, end), Some(match_start..=match_end));

        let mut regex = RegexSearch::new("🦇x").unwrap();
        let start = Point::new(Line(1), Column(3));
        let end = Point::new(Line(0), Column(0));
        let match_start = Point::new(Line(0), Column(3));
        let match_end = Point::new(Line(1), Column(2));
        assert_eq!(term.regex_search_left(&mut regex, start, end), Some(match_start..=match_end));

        let mut regex = RegexSearch::new("x🦇").unwrap();
        let start = Point::new(Line(0), Column(0));
        let end = Point::new(Line(1), Column(3));
        let match_start = Point::new(Line(0), Column(2));
        let match_end = Point::new(Line(1), Column(1));
        assert_eq!(term.regex_search_right(&mut regex, start, end), Some(match_start..=match_end));

        let mut regex = RegexSearch::new("x🦇").unwrap();
        let start = Point::new(Line(1), Column(3));
        let end = Point::new(Line(0), Column(0));
        let match_start = Point::new(Line(0), Column(2));
        let match_end = Point::new(Line(1), Column(1));
        assert_eq!(term.regex_search_left(&mut regex, start, end), Some(match_start..=match_end));
    }

    #[test]
    fn wide_without_spacer() {
        // Alternate-screen-like grid: wide char without a trailing spacer.
        let size = TermSize::new(2, 2);
        let mut term = Term::new(Config::default(), &size, ());
        term.grid[Line(0)][Column(0)].c = 'x';
        term.grid[Line(0)][Column(1)].c = '字';
        term.grid[Line(0)][Column(1)].flags = Flags::WIDE_CHAR;

        let mut regex = RegexSearch::new("test").unwrap();
        let start = Point::new(Line(0), Column(0));
        let end = Point::new(Line(0), Column(1));

        let mut iter = RegexIter::new(start, end, Direction::Right, &term, &mut regex);
        assert_eq!(iter.next(), None);
    }

    #[test]
    fn wrap_around_to_another_end() {
        #[rustfmt::skip]
        let term = mock_term("\
            abc\r\n\
            def\
        ");

        // Bottom to top.
        let mut regex = RegexSearch::new("abc").unwrap();
        let start = Point::new(Line(1), Column(0));
        let end = Point::new(Line(0), Column(2));
        let match_start = Point::new(Line(0), Column(0));
        let match_end = Point::new(Line(0), Column(2));
        assert_eq!(term.regex_search_right(&mut regex, start, end), Some(match_start..=match_end));

        // Top to bottom.
        let mut regex = RegexSearch::new("def").unwrap();
        let start = Point::new(Line(0), Column(2));
        let end = Point::new(Line(1), Column(0));
        let match_start = Point::new(Line(1), Column(0));
        let match_end = Point::new(Line(1), Column(2));
        assert_eq!(term.regex_search_left(&mut regex, start, end), Some(match_start..=match_end));
    }

    #[test]
    fn nfa_compile_error() {
        // Patterns exceeding the NFA size limit must fail at construction.
        assert!(RegexSearch::new("[0-9A-Za-z]{9999999}").is_err());
    }

    #[test]
    fn runtime_cache_error() {
        // DFA cache exhaustion at search time must be reported as "no match".
        let term = mock_term(&str::repeat("i", 9999));

        let mut regex = RegexSearch::new("[0-9A-Za-z]{9999}").unwrap();
        let start = Point::new(Line(0), Column(0));
        let end = Point::new(Line(0), Column(9999));
        assert_eq!(term.regex_search_right(&mut regex, start, end), None);
    }

    #[test]
    fn greed_is_good() {
        #[rustfmt::skip]
        let term = mock_term("https://github.com");

        // The longer alternation branch must win over the shorter one.
        let mut regex = RegexSearch::new("/github.com|https://github.com").unwrap();
        let start = Point::new(Line(0), Column(0));
        let end = Point::new(Line(0), Column(17));
        assert_eq!(term.regex_search_right(&mut regex, start, end), Some(start..=end));
    }

    #[test]
    fn anchored_empty() {
        #[rustfmt::skip]
        let term = mock_term("rust");

        // An empty-matching alternation branch must not hide the real match.
        let mut regex = RegexSearch::new(";*|rust").unwrap();
        let start = Point::new(Line(0), Column(0));
        let end = Point::new(Line(0), Column(3));
        assert_eq!(term.regex_search_right(&mut regex, start, end), Some(start..=end));
    }

    #[test]
    fn newline_breaking_semantic() {
        #[rustfmt::skip]
        let term = mock_term("\
            test abc\r\n\
            def test\
        ");

        // Start at last character.
        let start = term.semantic_search_left(Point::new(Line(0), Column(7)));
        let end = term.semantic_search_right(Point::new(Line(0), Column(7)));
        assert_eq!(start, Point::new(Line(0), Column(5)));
        assert_eq!(end, Point::new(Line(0), Column(7)));

        // Start at first character.
        let start = term.semantic_search_left(Point::new(Line(1), Column(0)));
        let end = term.semantic_search_right(Point::new(Line(1), Column(0)));
        assert_eq!(start, Point::new(Line(1), Column(0)));
        assert_eq!(end, Point::new(Line(1), Column(2)));
    }

    #[test]
    fn inline_word_search() {
        // Word split across a soft-wrapped line boundary.
        #[rustfmt::skip]
        let term = mock_term("\
            word word word word w\n\
            ord word word word\
        ");

        let mut regex = RegexSearch::new("word").unwrap();
        let start = Point::new(Line(1), Column(4));
        let end = Point::new(Line(0), Column(0));
        let match_start = Point::new(Line(0), Column(20));
        let match_end = Point::new(Line(1), Column(2));
        assert_eq!(term.regex_search_left(&mut regex, start, end), Some(match_start..=match_end));
    }

    #[test]
    fn fullwidth_semantic() {
        // A single fullwidth char bounded by escape chars on both sides.
        #[rustfmt::skip]
        let mut term = mock_term("test-x-test");
        term.config.semantic_escape_chars = "-".into();

        let start = term.semantic_search_left(Point::new(Line(0), Column(6)));
        let end = term.semantic_search_right(Point::new(Line(0), Column(6)));
        assert_eq!(start, Point::new(Line(0), Column(6)));
        assert_eq!(end, Point::new(Line(0), Column(6)));
    }

    #[test]
    fn fullwidth_across_lines() {
        // Adjacent fullwidth chars spanning a soft-wrapped boundary.
        let term = mock_term("a🦇\n🦇b");
        let mut regex = RegexSearch::new("🦇🦇").unwrap();
        let start = Point::new(Line(0), Column(0));
        let end = Point::new(Line(1), Column(2));
        let match_start = Point::new(Line(0), Column(1));
        let match_end = Point::new(Line(1), Column(1));
        assert_eq!(term.regex_search_right(&mut regex, start, end), Some(match_start..=match_end));

        let mut regex = RegexSearch::new("🦇🦇").unwrap();
        let start = Point::new(Line(1), Column(2));
        let end = Point::new(Line(0), Column(0));
        let match_start = Point::new(Line(1), Column(1));
        let match_end = Point::new(Line(0), Column(1));
        assert_eq!(term.regex_search_left(&mut regex, start, end), Some(match_end..=match_start));
    }

    #[test]
    fn fullwidth_into_halfwidth_across_lines() {
        // Fullwidth char followed by a halfwidth char across a soft wrap.
        let term = mock_term("a🦇\nxab");
        let mut regex = RegexSearch::new("🦇x").unwrap();
        let start = Point::new(Line(0), Column(0));
        let end = Point::new(Line(1), Column(2));
        let match_start = Point::new(Line(0), Column(1));
        let match_end = Point::new(Line(1), Column(0));
        assert_eq!(term.regex_search_right(&mut regex, start, end), Some(match_start..=match_end));

        let mut regex = RegexSearch::new("🦇x").unwrap();
        let start = Point::new(Line(1), Column(2));
        let end = Point::new(Line(0), Column(0));
        let match_start = Point::new(Line(1), Column(0));
        let match_end = Point::new(Line(0), Column(1));
        assert_eq!(term.regex_search_left(&mut regex, start, end), Some(match_end..=match_start));
    }

    #[test]
    fn no_spacer_fullwidth_linewrap() {
        // Wide char in the last column without any spacer cells at all.
        let mut term = mock_term("abY\nxab");
        term.grid_mut()[Line(0)][Column(2)].c = '🦇';
        let mut regex = RegexSearch::new("🦇x").unwrap();
        let start = Point::new(Line(0), Column(0));
        let end = Point::new(Line(1), Column(2));
        let match_start = Point::new(Line(0), Column(2));
        let match_end = Point::new(Line(1), Column(0));
        assert_eq!(term.regex_search_right(&mut regex, start, end), Some(match_start..=match_end));

        let mut regex = RegexSearch::new("🦇x").unwrap();
        let start = Point::new(Line(1), Column(2));
        let end = Point::new(Line(0), Column(0));
        let match_start = Point::new(Line(1), Column(0));
        let match_end = Point::new(Line(0), Column(2));
        assert_eq!(term.regex_search_left(&mut regex, start, end), Some(match_end..=match_start));
    }
}
| rust | {
"argument_definitions": [
{
"definitions": [
"pub struct RegexSearch {\n left_fdfa: LazyDfa,\n left_rdfa: LazyDfa,\n right_rdfa: LazyDfa,\n right_fdfa: LazyDfa,\n}"
],
"name": "regex",
"type": "&mut RegexSearch"
},
{
"definitions": [
"pub struct Point<L = Line, C = Column> {\n pub line: L,\n pub column: C,\n}"
],
"name": "origin",
"type": "Point"
},
{
"definitions": [
"pub enum Direction {\n Left,\n Right,\n}"
],
"name": "side",
"type": "Side"
},
{
"definitions": [
"pub enum Option<T> {\n /// No value.\n #[lang = \"None\"]\n #[stable(feature = \"rust1\", since = \"1.0.0\")]\n None,\n /// Some value of type `T`.\n #[lang = \"Some\"]\n #[stable(feature = \"rust1\", since = \"1.0.0\")]\n Some(#[stable(feature = \"rust1\", since = \"1.0.0\")] T),\n}"
],
"name": "max_lines",
"type": "Option<usize>"
}
],
"end_line": 215,
"name": "next_match_left",
"signature": "fn next_match_left(\n &self,\n regex: &mut RegexSearch,\n origin: Point,\n side: Side,\n max_lines: Option<usize>,\n ) -> Option<Match>",
"start_line": 179
} | {
"class_name": "impl<T> Term<T> {\n /// Get next search match in the specified direction.\n pub fn search_next(\n &self,\n regex: &mut RegexSearch,\n mut origin: Point,\n direction: Direction,\n side: Side,\n mut max_lines: Option<usize>,\n ) -> Option<Match> {\n origin = self.expand_wide(origin, direction);\n\n max_lines = max_lines.filter(|max_lines| max_lines + 1 < self.total_lines());\n\n match direction {\n Direction::Right => self.next_match_right(regex, origin, side, max_lines),\n Direction::Left => self.next_match_left(regex, origin, side, max_lines),\n }\n }\n\n /// Find the next match to the right of the origin.\n fn next_match_right(\n &self,\n regex: &mut RegexSearch,\n origin: Point,\n side: Side,\n max_lines: Option<usize>,\n ) -> Option<Match> {\n let start = self.line_search_left(origin);\n let mut end = start;\n\n // Limit maximum number of lines searched.\n end = match max_lines {\n Some(max_lines) => {\n let line = (start.line + max_lines).grid_clamp(self, Boundary::None);\n Point::new(line, self.last_column())\n },\n _ => end.sub(self, Boundary::None, 1),\n };\n\n let mut regex_iter = RegexIter::new(start, end, Direction::Right, self, regex).peekable();\n\n // Check if there's any match at all.\n let first_match = regex_iter.peek()?.clone();\n\n let regex_match = regex_iter\n .find(|regex_match| {\n let match_point = Self::match_side(regex_match, side);\n\n // If the match's point is beyond the origin, we're done.\n match_point.line < start.line\n || match_point.line > origin.line\n || (match_point.line == origin.line && match_point.column >= origin.column)\n })\n .unwrap_or(first_match);\n\n Some(regex_match)\n }\n\n /// Find the next match to the left of the origin.\n fn next_match_left(\n &self,\n regex: &mut RegexSearch,\n origin: Point,\n side: Side,\n max_lines: Option<usize>,\n ) -> Option<Match> {\n let start = self.line_search_right(origin);\n let mut end = start;\n\n // Limit maximum number of lines searched.\n end = match max_lines {\n 
Some(max_lines) => {\n let line = (start.line - max_lines).grid_clamp(self, Boundary::None);\n Point::new(line, Column(0))\n },\n _ => end.add(self, Boundary::None, 1),\n };\n\n let mut regex_iter = RegexIter::new(start, end, Direction::Left, self, regex).peekable();\n\n // Check if there's any match at all.\n let first_match = regex_iter.peek()?.clone();\n\n let regex_match = regex_iter\n .find(|regex_match| {\n let match_point = Self::match_side(regex_match, side);\n\n // If the match's point is beyond the origin, we're done.\n match_point.line > start.line\n || match_point.line < origin.line\n || (match_point.line == origin.line && match_point.column <= origin.column)\n })\n .unwrap_or(first_match);\n\n Some(regex_match)\n }\n\n /// Get the side of a match.\n fn match_side(regex_match: &Match, side: Side) -> Point {\n match side {\n Side::Right => *regex_match.end(),\n Side::Left => *regex_match.start(),\n }\n }\n\n /// Find the next regex match to the left of the origin point.\n ///\n /// The origin is always included in the regex.\n pub fn regex_search_left(\n &self,\n regex: &mut RegexSearch,\n start: Point,\n end: Point,\n ) -> Option<Match> {\n // Find start and end of match.\n let match_start = self.regex_search(start, end, &mut regex.left_fdfa)?;\n let match_end = self.regex_search(match_start, start, &mut regex.left_rdfa)?;\n\n Some(match_start..=match_end)\n }\n\n /// Find the next regex match to the right of the origin point.\n ///\n /// The origin is always included in the regex.\n pub fn regex_search_right(\n &self,\n regex: &mut RegexSearch,\n start: Point,\n end: Point,\n ) -> Option<Match> {\n // Find start and end of match.\n let match_end = self.regex_search(start, end, &mut regex.right_fdfa)?;\n let match_start = self.regex_search(match_end, start, &mut regex.right_rdfa)?;\n\n Some(match_start..=match_end)\n }\n\n /// Find the next regex match.\n ///\n /// This will always return the side of the first match which is farthest from the start 
point.\n fn regex_search(&self, start: Point, end: Point, regex: &mut LazyDfa) -> Option<Point> {\n match self.regex_search_internal(start, end, regex) {\n Ok(regex_match) => regex_match,\n Err(err) => {\n warn!(\"Regex exceeded complexity limit\");\n debug!(\" {err}\");\n None\n },\n }\n }\n\n /// Find the next regex match.\n ///\n /// To automatically log regex complexity errors, use [`Self::regex_search`] instead.\n fn regex_search_internal(\n &self,\n start: Point,\n end: Point,\n regex: &mut LazyDfa,\n ) -> Result<Option<Point>, Box<dyn Error>> {\n let topmost_line = self.topmost_line();\n let screen_lines = self.screen_lines() as i32;\n let last_column = self.last_column();\n\n // Advance the iterator.\n let next = match regex.direction {\n Direction::Right => GridIterator::next,\n Direction::Left => GridIterator::prev,\n };\n\n // Get start state for the DFA.\n let regex_anchored = if regex.match_all { Anchored::Yes } else { Anchored::No };\n let input = Input::new(&[]).anchored(regex_anchored);\n let mut state = regex.dfa.start_state_forward(&mut regex.cache, &input).unwrap();\n\n let mut iter = self.grid.iter_from(start);\n let mut regex_match = None;\n let mut done = false;\n\n let mut cell = iter.cell();\n self.skip_fullwidth(&mut iter, &mut cell, regex.direction);\n let mut c = cell.c;\n let mut last_wrapped = iter.cell().flags.contains(Flags::WRAPLINE);\n\n let mut point = iter.point();\n let mut last_point = point;\n let mut consumed_bytes = 0;\n\n // Reset the regex state to restart the search.\n macro_rules! 
reset_state {\n () => {{\n state = regex.dfa.start_state_forward(&mut regex.cache, &input)?;\n consumed_bytes = 0;\n regex_match = None;\n }};\n }\n\n 'outer: loop {\n // Convert char to array of bytes.\n let mut buf = [0; 4];\n let utf8_len = c.encode_utf8(&mut buf).len();\n\n // Pass char to DFA as individual bytes.\n for i in 0..utf8_len {\n // Inverse byte order when going left.\n let byte = match regex.direction {\n Direction::Right => buf[i],\n Direction::Left => buf[utf8_len - i - 1],\n };\n\n state = regex.dfa.next_state(&mut regex.cache, state, byte)?;\n consumed_bytes += 1;\n\n if i == 0 && state.is_match() {\n // Matches require one additional BYTE of lookahead, so we check the match state\n // for the first byte of every new character to determine if the last character\n // was a match.\n regex_match = Some(last_point);\n } else if state.is_dead() {\n if consumed_bytes == 2 {\n // Reset search if we found an empty match.\n //\n // With an unanchored search, a dead state only occurs after the end of a\n // match has been found. While we want to abort after the first match has\n // ended, we don't want empty matches since we cannot highlight them.\n //\n // So once we encounter an empty match, we reset our parser state and clear\n // the match, effectively starting a new search one character farther than\n // before.\n //\n // An empty match requires consuming `2` bytes, since the first byte will\n // report the match for the empty string, while the second byte then\n // reports the dead state indicating the first character isn't part of the\n // match.\n reset_state!();\n\n // Retry this character if first byte caused failure.\n //\n // After finding an empty match, we want to advance the search start by one\n // character. 
So if the first character has multiple bytes and the dead\n // state isn't reached at `i == 0`, then we continue with the rest of the\n // loop to advance the parser by one character.\n if i == 0 {\n continue 'outer;\n }\n } else {\n // Abort on dead state.\n break 'outer;\n }\n }\n }\n\n // Stop once we've reached the target point.\n if point == end || done {\n // When reaching the end-of-input, we need to notify the parser that no look-ahead\n // is possible and check for state changes.\n state = regex.dfa.next_eoi_state(&mut regex.cache, state)?;\n if state.is_match() {\n regex_match = Some(point);\n } else if state.is_dead() && consumed_bytes == 1 {\n // Ignore empty matches.\n regex_match = None;\n }\n\n break;\n }\n\n // Advance grid cell iterator.\n let mut cell = match next(&mut iter) {\n Some(Indexed { cell, .. }) => cell,\n None => {\n // Wrap around to other end of the scrollback buffer.\n let line = topmost_line - point.line + screen_lines - 1;\n let start = Point::new(line, last_column - point.column);\n iter = self.grid.iter_from(start);\n iter.cell()\n },\n };\n\n // Check for completion before potentially skipping over fullwidth characters.\n done = iter.point() == end;\n\n self.skip_fullwidth(&mut iter, &mut cell, regex.direction);\n\n c = cell.c;\n let wrapped = iter.cell().flags.contains(Flags::WRAPLINE);\n\n last_point = mem::replace(&mut point, iter.point());\n\n // Handle linebreaks.\n if (last_point.column == last_column && point.column == Column(0) && !last_wrapped)\n || (last_point.column == Column(0) && point.column == last_column && !wrapped)\n {\n // When reaching the end-of-input, we need to notify the parser that no\n // look-ahead is possible and check if the current state is still a match.\n state = regex.dfa.next_eoi_state(&mut regex.cache, state)?;\n if state.is_match() {\n regex_match = Some(last_point);\n }\n\n match regex_match {\n // Stop if we found a non-empty match before the linebreak.\n Some(_) if (!state.is_dead() || 
consumed_bytes > 1) && consumed_bytes != 0 => {\n break;\n },\n _ => reset_state!(),\n }\n }\n\n last_wrapped = wrapped;\n }\n\n Ok(regex_match)\n }\n\n /// Advance a grid iterator over fullwidth characters.\n fn skip_fullwidth<'a>(\n &self,\n iter: &'a mut GridIterator<'_, Cell>,\n cell: &mut &'a Cell,\n direction: Direction,\n ) {\n match direction {\n // In the alternate screen buffer there might not be a wide char spacer after a wide\n // char, so we only advance the iterator when the wide char is not in the last column.\n Direction::Right\n if cell.flags.contains(Flags::WIDE_CHAR)\n && iter.point().column < self.last_column() =>\n {\n iter.next();\n },\n Direction::Right if cell.flags.contains(Flags::LEADING_WIDE_CHAR_SPACER) => {\n if let Some(Indexed { cell: new_cell, .. }) = iter.next() {\n *cell = new_cell;\n }\n iter.next();\n },\n Direction::Left if cell.flags.contains(Flags::WIDE_CHAR_SPACER) => {\n if let Some(Indexed { cell: new_cell, .. }) = iter.prev() {\n *cell = new_cell;\n }\n\n let prev = iter.point().sub(self, Boundary::Grid, 1);\n if self.grid[prev].flags.contains(Flags::LEADING_WIDE_CHAR_SPACER) {\n iter.prev();\n }\n },\n _ => (),\n }\n }\n\n /// Find next matching bracket.\n pub fn bracket_search(&self, point: Point) -> Option<Point> {\n let start_char = self.grid[point].c;\n\n // Find the matching bracket we're looking for\n let (forward, end_char) = BRACKET_PAIRS.iter().find_map(|(open, close)| {\n if open == &start_char {\n Some((true, *close))\n } else if close == &start_char {\n Some((false, *open))\n } else {\n None\n }\n })?;\n\n let mut iter = self.grid.iter_from(point);\n\n // For every character match that equals the starting bracket, we\n // ignore one bracket of the opposite type.\n let mut skip_pairs = 0;\n\n loop {\n // Check the next cell\n let cell = if forward { iter.next() } else { iter.prev() };\n\n // Break if there are no more cells\n let cell = match cell {\n Some(cell) => cell,\n None => break,\n };\n\n // Check if 
the bracket matches\n if cell.c == end_char && skip_pairs == 0 {\n return Some(cell.point);\n } else if cell.c == start_char {\n skip_pairs += 1;\n } else if cell.c == end_char {\n skip_pairs -= 1;\n }\n }\n\n None\n }\n\n /// Find left end of semantic block.\n #[must_use]\n pub fn semantic_search_left(&self, point: Point) -> Point {\n match self.inline_search_left(point, self.semantic_escape_chars()) {\n // If we found a match, reverse for at least one cell, skipping over wide cell spacers.\n Ok(point) => {\n let wide_spacer = Flags::WIDE_CHAR_SPACER | Flags::LEADING_WIDE_CHAR_SPACER;\n self.grid\n .iter_from(point)\n .find(|cell| !cell.flags.intersects(wide_spacer))\n .map_or(point, |cell| cell.point)\n },\n Err(point) => point,\n }\n }\n\n /// Find right end of semantic block.\n #[must_use]\n pub fn semantic_search_right(&self, point: Point) -> Point {\n match self.inline_search_right(point, self.semantic_escape_chars()) {\n Ok(point) => self.grid.iter_from(point).prev().map_or(point, |cell| cell.point),\n Err(point) => point,\n }\n }\n\n /// Searching to the left, find the next character contained in `needles`.\n pub fn inline_search_left(&self, mut point: Point, needles: &str) -> Result<Point, Point> {\n // Limit the starting point to the last line in the history\n point.line = max(point.line, self.topmost_line());\n\n let mut iter = self.grid.iter_from(point);\n let last_column = self.columns() - 1;\n\n let wide_spacer = Flags::WIDE_CHAR_SPACER | Flags::LEADING_WIDE_CHAR_SPACER;\n while let Some(cell) = iter.prev() {\n if cell.point.column == last_column && !cell.flags.contains(Flags::WRAPLINE) {\n break;\n }\n\n point = cell.point;\n\n if !cell.flags.intersects(wide_spacer) && needles.contains(cell.c) {\n return Ok(point);\n }\n }\n\n Err(point)\n }\n\n /// Searching to the right, find the next character contained in `needles`.\n pub fn inline_search_right(&self, mut point: Point, needles: &str) -> Result<Point, Point> {\n // Limit the starting point to the 
last line in the history\n point.line = max(point.line, self.topmost_line());\n\n let wide_spacer = Flags::WIDE_CHAR_SPACER | Flags::LEADING_WIDE_CHAR_SPACER;\n let last_column = self.columns() - 1;\n\n // Immediately stop if start point in on line break.\n if point.column == last_column && !self.grid[point].flags.contains(Flags::WRAPLINE) {\n return Err(point);\n }\n\n for cell in self.grid.iter_from(point) {\n point = cell.point;\n\n if !cell.flags.intersects(wide_spacer) && needles.contains(cell.c) {\n return Ok(point);\n }\n\n if point.column == last_column && !cell.flags.contains(Flags::WRAPLINE) {\n break;\n }\n }\n\n Err(point)\n }\n\n /// Find the beginning of the current line across linewraps.\n pub fn line_search_left(&self, mut point: Point) -> Point {\n while point.line > self.topmost_line()\n && self.grid[point.line - 1i32][self.last_column()].flags.contains(Flags::WRAPLINE)\n {\n point.line -= 1;\n }\n\n point.column = Column(0);\n\n point\n }\n\n /// Find the end of the current line across linewraps.\n pub fn line_search_right(&self, mut point: Point) -> Point {\n while point.line + 1 < self.screen_lines()\n && self.grid[point.line][self.last_column()].flags.contains(Flags::WRAPLINE)\n {\n point.line += 1;\n }\n\n point.column = self.last_column();\n\n point\n }\n}",
"class_signature": "impl<T> Term<T>"
} |
bracket_search | alacritty-master/alacritty_terminal/src/term/search.rs | pub fn bracket_search(&self, point: Point) -> Option<Point> {
let start_char = self.grid[point].c;
// Find the matching bracket we're looking for
let (forward, end_char) = BRACKET_PAIRS.iter().find_map(|(open, close)| {
if open == &start_char {
Some((true, *close))
} else if close == &start_char {
Some((false, *open))
} else {
None
}
})?;
let mut iter = self.grid.iter_from(point);
// For every character match that equals the starting bracket, we
// ignore one bracket of the opposite type.
let mut skip_pairs = 0;
loop {
// Check the next cell
let cell = if forward { iter.next() } else { iter.prev() };
// Break if there are no more cells
let cell = match cell {
Some(cell) => cell,
None => break,
};
// Check if the bracket matches
if cell.c == end_char && skip_pairs == 0 {
return Some(cell.point);
} else if cell.c == start_char {
skip_pairs += 1;
} else if cell.c == end_char {
skip_pairs -= 1;
}
}
None
} | use std::cmp::max;
use std::error::Error;
use std::mem;
use std::ops::RangeInclusive;
use log::{debug, warn};
use regex_automata::hybrid::dfa::{Builder, Cache, Config, DFA};
pub use regex_automata::hybrid::BuildError;
use regex_automata::nfa::thompson::Config as ThompsonConfig;
use regex_automata::util::syntax::Config as SyntaxConfig;
use regex_automata::{Anchored, Input, MatchKind};
use crate::grid::{BidirectionalIterator, Dimensions, GridIterator, Indexed};
use crate::index::{Boundary, Column, Direction, Point, Side};
use crate::term::cell::{Cell, Flags};
use crate::term::Term;
/// Used to match equal brackets, when performing a bracket-pair selection.
///
/// Each tuple is an `(opening, closing)` pair; `bracket_search` scans forward
/// from an opening bracket and backward from a closing one.
const BRACKET_PAIRS: [(char, char); 4] = [('(', ')'), ('[', ']'), ('{', '}'), ('<', '>')];
pub type Match = RangeInclusive<Point>;
/// Terminal regex search state.
#[derive(Clone, Debug)]
pub struct RegexSearch {
    /// DFA scanning leftwards to locate the match start in a leftward search
    /// (see `Term::regex_search_left`).
    left_fdfa: LazyDfa,
    /// DFA scanning rightwards from the found start to locate the match end in
    /// a leftward search.
    left_rdfa: LazyDfa,
    /// DFA scanning leftwards from the found end to locate the match start in
    /// a rightward search (see `Term::regex_search_right`).
    right_rdfa: LazyDfa,
    /// DFA scanning rightwards to locate the match end in a rightward search.
    right_fdfa: LazyDfa,
}
impl RegexSearch {
    /// Build the forward and backward search DFAs.
    pub fn new(search: &str) -> Result<RegexSearch, Box<BuildError>> {
        // Setup configs for both DFA directions.
        //
        // Bounds are based on Regex's meta engine:
        // https://github.com/rust-lang/regex/blob/061ee815ef2c44101dba7b0b124600fcb03c1912/regex-automata/src/meta/wrappers.rs#L581-L599
        let contains_uppercase = search.chars().any(char::is_uppercase);
        let syntax = SyntaxConfig::new().case_insensitive(!contains_uppercase);
        let dfa_config =
            Config::new().minimum_cache_clear_count(Some(3)).minimum_bytes_per_state(Some(10));
        let nfa_limit = dfa_config.get_cache_capacity();
        let nfa_config = ThompsonConfig::new().nfa_size_limit(Some(nfa_limit));

        // DFAs locating the start/end of a match when searching towards the left.
        let left_rdfa = LazyDfa::new(
            search,
            dfa_config.clone(),
            syntax,
            nfa_config.clone(),
            Direction::Right,
            true,
        )?;
        let has_empty = left_rdfa.dfa.get_nfa().has_empty();
        let left_fdfa = LazyDfa::new(
            search,
            dfa_config.clone(),
            syntax,
            nfa_config.clone(),
            Direction::Left,
            has_empty,
        )?;

        // DFAs locating the start/end of a match when searching towards the right.
        let right_fdfa = LazyDfa::new(
            search,
            dfa_config.clone(),
            syntax,
            nfa_config.clone(),
            Direction::Right,
            has_empty,
        )?;
        let right_rdfa =
            LazyDfa::new(search, dfa_config, syntax, nfa_config, Direction::Left, true)?;

        Ok(RegexSearch { left_fdfa, left_rdfa, right_fdfa, right_rdfa })
    }
}
/// Runtime-evaluated DFA.
#[derive(Clone, Debug)]
struct LazyDfa {
    /// The lazily-built hybrid DFA itself.
    dfa: DFA,
    /// Transition cache the hybrid DFA fills in at search time.
    cache: Cache,
    /// Direction in which the terminal grid is scanned with this DFA.
    direction: Direction,
    /// When `true`, the DFA is built with `MatchKind::All` and searches run
    /// anchored (`Anchored::Yes` in `Term::regex_search_internal`).
    match_all: bool,
}
impl LazyDfa {
    /// Construct a hybrid DFA for a single scan direction.
    fn new(
        search: &str,
        mut config: Config,
        syntax: SyntaxConfig,
        mut thompson: ThompsonConfig,
        direction: Direction,
        match_all: bool,
    ) -> Result<Self, Box<BuildError>> {
        // The NFA must be reversed when the grid is scanned leftwards.
        thompson = thompson.reverse(matches!(direction, Direction::Left));

        // Select the requested match semantics.
        let match_kind = if match_all { MatchKind::All } else { MatchKind::LeftmostFirst };
        config = config.match_kind(match_kind);

        // Create the DFA.
        let dfa =
            Builder::new().configure(config).syntax(syntax).thompson(thompson).build(search)?;
        let cache = dfa.create_cache();

        Ok(Self { direction, cache, dfa, match_all })
    }
}
impl<T> Term<T> {
    /// Get next search match in the specified direction.
    pub fn search_next(
        &self,
        regex: &mut RegexSearch,
        mut origin: Point,
        direction: Direction,
        side: Side,
        mut max_lines: Option<usize>,
    ) -> Option<Match> {
        origin = self.expand_wide(origin, direction);

        // Drop the line limit when it would span the entire scrollback buffer anyway.
        max_lines = max_lines.filter(|max_lines| max_lines + 1 < self.total_lines());

        match direction {
            Direction::Right => self.next_match_right(regex, origin, side, max_lines),
            Direction::Left => self.next_match_left(regex, origin, side, max_lines),
        }
    }

    /// Find the next match to the right of the origin.
    fn next_match_right(
        &self,
        regex: &mut RegexSearch,
        origin: Point,
        side: Side,
        max_lines: Option<usize>,
    ) -> Option<Match> {
        // Begin at the visual start of the origin's line, across line wraps.
        let start = self.line_search_left(origin);
        let mut end = start;

        // Limit maximum number of lines searched.
        end = match max_lines {
            Some(max_lines) => {
                let line = (start.line + max_lines).grid_clamp(self, Boundary::None);
                Point::new(line, self.last_column())
            },
            _ => end.sub(self, Boundary::None, 1),
        };

        let mut regex_iter = RegexIter::new(start, end, Direction::Right, self, regex).peekable();

        // Check if there's any match at all.
        let first_match = regex_iter.peek()?.clone();

        let regex_match = regex_iter
            .find(|regex_match| {
                let match_point = Self::match_side(regex_match, side);

                // If the match's point is beyond the origin, we're done.
                match_point.line < start.line
                    || match_point.line > origin.line
                    || (match_point.line == origin.line && match_point.column >= origin.column)
            })
            // No match after the origin, so wrap around to the first match overall.
            .unwrap_or(first_match);

        Some(regex_match)
    }

    /// Find the next match to the left of the origin.
    fn next_match_left(
        &self,
        regex: &mut RegexSearch,
        origin: Point,
        side: Side,
        max_lines: Option<usize>,
    ) -> Option<Match> {
        // Begin at the visual end of the origin's line, across line wraps.
        let start = self.line_search_right(origin);
        let mut end = start;

        // Limit maximum number of lines searched.
        end = match max_lines {
            Some(max_lines) => {
                let line = (start.line - max_lines).grid_clamp(self, Boundary::None);
                Point::new(line, Column(0))
            },
            _ => end.add(self, Boundary::None, 1),
        };

        let mut regex_iter = RegexIter::new(start, end, Direction::Left, self, regex).peekable();

        // Check if there's any match at all.
        let first_match = regex_iter.peek()?.clone();

        let regex_match = regex_iter
            .find(|regex_match| {
                let match_point = Self::match_side(regex_match, side);

                // If the match's point is beyond the origin, we're done.
                match_point.line > start.line
                    || match_point.line < origin.line
                    || (match_point.line == origin.line && match_point.column <= origin.column)
            })
            // No match before the origin, so wrap around to the first match overall.
            .unwrap_or(first_match);

        Some(regex_match)
    }

    /// Get the side of a match.
    fn match_side(regex_match: &Match, side: Side) -> Point {
        match side {
            Side::Right => *regex_match.end(),
            Side::Left => *regex_match.start(),
        }
    }

    /// Find the next regex match to the left of the origin point.
    ///
    /// The origin is always included in the regex.
    pub fn regex_search_left(
        &self,
        regex: &mut RegexSearch,
        start: Point,
        end: Point,
    ) -> Option<Match> {
        // Find start and end of match.
        let match_start = self.regex_search(start, end, &mut regex.left_fdfa)?;
        let match_end = self.regex_search(match_start, start, &mut regex.left_rdfa)?;

        Some(match_start..=match_end)
    }

    /// Find the next regex match to the right of the origin point.
    ///
    /// The origin is always included in the regex.
    pub fn regex_search_right(
        &self,
        regex: &mut RegexSearch,
        start: Point,
        end: Point,
    ) -> Option<Match> {
        // Find start and end of match.
        let match_end = self.regex_search(start, end, &mut regex.right_fdfa)?;
        let match_start = self.regex_search(match_end, start, &mut regex.right_rdfa)?;

        Some(match_start..=match_end)
    }

    /// Find the next regex match.
    ///
    /// This will always return the side of the first match which is farthest from the start point.
    fn regex_search(&self, start: Point, end: Point, regex: &mut LazyDfa) -> Option<Point> {
        match self.regex_search_internal(start, end, regex) {
            Ok(regex_match) => regex_match,
            Err(err) => {
                // Treat DFA complexity errors as "no match" instead of failing hard.
                warn!("Regex exceeded complexity limit");
                debug!(" {err}");
                None
            },
        }
    }

    /// Find the next regex match.
    ///
    /// To automatically log regex complexity errors, use [`Self::regex_search`] instead.
    fn regex_search_internal(
        &self,
        start: Point,
        end: Point,
        regex: &mut LazyDfa,
    ) -> Result<Option<Point>, Box<dyn Error>> {
        let topmost_line = self.topmost_line();
        let screen_lines = self.screen_lines() as i32;
        let last_column = self.last_column();

        // Advance the iterator.
        let next = match regex.direction {
            Direction::Right => GridIterator::next,
            Direction::Left => GridIterator::prev,
        };

        // Get start state for the DFA.
        let regex_anchored = if regex.match_all { Anchored::Yes } else { Anchored::No };
        let input = Input::new(&[]).anchored(regex_anchored);
        let mut state = regex.dfa.start_state_forward(&mut regex.cache, &input).unwrap();

        let mut iter = self.grid.iter_from(start);
        let mut regex_match = None;
        let mut done = false;

        // Step over wide-char spacer cells so the search starts on a real character.
        let mut cell = iter.cell();
        self.skip_fullwidth(&mut iter, &mut cell, regex.direction);
        let mut c = cell.c;
        let mut last_wrapped = iter.cell().flags.contains(Flags::WRAPLINE);

        let mut point = iter.point();
        let mut last_point = point;
        let mut consumed_bytes = 0;

        // Reset the regex state to restart the search.
        macro_rules! reset_state {
            () => {{
                state = regex.dfa.start_state_forward(&mut regex.cache, &input)?;
                consumed_bytes = 0;
                regex_match = None;
            }};
        }

        'outer: loop {
            // Convert char to array of bytes.
            let mut buf = [0; 4];
            let utf8_len = c.encode_utf8(&mut buf).len();

            // Pass char to DFA as individual bytes.
            for i in 0..utf8_len {
                // Inverse byte order when going left.
                let byte = match regex.direction {
                    Direction::Right => buf[i],
                    Direction::Left => buf[utf8_len - i - 1],
                };

                state = regex.dfa.next_state(&mut regex.cache, state, byte)?;
                consumed_bytes += 1;

                if i == 0 && state.is_match() {
                    // Matches require one additional BYTE of lookahead, so we check the match state
                    // for the first byte of every new character to determine if the last character
                    // was a match.
                    regex_match = Some(last_point);
                } else if state.is_dead() {
                    if consumed_bytes == 2 {
                        // Reset search if we found an empty match.
                        //
                        // With an unanchored search, a dead state only occurs after the end of a
                        // match has been found. While we want to abort after the first match has
                        // ended, we don't want empty matches since we cannot highlight them.
                        //
                        // So once we encounter an empty match, we reset our parser state and clear
                        // the match, effectively starting a new search one character farther than
                        // before.
                        //
                        // An empty match requires consuming `2` bytes, since the first byte will
                        // report the match for the empty string, while the second byte then
                        // reports the dead state indicating the first character isn't part of the
                        // match.
                        reset_state!();

                        // Retry this character if first byte caused failure.
                        //
                        // After finding an empty match, we want to advance the search start by one
                        // character. So if the first character has multiple bytes and the dead
                        // state isn't reached at `i == 0`, then we continue with the rest of the
                        // loop to advance the parser by one character.
                        if i == 0 {
                            continue 'outer;
                        }
                    } else {
                        // Abort on dead state.
                        break 'outer;
                    }
                }
            }

            // Stop once we've reached the target point.
            if point == end || done {
                // When reaching the end-of-input, we need to notify the parser that no look-ahead
                // is possible and check for state changes.
                state = regex.dfa.next_eoi_state(&mut regex.cache, state)?;
                if state.is_match() {
                    regex_match = Some(point);
                } else if state.is_dead() && consumed_bytes == 1 {
                    // Ignore empty matches.
                    regex_match = None;
                }

                break;
            }

            // Advance grid cell iterator.
            let mut cell = match next(&mut iter) {
                Some(Indexed { cell, .. }) => cell,
                None => {
                    // Wrap around to other end of the scrollback buffer.
                    let line = topmost_line - point.line + screen_lines - 1;
                    let start = Point::new(line, last_column - point.column);
                    iter = self.grid.iter_from(start);
                    iter.cell()
                },
            };

            // Check for completion before potentially skipping over fullwidth characters.
            done = iter.point() == end;

            self.skip_fullwidth(&mut iter, &mut cell, regex.direction);

            c = cell.c;
            let wrapped = iter.cell().flags.contains(Flags::WRAPLINE);

            last_point = mem::replace(&mut point, iter.point());

            // Handle linebreaks.
            if (last_point.column == last_column && point.column == Column(0) && !last_wrapped)
                || (last_point.column == Column(0) && point.column == last_column && !wrapped)
            {
                // When reaching the end-of-input, we need to notify the parser that no
                // look-ahead is possible and check if the current state is still a match.
                state = regex.dfa.next_eoi_state(&mut regex.cache, state)?;
                if state.is_match() {
                    regex_match = Some(last_point);
                }

                match regex_match {
                    // Stop if we found a non-empty match before the linebreak.
                    Some(_) if (!state.is_dead() || consumed_bytes > 1) && consumed_bytes != 0 => {
                        break;
                    },
                    // Otherwise restart the DFA on the new line.
                    _ => reset_state!(),
                }
            }

            last_wrapped = wrapped;
        }

        Ok(regex_match)
    }

    /// Advance a grid iterator over fullwidth characters.
    ///
    /// When the iterator starts on a spacer cell, `cell` is updated to point at the
    /// cell holding the actual fullwidth character.
    fn skip_fullwidth<'a>(
        &self,
        iter: &'a mut GridIterator<'_, Cell>,
        cell: &mut &'a Cell,
        direction: Direction,
    ) {
        match direction {
            // In the alternate screen buffer there might not be a wide char spacer after a wide
            // char, so we only advance the iterator when the wide char is not in the last column.
            Direction::Right
                if cell.flags.contains(Flags::WIDE_CHAR)
                    && iter.point().column < self.last_column() =>
            {
                iter.next();
            },
            Direction::Right if cell.flags.contains(Flags::LEADING_WIDE_CHAR_SPACER) => {
                if let Some(Indexed { cell: new_cell, .. }) = iter.next() {
                    *cell = new_cell;
                }
                iter.next();
            },
            Direction::Left if cell.flags.contains(Flags::WIDE_CHAR_SPACER) => {
                if let Some(Indexed { cell: new_cell, .. }) = iter.prev() {
                    *cell = new_cell;
                }

                let prev = iter.point().sub(self, Boundary::Grid, 1);
                if self.grid[prev].flags.contains(Flags::LEADING_WIDE_CHAR_SPACER) {
                    iter.prev();
                }
            },
            _ => (),
        }
    }

    /// Find next matching bracket.
    ///
    /// Scans forward when the cell at `point` holds an opening bracket and backward
    /// when it holds a closing one. Returns `None` when the character at `point` is
    /// not in [`BRACKET_PAIRS`] or no unbalanced counterpart is found.
    pub fn bracket_search(&self, point: Point) -> Option<Point> {
        let start_char = self.grid[point].c;

        // Find the matching bracket we're looking for
        let (forward, end_char) = BRACKET_PAIRS.iter().find_map(|(open, close)| {
            if open == &start_char {
                Some((true, *close))
            } else if close == &start_char {
                Some((false, *open))
            } else {
                None
            }
        })?;

        let mut iter = self.grid.iter_from(point);

        // For every character match that equals the starting bracket, we
        // ignore one bracket of the opposite type.
        let mut skip_pairs = 0;

        loop {
            // Check the next cell
            let cell = if forward { iter.next() } else { iter.prev() };

            // Break if there are no more cells
            let cell = match cell {
                Some(cell) => cell,
                None => break,
            };

            // Check if the bracket matches
            if cell.c == end_char && skip_pairs == 0 {
                return Some(cell.point);
            } else if cell.c == start_char {
                skip_pairs += 1;
            } else if cell.c == end_char {
                // Counterpart of a nested pair; unbalance one level.
                skip_pairs -= 1;
            }
        }

        None
    }

    /// Find left end of semantic block.
    #[must_use]
    pub fn semantic_search_left(&self, point: Point) -> Point {
        match self.inline_search_left(point, self.semantic_escape_chars()) {
            // If we found a match, reverse for at least one cell, skipping over wide cell spacers.
            Ok(point) => {
                let wide_spacer = Flags::WIDE_CHAR_SPACER | Flags::LEADING_WIDE_CHAR_SPACER;
                self.grid
                    .iter_from(point)
                    .find(|cell| !cell.flags.intersects(wide_spacer))
                    .map_or(point, |cell| cell.point)
            },
            Err(point) => point,
        }
    }

    /// Find right end of semantic block.
    #[must_use]
    pub fn semantic_search_right(&self, point: Point) -> Point {
        match self.inline_search_right(point, self.semantic_escape_chars()) {
            // Step back one cell so the escape character itself is excluded.
            Ok(point) => self.grid.iter_from(point).prev().map_or(point, |cell| cell.point),
            Err(point) => point,
        }
    }

    /// Searching to the left, find the next character contained in `needles`.
    pub fn inline_search_left(&self, mut point: Point, needles: &str) -> Result<Point, Point> {
        // Limit the starting point to the last line in the history
        point.line = max(point.line, self.topmost_line());

        let mut iter = self.grid.iter_from(point);
        let last_column = self.columns() - 1;

        let wide_spacer = Flags::WIDE_CHAR_SPACER | Flags::LEADING_WIDE_CHAR_SPACER;
        while let Some(cell) = iter.prev() {
            // Stop at hard linebreaks.
            if cell.point.column == last_column && !cell.flags.contains(Flags::WRAPLINE) {
                break;
            }

            point = cell.point;

            if !cell.flags.intersects(wide_spacer) && needles.contains(cell.c) {
                return Ok(point);
            }
        }

        Err(point)
    }

    /// Searching to the right, find the next character contained in `needles`.
    pub fn inline_search_right(&self, mut point: Point, needles: &str) -> Result<Point, Point> {
        // Limit the starting point to the last line in the history
        point.line = max(point.line, self.topmost_line());

        let wide_spacer = Flags::WIDE_CHAR_SPACER | Flags::LEADING_WIDE_CHAR_SPACER;
        let last_column = self.columns() - 1;

        // Immediately stop if start point in on line break.
        if point.column == last_column && !self.grid[point].flags.contains(Flags::WRAPLINE) {
            return Err(point);
        }

        for cell in self.grid.iter_from(point) {
            point = cell.point;

            if !cell.flags.intersects(wide_spacer) && needles.contains(cell.c) {
                return Ok(point);
            }

            // Stop at hard linebreaks.
            if point.column == last_column && !cell.flags.contains(Flags::WRAPLINE) {
                break;
            }
        }

        Err(point)
    }

    /// Find the beginning of the current line across linewraps.
    pub fn line_search_left(&self, mut point: Point) -> Point {
        // Walk up while the previous line wraps into the current one.
        while point.line > self.topmost_line()
            && self.grid[point.line - 1i32][self.last_column()].flags.contains(Flags::WRAPLINE)
        {
            point.line -= 1;
        }

        point.column = Column(0);

        point
    }

    /// Find the end of the current line across linewraps.
    pub fn line_search_right(&self, mut point: Point) -> Point {
        // Walk down while the current line wraps into the next one.
        while point.line + 1 < self.screen_lines()
            && self.grid[point.line][self.last_column()].flags.contains(Flags::WRAPLINE)
        {
            point.line += 1;
        }

        point.column = self.last_column();

        point
    }
}
/// Iterator over regex matches.
pub struct RegexIter<'a, T> {
    /// Origin point for the next search.
    point: Point,
    /// Last point included in the search.
    end: Point,
    /// Direction in which the grid is traversed.
    direction: Direction,
    /// Compiled search DFAs.
    regex: &'a mut RegexSearch,
    /// Terminal being searched.
    term: &'a Term<T>,
    /// Set once the iterator is exhausted.
    done: bool,
}
impl<'a, T> RegexIter<'a, T> {
    /// Create a match iterator over the region between `start` and `end`.
    pub fn new(
        start: Point,
        end: Point,
        direction: Direction,
        term: &'a Term<T>,
        regex: &'a mut RegexSearch,
    ) -> Self {
        Self { point: start, end, direction, term, regex, done: false }
    }

    /// Skip one cell, advancing the origin point to the next one.
    fn skip(&mut self) {
        self.point = self.term.expand_wide(self.point, self.direction);

        let advanced = if matches!(self.direction, Direction::Right) {
            self.point.add(self.term, Boundary::None, 1)
        } else {
            self.point.sub(self.term, Boundary::None, 1)
        };
        self.point = advanced;
    }

    /// Get the next match in the specified direction.
    fn next_match(&mut self) -> Option<Match> {
        if matches!(self.direction, Direction::Left) {
            self.term.regex_search_left(self.regex, self.point, self.end)
        } else {
            self.term.regex_search_right(self.regex, self.point, self.end)
        }
    }
}
impl<T> Iterator for RegexIter<'_, T> {
    type Item = Match;

    fn next(&mut self) -> Option<Self::Item> {
        if self.done {
            return None;
        }

        // Since the end itself might be a single cell match, we search one more time
        // once the origin has reached it.
        self.done = self.point == self.end;

        let regex_match = self.next_match()?;

        // Continue from the final cell of this match.
        self.point = *regex_match.end();
        if self.point != self.end {
            // Move the new search origin past the match.
            self.skip();
        } else {
            // Stop when the match terminates right on the end limit.
            self.done = true;
        }

        Some(regex_match)
    }
}
#[cfg(test)]
mod tests {
    //! Unit tests for terminal content search: regex (forward and reverse),
    //! semantic word boundaries, and line search, with particular attention to
    //! wrapped lines, multi-byte UTF-8, fullwidth characters, and spacers.

    use super::*;

    use crate::index::{Column, Line};
    use crate::term::test::{mock_term, TermSize};
    use crate::term::Config;

    #[test]
    fn regex_right() {
        #[rustfmt::skip]
        let term = mock_term("\
            testing66\r\n\
            Alacritty\n\
            123\r\n\
            Alacritty\r\n\
            123\
        ");

        // Check regex across wrapped and unwrapped lines.
        let mut regex = RegexSearch::new("Ala.*123").unwrap();
        let start = Point::new(Line(1), Column(0));
        let end = Point::new(Line(4), Column(2));
        let match_start = Point::new(Line(1), Column(0));
        let match_end = Point::new(Line(2), Column(2));
        assert_eq!(term.regex_search_right(&mut regex, start, end), Some(match_start..=match_end));
    }

    #[test]
    fn regex_left() {
        #[rustfmt::skip]
        let term = mock_term("\
            testing66\r\n\
            Alacritty\n\
            123\r\n\
            Alacritty\r\n\
            123\
        ");

        // Check regex across wrapped and unwrapped lines.
        let mut regex = RegexSearch::new("Ala.*123").unwrap();
        let start = Point::new(Line(4), Column(2));
        let end = Point::new(Line(1), Column(0));
        let match_start = Point::new(Line(1), Column(0));
        let match_end = Point::new(Line(2), Column(2));
        assert_eq!(term.regex_search_left(&mut regex, start, end), Some(match_start..=match_end));
    }

    #[test]
    fn nested_regex() {
        #[rustfmt::skip]
        let term = mock_term("\
            Ala -> Alacritty -> critty\r\n\
            critty\
        ");

        // Greedy stopped at linebreak.
        let mut regex = RegexSearch::new("Ala.*critty").unwrap();
        let start = Point::new(Line(0), Column(0));
        let end = Point::new(Line(0), Column(25));
        assert_eq!(term.regex_search_right(&mut regex, start, end), Some(start..=end));

        // Greedy stopped at dead state.
        let mut regex = RegexSearch::new("Ala[^y]*critty").unwrap();
        let start = Point::new(Line(0), Column(0));
        let end = Point::new(Line(0), Column(15));
        assert_eq!(term.regex_search_right(&mut regex, start, end), Some(start..=end));
    }

    #[test]
    fn no_match_right() {
        #[rustfmt::skip]
        let term = mock_term("\
            first line\n\
            broken second\r\n\
            third\
        ");

        // A pattern occurring nowhere in range must report no match.
        let mut regex = RegexSearch::new("nothing").unwrap();
        let start = Point::new(Line(0), Column(0));
        let end = Point::new(Line(2), Column(4));
        assert_eq!(term.regex_search_right(&mut regex, start, end), None);
    }

    #[test]
    fn no_match_left() {
        #[rustfmt::skip]
        let term = mock_term("\
            first line\n\
            broken second\r\n\
            third\
        ");

        // Same as `no_match_right`, in the reverse direction.
        let mut regex = RegexSearch::new("nothing").unwrap();
        let start = Point::new(Line(2), Column(4));
        let end = Point::new(Line(0), Column(0));
        assert_eq!(term.regex_search_left(&mut regex, start, end), None);
    }

    #[test]
    fn include_linebreak_left() {
        #[rustfmt::skip]
        let term = mock_term("\
            testing123\r\n\
            xxx\
        ");

        // Make sure the cell containing the linebreak is not skipped.
        let mut regex = RegexSearch::new("te.*123").unwrap();
        let start = Point::new(Line(1), Column(0));
        let end = Point::new(Line(0), Column(0));
        let match_start = Point::new(Line(0), Column(0));
        let match_end = Point::new(Line(0), Column(9));
        assert_eq!(term.regex_search_left(&mut regex, start, end), Some(match_start..=match_end));
    }

    #[test]
    fn include_linebreak_right() {
        #[rustfmt::skip]
        let term = mock_term("\
            xxx\r\n\
            testing123\
        ");

        // Make sure the cell containing the linebreak is not skipped.
        let mut regex = RegexSearch::new("te.*123").unwrap();
        let start = Point::new(Line(0), Column(2));
        let end = Point::new(Line(1), Column(9));
        let match_start = Point::new(Line(1), Column(0));
        assert_eq!(term.regex_search_right(&mut regex, start, end), Some(match_start..=end));
    }

    #[test]
    fn skip_dead_cell() {
        let term = mock_term("alacritty");

        // Make sure dead state cell is skipped when reversing.
        let mut regex = RegexSearch::new("alacrit").unwrap();
        let start = Point::new(Line(0), Column(0));
        let end = Point::new(Line(0), Column(6));
        assert_eq!(term.regex_search_right(&mut regex, start, end), Some(start..=end));
    }

    #[test]
    fn reverse_search_dead_recovery() {
        let term = mock_term("zooo lense");

        // Make sure the reverse DFA operates the same as a forward DFA.
        let mut regex = RegexSearch::new("zoo").unwrap();
        let start = Point::new(Line(0), Column(9));
        let end = Point::new(Line(0), Column(0));
        let match_start = Point::new(Line(0), Column(0));
        let match_end = Point::new(Line(0), Column(2));
        assert_eq!(term.regex_search_left(&mut regex, start, end), Some(match_start..=match_end));
    }

    #[test]
    fn multibyte_unicode() {
        let term = mock_term("testвосибing");

        // Multi-byte UTF-8 content must match in the forward direction.
        let mut regex = RegexSearch::new("te.*ing").unwrap();
        let start = Point::new(Line(0), Column(0));
        let end = Point::new(Line(0), Column(11));
        assert_eq!(term.regex_search_right(&mut regex, start, end), Some(start..=end));

        // And in the reverse direction.
        let mut regex = RegexSearch::new("te.*ing").unwrap();
        let start = Point::new(Line(0), Column(11));
        let end = Point::new(Line(0), Column(0));
        assert_eq!(term.regex_search_left(&mut regex, start, end), Some(end..=start));
    }

    #[test]
    fn end_on_multibyte_unicode() {
        let term = mock_term("testвосиб");

        // A range ending on a multi-byte character still yields the match that
        // stops one cell before the limit.
        let mut regex = RegexSearch::new("te.*и").unwrap();
        let start = Point::new(Line(0), Column(0));
        let end = Point::new(Line(0), Column(8));
        let match_end = Point::new(Line(0), Column(7));
        assert_eq!(term.regex_search_right(&mut regex, start, end), Some(start..=match_end));
    }

    #[test]
    fn fullwidth() {
        let term = mock_term("a🦇x🦇");

        // Fullwidth characters and their spacer cells are part of the match.
        let mut regex = RegexSearch::new("[^ ]*").unwrap();
        let start = Point::new(Line(0), Column(0));
        let end = Point::new(Line(0), Column(5));
        assert_eq!(term.regex_search_right(&mut regex, start, end), Some(start..=end));

        let mut regex = RegexSearch::new("[^ ]*").unwrap();
        let start = Point::new(Line(0), Column(5));
        let end = Point::new(Line(0), Column(0));
        assert_eq!(term.regex_search_left(&mut regex, start, end), Some(end..=start));
    }

    #[test]
    fn singlecell_fullwidth() {
        let term = mock_term("🦇");

        // A match of a single fullwidth character spans both of its cells.
        let mut regex = RegexSearch::new("🦇").unwrap();
        let start = Point::new(Line(0), Column(0));
        let end = Point::new(Line(0), Column(1));
        assert_eq!(term.regex_search_right(&mut regex, start, end), Some(start..=end));

        let mut regex = RegexSearch::new("🦇").unwrap();
        let start = Point::new(Line(0), Column(1));
        let end = Point::new(Line(0), Column(0));
        assert_eq!(term.regex_search_left(&mut regex, start, end), Some(end..=start));
    }

    #[test]
    fn end_on_fullwidth() {
        let term = mock_term("jarr🦇");

        let start = Point::new(Line(0), Column(0));
        let end = Point::new(Line(0), Column(4));

        // Ensure ending without a match doesn't loop indefinitely.
        let mut regex = RegexSearch::new("x").unwrap();
        assert_eq!(term.regex_search_right(&mut regex, start, end), None);

        let mut regex = RegexSearch::new("x").unwrap();
        let match_end = Point::new(Line(0), Column(5));
        assert_eq!(term.regex_search_right(&mut regex, start, match_end), None);

        // Ensure match is captured when only partially inside range.
        let mut regex = RegexSearch::new("jarr🦇").unwrap();
        assert_eq!(term.regex_search_right(&mut regex, start, end), Some(start..=match_end));
    }

    #[test]
    fn wrapping() {
        #[rustfmt::skip]
        let term = mock_term("\
            xxx\r\n\
            xxx\
        ");

        // Searching past the last cell continues onto the next line.
        let mut regex = RegexSearch::new("xxx").unwrap();
        let start = Point::new(Line(0), Column(2));
        let end = Point::new(Line(1), Column(2));
        let match_start = Point::new(Line(1), Column(0));
        assert_eq!(term.regex_search_right(&mut regex, start, end), Some(match_start..=end));

        let mut regex = RegexSearch::new("xxx").unwrap();
        let start = Point::new(Line(1), Column(0));
        let end = Point::new(Line(0), Column(0));
        let match_end = Point::new(Line(0), Column(2));
        assert_eq!(term.regex_search_left(&mut regex, start, end), Some(end..=match_end));
    }

    #[test]
    fn wrapping_into_fullwidth() {
        #[rustfmt::skip]
        let term = mock_term("\
            🦇xx\r\n\
            xx🦇\
        ");

        // Pattern starting on a fullwidth character.
        let mut regex = RegexSearch::new("🦇x").unwrap();
        let start = Point::new(Line(0), Column(0));
        let end = Point::new(Line(1), Column(3));
        let match_start = Point::new(Line(0), Column(0));
        let match_end = Point::new(Line(0), Column(2));
        assert_eq!(term.regex_search_right(&mut regex, start, end), Some(match_start..=match_end));

        // Pattern ending on a fullwidth character.
        let mut regex = RegexSearch::new("x🦇").unwrap();
        let start = Point::new(Line(1), Column(2));
        let end = Point::new(Line(0), Column(0));
        let match_start = Point::new(Line(1), Column(1));
        let match_end = Point::new(Line(1), Column(3));
        assert_eq!(term.regex_search_left(&mut regex, start, end), Some(match_start..=match_end));
    }

    #[test]
    fn multiline() {
        #[rustfmt::skip]
        let term = mock_term("\
            test \r\n\
            test\
        ");

        const PATTERN: &str = "[a-z]*";

        // Match stays on the first line when the range ends there.
        let mut regex = RegexSearch::new(PATTERN).unwrap();
        let start = Point::new(Line(0), Column(0));
        let end = Point::new(Line(0), Column(3));
        let match_start = Point::new(Line(0), Column(0));
        assert_eq!(term.regex_search_right(&mut regex, start, end), Some(match_start..=end));

        // Starting past the first word finds the match on the second line.
        let mut regex = RegexSearch::new(PATTERN).unwrap();
        let start = Point::new(Line(0), Column(4));
        let end = Point::new(Line(0), Column(0));
        let match_start = Point::new(Line(1), Column(0));
        let match_end = Point::new(Line(1), Column(3));
        assert_eq!(term.regex_search_right(&mut regex, start, end), Some(match_start..=match_end));
    }

    #[test]
    fn empty_match() {
        #[rustfmt::skip]
        let term = mock_term(" abc ");

        const PATTERN: &str = "[a-z]*";

        // Empty matches on the surrounding spaces must be skipped in favor of
        // the non-empty match.
        let mut regex = RegexSearch::new(PATTERN).unwrap();
        let start = Point::new(Line(0), Column(0));
        let end = Point::new(Line(0), Column(4));
        let match_start = Point::new(Line(0), Column(1));
        let match_end = Point::new(Line(0), Column(3));
        assert_eq!(term.regex_search_right(&mut regex, start, end), Some(match_start..=match_end));
    }

    #[test]
    fn empty_match_multibyte() {
        #[rustfmt::skip]
        let term = mock_term(" ↑");

        const PATTERN: &str = "[a-z]*";

        // Empty matches over multi-byte content must not be reported.
        let mut regex = RegexSearch::new(PATTERN).unwrap();
        let start = Point::new(Line(0), Column(0));
        let end = Point::new(Line(0), Column(1));
        assert_eq!(term.regex_search_right(&mut regex, start, end), None);
    }

    #[test]
    fn empty_match_multiline() {
        #[rustfmt::skip]
        let term = mock_term("abc \nxxx");

        const PATTERN: &str = "[a-z]*";

        // The empty match at the end of the first line is skipped in favor of
        // the non-empty match on the following line.
        let mut regex = RegexSearch::new(PATTERN).unwrap();
        let start = Point::new(Line(0), Column(3));
        let end = Point::new(Line(1), Column(2));
        let match_start = Point::new(Line(1), Column(0));
        let match_end = Point::new(Line(1), Column(2));
        assert_eq!(term.regex_search_right(&mut regex, start, end), Some(match_start..=match_end));
    }

    #[test]
    fn leading_spacer() {
        #[rustfmt::skip]
        let mut term = mock_term("\
            xxx \n\
            🦇xx\
        ");
        // Mark the last cell of the first line as a leading spacer, as written
        // when a fullwidth char cannot fit at the end of a line.
        term.grid[Line(0)][Column(3)].flags.insert(Flags::LEADING_WIDE_CHAR_SPACER);

        // Matches must span the leading spacer in both directions.
        let mut regex = RegexSearch::new("🦇x").unwrap();
        let start = Point::new(Line(0), Column(0));
        let end = Point::new(Line(1), Column(3));
        let match_start = Point::new(Line(0), Column(3));
        let match_end = Point::new(Line(1), Column(2));
        assert_eq!(term.regex_search_right(&mut regex, start, end), Some(match_start..=match_end));

        let mut regex = RegexSearch::new("🦇x").unwrap();
        let start = Point::new(Line(1), Column(3));
        let end = Point::new(Line(0), Column(0));
        let match_start = Point::new(Line(0), Column(3));
        let match_end = Point::new(Line(1), Column(2));
        assert_eq!(term.regex_search_left(&mut regex, start, end), Some(match_start..=match_end));

        let mut regex = RegexSearch::new("x🦇").unwrap();
        let start = Point::new(Line(0), Column(0));
        let end = Point::new(Line(1), Column(3));
        let match_start = Point::new(Line(0), Column(2));
        let match_end = Point::new(Line(1), Column(1));
        assert_eq!(term.regex_search_right(&mut regex, start, end), Some(match_start..=match_end));

        let mut regex = RegexSearch::new("x🦇").unwrap();
        let start = Point::new(Line(1), Column(3));
        let end = Point::new(Line(0), Column(0));
        let match_start = Point::new(Line(0), Column(2));
        let match_end = Point::new(Line(1), Column(1));
        assert_eq!(term.regex_search_left(&mut regex, start, end), Some(match_start..=match_end));
    }

    #[test]
    fn wide_without_spacer() {
        let size = TermSize::new(2, 2);
        let mut term = Term::new(Config::default(), &size, ());
        term.grid[Line(0)][Column(0)].c = 'x';
        term.grid[Line(0)][Column(1)].c = '字';
        term.grid[Line(0)][Column(1)].flags = Flags::WIDE_CHAR;

        // A wide char in the last column without a trailing spacer must not
        // cause the iterator to hang or report a bogus match.
        let mut regex = RegexSearch::new("test").unwrap();
        let start = Point::new(Line(0), Column(0));
        let end = Point::new(Line(0), Column(1));

        let mut iter = RegexIter::new(start, end, Direction::Right, &term, &mut regex);
        assert_eq!(iter.next(), None);
    }

    #[test]
    fn wrap_around_to_another_end() {
        #[rustfmt::skip]
        let term = mock_term("\
            abc\r\n\
            def\
        ");

        // Bottom to top.
        let mut regex = RegexSearch::new("abc").unwrap();
        let start = Point::new(Line(1), Column(0));
        let end = Point::new(Line(0), Column(2));
        let match_start = Point::new(Line(0), Column(0));
        let match_end = Point::new(Line(0), Column(2));
        assert_eq!(term.regex_search_right(&mut regex, start, end), Some(match_start..=match_end));

        // Top to bottom.
        let mut regex = RegexSearch::new("def").unwrap();
        let start = Point::new(Line(0), Column(2));
        let end = Point::new(Line(1), Column(0));
        let match_start = Point::new(Line(1), Column(0));
        let match_end = Point::new(Line(1), Column(2));
        assert_eq!(term.regex_search_left(&mut regex, start, end), Some(match_start..=match_end));
    }

    #[test]
    fn nfa_compile_error() {
        // An oversized repetition must fail at regex construction time.
        assert!(RegexSearch::new("[0-9A-Za-z]{9999999}").is_err());
    }

    #[test]
    fn runtime_cache_error() {
        let term = mock_term(&str::repeat("i", 9999));

        // A pattern exceeding the runtime complexity limit must fail
        // gracefully by reporting no match rather than panicking.
        let mut regex = RegexSearch::new("[0-9A-Za-z]{9999}").unwrap();
        let start = Point::new(Line(0), Column(0));
        let end = Point::new(Line(0), Column(9999));
        assert_eq!(term.regex_search_right(&mut regex, start, end), None);
    }

    #[test]
    fn greed_is_good() {
        #[rustfmt::skip]
        let term = mock_term("https://github.com");

        // The longer alternation branch must win over the shorter prefix.
        let mut regex = RegexSearch::new("/github.com|https://github.com").unwrap();
        let start = Point::new(Line(0), Column(0));
        let end = Point::new(Line(0), Column(17));
        assert_eq!(term.regex_search_right(&mut regex, start, end), Some(start..=end));
    }

    #[test]
    fn anchored_empty() {
        #[rustfmt::skip]
        let term = mock_term("rust");

        // An alternation with an empty-matching branch must still yield the
        // non-empty match.
        let mut regex = RegexSearch::new(";*|rust").unwrap();
        let start = Point::new(Line(0), Column(0));
        let end = Point::new(Line(0), Column(3));
        assert_eq!(term.regex_search_right(&mut regex, start, end), Some(start..=end));
    }

    #[test]
    fn newline_breaking_semantic() {
        #[rustfmt::skip]
        let term = mock_term("\
            test abc\r\n\
            def test\
        ");

        // Start at last character.
        let start = term.semantic_search_left(Point::new(Line(0), Column(7)));
        let end = term.semantic_search_right(Point::new(Line(0), Column(7)));
        assert_eq!(start, Point::new(Line(0), Column(5)));
        assert_eq!(end, Point::new(Line(0), Column(7)));

        // Start at first character.
        let start = term.semantic_search_left(Point::new(Line(1), Column(0)));
        let end = term.semantic_search_right(Point::new(Line(1), Column(0)));
        assert_eq!(start, Point::new(Line(1), Column(0)));
        assert_eq!(end, Point::new(Line(1), Column(2)));
    }

    #[test]
    fn inline_word_search() {
        #[rustfmt::skip]
        let term = mock_term("\
            word word word word w\n\
            ord word word word\
        ");

        // The word broken across the linewrap is found as a single match.
        let mut regex = RegexSearch::new("word").unwrap();
        let start = Point::new(Line(1), Column(4));
        let end = Point::new(Line(0), Column(0));
        let match_start = Point::new(Line(0), Column(20));
        let match_end = Point::new(Line(1), Column(2));
        assert_eq!(term.regex_search_left(&mut regex, start, end), Some(match_start..=match_end));
    }

    #[test]
    fn fullwidth_semantic() {
        #[rustfmt::skip]
        let mut term = mock_term("test-x-test");
        // NOTE(review): the test name suggests the middle character was
        // originally fullwidth; confirm this literal against upstream history.
        term.config.semantic_escape_chars = "-".into();

        let start = term.semantic_search_left(Point::new(Line(0), Column(6)));
        let end = term.semantic_search_right(Point::new(Line(0), Column(6)));
        assert_eq!(start, Point::new(Line(0), Column(6)));
        assert_eq!(end, Point::new(Line(0), Column(6)));
    }

    #[test]
    fn fullwidth_across_lines() {
        let term = mock_term("a🦇\n🦇b");

        // Forward search across the linewrap.
        let mut regex = RegexSearch::new("🦇🦇").unwrap();
        let start = Point::new(Line(0), Column(0));
        let end = Point::new(Line(1), Column(2));
        let match_start = Point::new(Line(0), Column(1));
        let match_end = Point::new(Line(1), Column(1));
        assert_eq!(term.regex_search_right(&mut regex, start, end), Some(match_start..=match_end));

        // Reverse search across the linewrap.
        let mut regex = RegexSearch::new("🦇🦇").unwrap();
        let start = Point::new(Line(1), Column(2));
        let end = Point::new(Line(0), Column(0));
        let match_start = Point::new(Line(1), Column(1));
        let match_end = Point::new(Line(0), Column(1));
        assert_eq!(term.regex_search_left(&mut regex, start, end), Some(match_end..=match_start));
    }

    #[test]
    fn fullwidth_into_halfwidth_across_lines() {
        let term = mock_term("a🦇\nxab");

        // Fullwidth char followed by a halfwidth char on the next line.
        let mut regex = RegexSearch::new("🦇x").unwrap();
        let start = Point::new(Line(0), Column(0));
        let end = Point::new(Line(1), Column(2));
        let match_start = Point::new(Line(0), Column(1));
        let match_end = Point::new(Line(1), Column(0));
        assert_eq!(term.regex_search_right(&mut regex, start, end), Some(match_start..=match_end));

        // Same match found from the reverse direction.
        let mut regex = RegexSearch::new("🦇x").unwrap();
        let start = Point::new(Line(1), Column(2));
        let end = Point::new(Line(0), Column(0));
        let match_start = Point::new(Line(1), Column(0));
        let match_end = Point::new(Line(0), Column(1));
        assert_eq!(term.regex_search_left(&mut regex, start, end), Some(match_end..=match_start));
    }

    #[test]
    fn no_spacer_fullwidth_linewrap() {
        let mut term = mock_term("abY\nxab");
        // Overwrite the last cell with a fullwidth char, leaving no spacer.
        term.grid_mut()[Line(0)][Column(2)].c = '🦇';

        let mut regex = RegexSearch::new("🦇x").unwrap();
        let start = Point::new(Line(0), Column(0));
        let end = Point::new(Line(1), Column(2));
        let match_start = Point::new(Line(0), Column(2));
        let match_end = Point::new(Line(1), Column(0));
        assert_eq!(term.regex_search_right(&mut regex, start, end), Some(match_start..=match_end));

        let mut regex = RegexSearch::new("🦇x").unwrap();
        let start = Point::new(Line(1), Column(2));
        let end = Point::new(Line(0), Column(0));
        let match_start = Point::new(Line(1), Column(0));
        let match_end = Point::new(Line(0), Column(2));
        assert_eq!(term.regex_search_left(&mut regex, start, end), Some(match_end..=match_start));
    }
}
| rust | {
"argument_definitions": [
{
"definitions": [
"pub struct Point<L = Line, C = Column> {\n pub line: L,\n pub column: C,\n}"
],
"name": "point",
"type": "Point"
}
],
"end_line": 513,
"name": "bracket_search",
"signature": "pub fn bracket_search(&self, point: Point) -> Option<Point>",
"start_line": 472
} | {
"class_name": "impl<T> Term<T> {\n /// Get next search match in the specified direction.\n pub fn search_next(\n &self,\n regex: &mut RegexSearch,\n mut origin: Point,\n direction: Direction,\n side: Side,\n mut max_lines: Option<usize>,\n ) -> Option<Match> {\n origin = self.expand_wide(origin, direction);\n\n max_lines = max_lines.filter(|max_lines| max_lines + 1 < self.total_lines());\n\n match direction {\n Direction::Right => self.next_match_right(regex, origin, side, max_lines),\n Direction::Left => self.next_match_left(regex, origin, side, max_lines),\n }\n }\n\n /// Find the next match to the right of the origin.\n fn next_match_right(\n &self,\n regex: &mut RegexSearch,\n origin: Point,\n side: Side,\n max_lines: Option<usize>,\n ) -> Option<Match> {\n let start = self.line_search_left(origin);\n let mut end = start;\n\n // Limit maximum number of lines searched.\n end = match max_lines {\n Some(max_lines) => {\n let line = (start.line + max_lines).grid_clamp(self, Boundary::None);\n Point::new(line, self.last_column())\n },\n _ => end.sub(self, Boundary::None, 1),\n };\n\n let mut regex_iter = RegexIter::new(start, end, Direction::Right, self, regex).peekable();\n\n // Check if there's any match at all.\n let first_match = regex_iter.peek()?.clone();\n\n let regex_match = regex_iter\n .find(|regex_match| {\n let match_point = Self::match_side(regex_match, side);\n\n // If the match's point is beyond the origin, we're done.\n match_point.line < start.line\n || match_point.line > origin.line\n || (match_point.line == origin.line && match_point.column >= origin.column)\n })\n .unwrap_or(first_match);\n\n Some(regex_match)\n }\n\n /// Find the next match to the left of the origin.\n fn next_match_left(\n &self,\n regex: &mut RegexSearch,\n origin: Point,\n side: Side,\n max_lines: Option<usize>,\n ) -> Option<Match> {\n let start = self.line_search_right(origin);\n let mut end = start;\n\n // Limit maximum number of lines searched.\n end = match max_lines {\n 
Some(max_lines) => {\n let line = (start.line - max_lines).grid_clamp(self, Boundary::None);\n Point::new(line, Column(0))\n },\n _ => end.add(self, Boundary::None, 1),\n };\n\n let mut regex_iter = RegexIter::new(start, end, Direction::Left, self, regex).peekable();\n\n // Check if there's any match at all.\n let first_match = regex_iter.peek()?.clone();\n\n let regex_match = regex_iter\n .find(|regex_match| {\n let match_point = Self::match_side(regex_match, side);\n\n // If the match's point is beyond the origin, we're done.\n match_point.line > start.line\n || match_point.line < origin.line\n || (match_point.line == origin.line && match_point.column <= origin.column)\n })\n .unwrap_or(first_match);\n\n Some(regex_match)\n }\n\n /// Get the side of a match.\n fn match_side(regex_match: &Match, side: Side) -> Point {\n match side {\n Side::Right => *regex_match.end(),\n Side::Left => *regex_match.start(),\n }\n }\n\n /// Find the next regex match to the left of the origin point.\n ///\n /// The origin is always included in the regex.\n pub fn regex_search_left(\n &self,\n regex: &mut RegexSearch,\n start: Point,\n end: Point,\n ) -> Option<Match> {\n // Find start and end of match.\n let match_start = self.regex_search(start, end, &mut regex.left_fdfa)?;\n let match_end = self.regex_search(match_start, start, &mut regex.left_rdfa)?;\n\n Some(match_start..=match_end)\n }\n\n /// Find the next regex match to the right of the origin point.\n ///\n /// The origin is always included in the regex.\n pub fn regex_search_right(\n &self,\n regex: &mut RegexSearch,\n start: Point,\n end: Point,\n ) -> Option<Match> {\n // Find start and end of match.\n let match_end = self.regex_search(start, end, &mut regex.right_fdfa)?;\n let match_start = self.regex_search(match_end, start, &mut regex.right_rdfa)?;\n\n Some(match_start..=match_end)\n }\n\n /// Find the next regex match.\n ///\n /// This will always return the side of the first match which is farthest from the start 
point.\n fn regex_search(&self, start: Point, end: Point, regex: &mut LazyDfa) -> Option<Point> {\n match self.regex_search_internal(start, end, regex) {\n Ok(regex_match) => regex_match,\n Err(err) => {\n warn!(\"Regex exceeded complexity limit\");\n debug!(\" {err}\");\n None\n },\n }\n }\n\n /// Find the next regex match.\n ///\n /// To automatically log regex complexity errors, use [`Self::regex_search`] instead.\n fn regex_search_internal(\n &self,\n start: Point,\n end: Point,\n regex: &mut LazyDfa,\n ) -> Result<Option<Point>, Box<dyn Error>> {\n let topmost_line = self.topmost_line();\n let screen_lines = self.screen_lines() as i32;\n let last_column = self.last_column();\n\n // Advance the iterator.\n let next = match regex.direction {\n Direction::Right => GridIterator::next,\n Direction::Left => GridIterator::prev,\n };\n\n // Get start state for the DFA.\n let regex_anchored = if regex.match_all { Anchored::Yes } else { Anchored::No };\n let input = Input::new(&[]).anchored(regex_anchored);\n let mut state = regex.dfa.start_state_forward(&mut regex.cache, &input).unwrap();\n\n let mut iter = self.grid.iter_from(start);\n let mut regex_match = None;\n let mut done = false;\n\n let mut cell = iter.cell();\n self.skip_fullwidth(&mut iter, &mut cell, regex.direction);\n let mut c = cell.c;\n let mut last_wrapped = iter.cell().flags.contains(Flags::WRAPLINE);\n\n let mut point = iter.point();\n let mut last_point = point;\n let mut consumed_bytes = 0;\n\n // Reset the regex state to restart the search.\n macro_rules! 
reset_state {\n () => {{\n state = regex.dfa.start_state_forward(&mut regex.cache, &input)?;\n consumed_bytes = 0;\n regex_match = None;\n }};\n }\n\n 'outer: loop {\n // Convert char to array of bytes.\n let mut buf = [0; 4];\n let utf8_len = c.encode_utf8(&mut buf).len();\n\n // Pass char to DFA as individual bytes.\n for i in 0..utf8_len {\n // Inverse byte order when going left.\n let byte = match regex.direction {\n Direction::Right => buf[i],\n Direction::Left => buf[utf8_len - i - 1],\n };\n\n state = regex.dfa.next_state(&mut regex.cache, state, byte)?;\n consumed_bytes += 1;\n\n if i == 0 && state.is_match() {\n // Matches require one additional BYTE of lookahead, so we check the match state\n // for the first byte of every new character to determine if the last character\n // was a match.\n regex_match = Some(last_point);\n } else if state.is_dead() {\n if consumed_bytes == 2 {\n // Reset search if we found an empty match.\n //\n // With an unanchored search, a dead state only occurs after the end of a\n // match has been found. While we want to abort after the first match has\n // ended, we don't want empty matches since we cannot highlight them.\n //\n // So once we encounter an empty match, we reset our parser state and clear\n // the match, effectively starting a new search one character farther than\n // before.\n //\n // An empty match requires consuming `2` bytes, since the first byte will\n // report the match for the empty string, while the second byte then\n // reports the dead state indicating the first character isn't part of the\n // match.\n reset_state!();\n\n // Retry this character if first byte caused failure.\n //\n // After finding an empty match, we want to advance the search start by one\n // character. 
So if the first character has multiple bytes and the dead\n // state isn't reached at `i == 0`, then we continue with the rest of the\n // loop to advance the parser by one character.\n if i == 0 {\n continue 'outer;\n }\n } else {\n // Abort on dead state.\n break 'outer;\n }\n }\n }\n\n // Stop once we've reached the target point.\n if point == end || done {\n // When reaching the end-of-input, we need to notify the parser that no look-ahead\n // is possible and check for state changes.\n state = regex.dfa.next_eoi_state(&mut regex.cache, state)?;\n if state.is_match() {\n regex_match = Some(point);\n } else if state.is_dead() && consumed_bytes == 1 {\n // Ignore empty matches.\n regex_match = None;\n }\n\n break;\n }\n\n // Advance grid cell iterator.\n let mut cell = match next(&mut iter) {\n Some(Indexed { cell, .. }) => cell,\n None => {\n // Wrap around to other end of the scrollback buffer.\n let line = topmost_line - point.line + screen_lines - 1;\n let start = Point::new(line, last_column - point.column);\n iter = self.grid.iter_from(start);\n iter.cell()\n },\n };\n\n // Check for completion before potentially skipping over fullwidth characters.\n done = iter.point() == end;\n\n self.skip_fullwidth(&mut iter, &mut cell, regex.direction);\n\n c = cell.c;\n let wrapped = iter.cell().flags.contains(Flags::WRAPLINE);\n\n last_point = mem::replace(&mut point, iter.point());\n\n // Handle linebreaks.\n if (last_point.column == last_column && point.column == Column(0) && !last_wrapped)\n || (last_point.column == Column(0) && point.column == last_column && !wrapped)\n {\n // When reaching the end-of-input, we need to notify the parser that no\n // look-ahead is possible and check if the current state is still a match.\n state = regex.dfa.next_eoi_state(&mut regex.cache, state)?;\n if state.is_match() {\n regex_match = Some(last_point);\n }\n\n match regex_match {\n // Stop if we found a non-empty match before the linebreak.\n Some(_) if (!state.is_dead() || 
consumed_bytes > 1) && consumed_bytes != 0 => {\n break;\n },\n _ => reset_state!(),\n }\n }\n\n last_wrapped = wrapped;\n }\n\n Ok(regex_match)\n }\n\n /// Advance a grid iterator over fullwidth characters.\n fn skip_fullwidth<'a>(\n &self,\n iter: &'a mut GridIterator<'_, Cell>,\n cell: &mut &'a Cell,\n direction: Direction,\n ) {\n match direction {\n // In the alternate screen buffer there might not be a wide char spacer after a wide\n // char, so we only advance the iterator when the wide char is not in the last column.\n Direction::Right\n if cell.flags.contains(Flags::WIDE_CHAR)\n && iter.point().column < self.last_column() =>\n {\n iter.next();\n },\n Direction::Right if cell.flags.contains(Flags::LEADING_WIDE_CHAR_SPACER) => {\n if let Some(Indexed { cell: new_cell, .. }) = iter.next() {\n *cell = new_cell;\n }\n iter.next();\n },\n Direction::Left if cell.flags.contains(Flags::WIDE_CHAR_SPACER) => {\n if let Some(Indexed { cell: new_cell, .. }) = iter.prev() {\n *cell = new_cell;\n }\n\n let prev = iter.point().sub(self, Boundary::Grid, 1);\n if self.grid[prev].flags.contains(Flags::LEADING_WIDE_CHAR_SPACER) {\n iter.prev();\n }\n },\n _ => (),\n }\n }\n\n /// Find next matching bracket.\n pub fn bracket_search(&self, point: Point) -> Option<Point> {\n let start_char = self.grid[point].c;\n\n // Find the matching bracket we're looking for\n let (forward, end_char) = BRACKET_PAIRS.iter().find_map(|(open, close)| {\n if open == &start_char {\n Some((true, *close))\n } else if close == &start_char {\n Some((false, *open))\n } else {\n None\n }\n })?;\n\n let mut iter = self.grid.iter_from(point);\n\n // For every character match that equals the starting bracket, we\n // ignore one bracket of the opposite type.\n let mut skip_pairs = 0;\n\n loop {\n // Check the next cell\n let cell = if forward { iter.next() } else { iter.prev() };\n\n // Break if there are no more cells\n let cell = match cell {\n Some(cell) => cell,\n None => break,\n };\n\n // Check if 
the bracket matches\n if cell.c == end_char && skip_pairs == 0 {\n return Some(cell.point);\n } else if cell.c == start_char {\n skip_pairs += 1;\n } else if cell.c == end_char {\n skip_pairs -= 1;\n }\n }\n\n None\n }\n\n /// Find left end of semantic block.\n #[must_use]\n pub fn semantic_search_left(&self, point: Point) -> Point {\n match self.inline_search_left(point, self.semantic_escape_chars()) {\n // If we found a match, reverse for at least one cell, skipping over wide cell spacers.\n Ok(point) => {\n let wide_spacer = Flags::WIDE_CHAR_SPACER | Flags::LEADING_WIDE_CHAR_SPACER;\n self.grid\n .iter_from(point)\n .find(|cell| !cell.flags.intersects(wide_spacer))\n .map_or(point, |cell| cell.point)\n },\n Err(point) => point,\n }\n }\n\n /// Find right end of semantic block.\n #[must_use]\n pub fn semantic_search_right(&self, point: Point) -> Point {\n match self.inline_search_right(point, self.semantic_escape_chars()) {\n Ok(point) => self.grid.iter_from(point).prev().map_or(point, |cell| cell.point),\n Err(point) => point,\n }\n }\n\n /// Searching to the left, find the next character contained in `needles`.\n pub fn inline_search_left(&self, mut point: Point, needles: &str) -> Result<Point, Point> {\n // Limit the starting point to the last line in the history\n point.line = max(point.line, self.topmost_line());\n\n let mut iter = self.grid.iter_from(point);\n let last_column = self.columns() - 1;\n\n let wide_spacer = Flags::WIDE_CHAR_SPACER | Flags::LEADING_WIDE_CHAR_SPACER;\n while let Some(cell) = iter.prev() {\n if cell.point.column == last_column && !cell.flags.contains(Flags::WRAPLINE) {\n break;\n }\n\n point = cell.point;\n\n if !cell.flags.intersects(wide_spacer) && needles.contains(cell.c) {\n return Ok(point);\n }\n }\n\n Err(point)\n }\n\n /// Searching to the right, find the next character contained in `needles`.\n pub fn inline_search_right(&self, mut point: Point, needles: &str) -> Result<Point, Point> {\n // Limit the starting point to the 
last line in the history\n point.line = max(point.line, self.topmost_line());\n\n let wide_spacer = Flags::WIDE_CHAR_SPACER | Flags::LEADING_WIDE_CHAR_SPACER;\n let last_column = self.columns() - 1;\n\n // Immediately stop if start point in on line break.\n if point.column == last_column && !self.grid[point].flags.contains(Flags::WRAPLINE) {\n return Err(point);\n }\n\n for cell in self.grid.iter_from(point) {\n point = cell.point;\n\n if !cell.flags.intersects(wide_spacer) && needles.contains(cell.c) {\n return Ok(point);\n }\n\n if point.column == last_column && !cell.flags.contains(Flags::WRAPLINE) {\n break;\n }\n }\n\n Err(point)\n }\n\n /// Find the beginning of the current line across linewraps.\n pub fn line_search_left(&self, mut point: Point) -> Point {\n while point.line > self.topmost_line()\n && self.grid[point.line - 1i32][self.last_column()].flags.contains(Flags::WRAPLINE)\n {\n point.line -= 1;\n }\n\n point.column = Column(0);\n\n point\n }\n\n /// Find the end of the current line across linewraps.\n pub fn line_search_right(&self, mut point: Point) -> Point {\n while point.line + 1 < self.screen_lines()\n && self.grid[point.line][self.last_column()].flags.contains(Flags::WRAPLINE)\n {\n point.line += 1;\n }\n\n point.column = self.last_column();\n\n point\n }\n}",
"class_signature": "impl<T> Term<T>"
} |
new | alacritty-master/alacritty/src/string.rs | pub fn new(
text: &'a str,
max_width: usize,
direction: ShortenDirection,
mut shortener: Option<char>,
) -> Self {
if text.is_empty() {
// If we don't have any text don't produce a shortener for it.
let _ = shortener.take();
}
if direction == ShortenDirection::Right {
return Self {
#[allow(clippy::iter_skip_zero)]
chars: text.chars().skip(0),
accumulated_len: 0,
text_action: TextAction::Char,
max_width,
direction,
shortener,
};
}
let mut offset = 0;
let mut current_len = 0;
let mut iter = text.chars().rev().enumerate();
while let Some((idx, ch)) = iter.next() {
let ch_width = ch.width().unwrap_or(1);
current_len += ch_width;
match current_len.cmp(&max_width) {
// We can only be here if we've faced wide character or we've already
// handled equality situation. Anyway, break.
Ordering::Greater => break,
Ordering::Equal => {
if shortener.is_some() && iter.clone().next().is_some() {
// We have one more character after, shortener will accumulate for
// the `current_len`.
break;
} else {
// The match is exact, consume shortener.
let _ = shortener.take();
}
},
Ordering::Less => (),
}
offset = idx + 1;
}
// Consume the iterator to count the number of characters in it.
let num_chars = iter.last().map_or(offset, |(idx, _)| idx + 1);
let skip_chars = num_chars - offset;
let text_action = if current_len < max_width || shortener.is_none() {
TextAction::Char
} else {
TextAction::Shortener
};
let chars = text.chars().skip(skip_chars);
Self { chars, accumulated_len: 0, text_action, max_width, direction, shortener }
} | use std::cmp::Ordering;
use std::iter::Skip;
use std::str::Chars;
use unicode_width::UnicodeWidthChar;
/// The action performed by [`StrShortener`] when yielding its next item.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum TextAction {
    /// Yield a spacer (`' '`) following a wide (2-cell) character.
    Spacer,
    /// Terminate state reached; the iterator yields nothing further.
    Terminate,
    /// Yield the shortener character marking truncated text.
    Shortener,
    /// Yield the next regular character from the source text.
    Char,
}
/// The direction which we should shorten.
#[derive(Clone, Copy, PartialEq, Eq)]
pub enum ShortenDirection {
    /// Shorten to the start of the string (keep the trailing characters).
    Left,
    /// Shorten to the end of the string (keep the leading characters).
    Right,
}
/// Iterator that yields a shortened version of the text.
pub struct StrShortener<'a> {
    /// Remaining characters of the source text (for left-shortening, the
    /// non-fitting prefix has already been skipped).
    chars: Skip<Chars<'a>>,
    /// Terminal cell width yielded so far (excluding the shortener itself).
    accumulated_len: usize,
    /// Maximum number of terminal cells the output may occupy.
    max_width: usize,
    /// Which side of the text is truncated.
    direction: ShortenDirection,
    /// Character emitted in place of the truncated text, if any.
    shortener: Option<char>,
    /// The next action [`Iterator::next`] should perform.
    text_action: TextAction,
}
impl<'a> StrShortener<'a> {
    /// Create a shortening iterator over `text`.
    ///
    /// The output is limited to `max_width` terminal cells; when the text is
    /// truncated, `shortener` (if provided) is emitted on the truncated side.
    pub fn new(
        text: &'a str,
        max_width: usize,
        direction: ShortenDirection,
        mut shortener: Option<char>,
    ) -> Self {
        // An empty input never needs a truncation indicator.
        if text.is_empty() {
            shortener = None;
        }

        // Right-shortening needs no preprocessing: characters are consumed
        // from the front and truncation is handled lazily in `next`.
        if direction == ShortenDirection::Right {
            return Self {
                #[allow(clippy::iter_skip_zero)]
                chars: text.chars().skip(0),
                accumulated_len: 0,
                text_action: TextAction::Char,
                max_width,
                direction,
                shortener,
            };
        }

        // For left-shortening, walk the text backwards to determine how many
        // trailing characters fit into the width budget.
        let mut kept_chars = 0;
        let mut used_width = 0;
        let mut rev_iter = text.chars().rev().enumerate();
        while let Some((idx, ch)) = rev_iter.next() {
            used_width += ch.width().unwrap_or(1);
            if used_width > max_width {
                // A wide character overshot the budget (or equality was
                // already handled); stop without keeping this character.
                break;
            }
            if used_width == max_width {
                if shortener.is_some() && rev_iter.clone().next().is_some() {
                    // More text remains, so this slot is reserved for the
                    // shortener; don't keep the current character.
                    break;
                }
                // Exact fit with nothing left over: no shortener required.
                shortener = None;
            }
            kept_chars = idx + 1;
        }

        // Drain the iterator to learn the total character count, then skip
        // everything that didn't fit.
        let total_chars = rev_iter.last().map_or(kept_chars, |(idx, _)| idx + 1);
        let chars = text.chars().skip(total_chars - kept_chars);

        // Lead with the shortener only when the text was actually truncated.
        let text_action = if used_width < max_width || shortener.is_none() {
            TextAction::Char
        } else {
            TextAction::Shortener
        };

        Self { chars, accumulated_len: 0, text_action, max_width, direction, shortener }
    }
}
impl Iterator for StrShortener<'_> {
    type Item = char;

    fn next(&mut self) -> Option<Self::Item> {
        match self.text_action {
            TextAction::Spacer => {
                // The spacer follows a wide character; resume normal output.
                self.text_action = TextAction::Char;
                Some(' ')
            },
            TextAction::Terminate => {
                // We've reached the termination state.
                None
            },
            TextAction::Shortener => {
                // Left-shortening yields the shortener up front and then the
                // text; right-shortening ends the output with it.
                self.text_action = match self.direction {
                    ShortenDirection::Left => TextAction::Char,
                    ShortenDirection::Right => TextAction::Terminate,
                };
                // Taking it prevents a second shortener being yielded later.
                self.shortener.take()
            },
            TextAction::Char => {
                let ch = self.chars.next()?;
                let ch_width = ch.width().unwrap_or(1);
                // Advance width.
                self.accumulated_len += ch_width;

                if self.accumulated_len > self.max_width {
                    // A wide character overflowed the budget.
                    self.text_action = TextAction::Terminate;
                    return self.shortener;
                }
                if self.accumulated_len == self.max_width && self.shortener.is_some() {
                    // Budget exactly consumed; emit the shortener instead of
                    // `ch` when more characters would follow.
                    let truncated = self.chars.clone().next().is_some();
                    self.text_action = TextAction::Terminate;
                    return if truncated { self.shortener } else { Some(ch) };
                }

                // Add a spacer for wide character.
                if ch_width == 2 {
                    self.text_action = TextAction::Spacer;
                }
                Some(ch)
            },
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    // Expected strings: wide CJK characters occupy two cells and are followed
    // by a spacer (' ') in the iterator's output.

    /// Truncation with an explicit shortener character.
    #[test]
    fn into_shortened_with_shortener() {
        let s = "Hello";
        let len = s.chars().count();
        assert_eq!(
            "",
            StrShortener::new("", 1, ShortenDirection::Left, Some('.')).collect::<String>()
        );
        assert_eq!(
            ".",
            StrShortener::new(s, 1, ShortenDirection::Right, Some('.')).collect::<String>()
        );
        assert_eq!(
            ".",
            StrShortener::new(s, 1, ShortenDirection::Left, Some('.')).collect::<String>()
        );
        assert_eq!(
            "H.",
            StrShortener::new(s, 2, ShortenDirection::Right, Some('.')).collect::<String>()
        );
        assert_eq!(
            ".o",
            StrShortener::new(s, 2, ShortenDirection::Left, Some('.')).collect::<String>()
        );
        // A budget larger than the text yields the text unchanged.
        assert_eq!(
            s,
            &StrShortener::new(s, len * 2, ShortenDirection::Right, Some('.')).collect::<String>()
        );
        assert_eq!(
            s,
            &StrShortener::new(s, len * 2, ShortenDirection::Left, Some('.')).collect::<String>()
        );
        // Mixed-width input: 'ち'/'は' are 2 cells wide, 'P' is 1.
        let s = "ちはP";
        let len = 2 + 2 + 1;
        assert_eq!(
            ".",
            &StrShortener::new(s, 1, ShortenDirection::Right, Some('.')).collect::<String>()
        );
        assert_eq!(
            &".",
            &StrShortener::new(s, 1, ShortenDirection::Left, Some('.')).collect::<String>()
        );
        assert_eq!(
            ".",
            &StrShortener::new(s, 2, ShortenDirection::Right, Some('.')).collect::<String>()
        );
        assert_eq!(
            ".P",
            &StrShortener::new(s, 2, ShortenDirection::Left, Some('.')).collect::<String>()
        );
        assert_eq!(
            "ち .",
            &StrShortener::new(s, 3, ShortenDirection::Right, Some('.')).collect::<String>()
        );
        assert_eq!(
            ".P",
            &StrShortener::new(s, 3, ShortenDirection::Left, Some('.')).collect::<String>()
        );
        assert_eq!(
            "ち は P",
            &StrShortener::new(s, len * 2, ShortenDirection::Left, Some('.')).collect::<String>()
        );
        assert_eq!(
            "ち は P",
            &StrShortener::new(s, len * 2, ShortenDirection::Right, Some('.')).collect::<String>()
        );
    }

    /// Truncation without a shortener: characters are simply dropped.
    #[test]
    fn into_shortened_without_shortener() {
        let s = "Hello";
        assert_eq!("", StrShortener::new("", 1, ShortenDirection::Left, None).collect::<String>());
        assert_eq!(
            "H",
            &StrShortener::new(s, 1, ShortenDirection::Right, None).collect::<String>()
        );
        assert_eq!("o", &StrShortener::new(s, 1, ShortenDirection::Left, None).collect::<String>());
        assert_eq!(
            "He",
            &StrShortener::new(s, 2, ShortenDirection::Right, None).collect::<String>()
        );
        assert_eq!(
            "lo",
            &StrShortener::new(s, 2, ShortenDirection::Left, None).collect::<String>()
        );
        assert_eq!(
            &s,
            &StrShortener::new(s, s.len(), ShortenDirection::Right, None).collect::<String>()
        );
        assert_eq!(
            &s,
            &StrShortener::new(s, s.len(), ShortenDirection::Left, None).collect::<String>()
        );
        // Mixed-width input; a wide char that doesn't fully fit is dropped.
        let s = "こJんにちはP";
        let len = 2 + 1 + 2 + 2 + 2 + 2 + 1;
        assert_eq!("", &StrShortener::new(s, 1, ShortenDirection::Right, None).collect::<String>());
        assert_eq!("P", &StrShortener::new(s, 1, ShortenDirection::Left, None).collect::<String>());
        assert_eq!(
            "こ ",
            &StrShortener::new(s, 2, ShortenDirection::Right, None).collect::<String>()
        );
        assert_eq!("P", &StrShortener::new(s, 2, ShortenDirection::Left, None).collect::<String>());
        assert_eq!(
            "こ J",
            &StrShortener::new(s, 3, ShortenDirection::Right, None).collect::<String>()
        );
        assert_eq!(
            "は P",
            &StrShortener::new(s, 3, ShortenDirection::Left, None).collect::<String>()
        );
        assert_eq!(
            "こ Jん に ち は P",
            &StrShortener::new(s, len, ShortenDirection::Left, None).collect::<String>()
        );
        assert_eq!(
            "こ Jん に ち は P",
            &StrShortener::new(s, len, ShortenDirection::Right, None).collect::<String>()
        );
    }
}
| rust | {
"argument_definitions": [
{
"definitions": [
"pub enum ShortenDirection {\n /// Shorten to the start of the string.\n Left,\n\n /// Shorten to the end of the string.\n Right,\n}"
],
"name": "direction",
"type": "ShortenDirection"
},
{
"definitions": [
"pub enum Option<T> {\n /// No value.\n #[lang = \"None\"]\n #[stable(feature = \"rust1\", since = \"1.0.0\")]\n None,\n /// Some value of type `T`.\n #[lang = \"Some\"]\n #[stable(feature = \"rust1\", since = \"1.0.0\")]\n Some(#[stable(feature = \"rust1\", since = \"1.0.0\")] T),\n}"
],
"name": "shortener",
"type": "Option<char>"
}
],
"end_line": 106,
"name": "new",
"signature": "pub fn new(\n text: &'a str,\n max_width: usize,\n direction: ShortenDirection,\n mut shortener: Option<char>,\n ) -> Self",
"start_line": 41
} | {
"class_name": "impl<'a> StrShortener<'a> {\n pub fn new(\n text: &'a str,\n max_width: usize,\n direction: ShortenDirection,\n mut shortener: Option<char>,\n ) -> Self {\n if text.is_empty() {\n // If we don't have any text don't produce a shortener for it.\n let _ = shortener.take();\n }\n\n if direction == ShortenDirection::Right {\n return Self {\n #[allow(clippy::iter_skip_zero)]\n chars: text.chars().skip(0),\n accumulated_len: 0,\n text_action: TextAction::Char,\n max_width,\n direction,\n shortener,\n };\n }\n\n let mut offset = 0;\n let mut current_len = 0;\n\n let mut iter = text.chars().rev().enumerate();\n\n while let Some((idx, ch)) = iter.next() {\n let ch_width = ch.width().unwrap_or(1);\n current_len += ch_width;\n\n match current_len.cmp(&max_width) {\n // We can only be here if we've faced wide character or we've already\n // handled equality situation. Anyway, break.\n Ordering::Greater => break,\n Ordering::Equal => {\n if shortener.is_some() && iter.clone().next().is_some() {\n // We have one more character after, shortener will accumulate for\n // the `current_len`.\n break;\n } else {\n // The match is exact, consume shortener.\n let _ = shortener.take();\n }\n },\n Ordering::Less => (),\n }\n\n offset = idx + 1;\n }\n\n // Consume the iterator to count the number of characters in it.\n let num_chars = iter.last().map_or(offset, |(idx, _)| idx + 1);\n let skip_chars = num_chars - offset;\n\n let text_action = if current_len < max_width || shortener.is_none() {\n TextAction::Char\n } else {\n TextAction::Shortener\n };\n\n let chars = text.chars().skip(skip_chars);\n\n Self { chars, accumulated_len: 0, text_action, max_width, direction, shortener }\n }\n}",
"class_signature": "impl<'a> StrShortener<'a>"
} |
new | alacritty-master/alacritty/src/event.rs | pub fn new(
config: UiConfig,
cli_options: CliOptions,
event_loop: &EventLoop<Event>,
) -> Processor {
let proxy = event_loop.create_proxy();
let scheduler = Scheduler::new(proxy.clone());
let initial_window_options = Some(cli_options.window_options.clone());
// Disable all device events, since we don't care about them.
event_loop.listen_device_events(DeviceEvents::Never);
// SAFETY: Since this takes a pointer to the winit event loop, it MUST be dropped first,
// which is done in `loop_exiting`.
let clipboard = unsafe { Clipboard::new(event_loop.display_handle().unwrap().as_raw()) };
// Create a config monitor.
//
// The monitor watches the config file for changes and reloads it. Pending
// config changes are processed in the main loop.
let mut config_monitor = None;
if config.live_config_reload() {
config_monitor =
ConfigMonitor::new(config.config_paths.clone(), event_loop.create_proxy());
}
Processor {
initial_window_options,
initial_window_error: None,
cli_options,
proxy,
scheduler,
gl_config: None,
config: Rc::new(config),
clipboard,
windows: Default::default(),
#[cfg(unix)]
global_ipc_options: Default::default(),
config_monitor,
}
} | //! Process window events.
use crate::ConfigMonitor;
use glutin::config::GetGlConfig;
use std::borrow::Cow;
use std::cmp::min;
use std::collections::hash_map::Entry;
use std::collections::{HashMap, HashSet, VecDeque};
use std::error::Error;
use std::ffi::OsStr;
use std::fmt::Debug;
#[cfg(not(windows))]
use std::os::unix::io::RawFd;
use std::path::PathBuf;
use std::rc::Rc;
use std::time::{Duration, Instant};
use std::{env, f32, mem};
use ahash::RandomState;
use crossfont::Size as FontSize;
use glutin::config::Config as GlutinConfig;
use glutin::display::GetGlDisplay;
use log::{debug, error, info, warn};
use winit::application::ApplicationHandler;
use winit::event::{
ElementState, Event as WinitEvent, Ime, Modifiers, MouseButton, StartCause,
Touch as TouchEvent, WindowEvent,
};
use winit::event_loop::{ActiveEventLoop, ControlFlow, DeviceEvents, EventLoop, EventLoopProxy};
use winit::raw_window_handle::HasDisplayHandle;
use winit::window::WindowId;
use alacritty_terminal::event::{Event as TerminalEvent, EventListener, Notify};
use alacritty_terminal::event_loop::Notifier;
use alacritty_terminal::grid::{BidirectionalIterator, Dimensions, Scroll};
use alacritty_terminal::index::{Boundary, Column, Direction, Line, Point, Side};
use alacritty_terminal::selection::{Selection, SelectionType};
use alacritty_terminal::term::cell::Flags;
use alacritty_terminal::term::search::{Match, RegexSearch};
use alacritty_terminal::term::{self, ClipboardType, Term, TermMode};
use alacritty_terminal::vte::ansi::NamedColor;
#[cfg(unix)]
use crate::cli::{IpcConfig, ParsedOptions};
use crate::cli::{Options as CliOptions, WindowOptions};
use crate::clipboard::Clipboard;
use crate::config::ui_config::{HintAction, HintInternalAction};
use crate::config::{self, UiConfig};
#[cfg(not(windows))]
use crate::daemon::foreground_process_path;
use crate::daemon::spawn_daemon;
use crate::display::color::Rgb;
use crate::display::hint::HintMatch;
use crate::display::window::Window;
use crate::display::{Display, Preedit, SizeInfo};
use crate::input::{self, ActionContext as _, FONT_SIZE_STEP};
use crate::logging::{LOG_TARGET_CONFIG, LOG_TARGET_WINIT};
use crate::message_bar::{Message, MessageBuffer};
use crate::scheduler::{Scheduler, TimerId, Topic};
use crate::window_context::WindowContext;
/// Duration after the last user input until an unlimited search is performed.
pub const TYPING_SEARCH_DELAY: Duration = Duration::from_millis(500);
/// Maximum number of lines for the blocking search while still typing the search regex.
const MAX_SEARCH_WHILE_TYPING: Option<usize> = Some(1000);
/// Maximum number of search terms stored in the history.
const MAX_SEARCH_HISTORY_SIZE: usize = 255;
/// Touch zoom speed.
///
/// NOTE(review): presumably font-size change per pixel of pinch movement —
/// the use site is not visible in this chunk; confirm before relying on it.
const TOUCH_ZOOM_FACTOR: f32 = 0.01;
/// The event processor.
///
/// Stores some state from received events and dispatches actions when they are
/// triggered.
pub struct Processor {
    /// Watches the config files and reports changes; restarted when imports change.
    pub config_monitor: Option<ConfigMonitor>,
    /// System clipboard handle; swapped for a nop clipboard before the loop is dropped.
    clipboard: Clipboard,
    /// Timer scheduler; its next deadline drives the loop's `WaitUntil`.
    scheduler: Scheduler,
    /// Options for the first window, taken exactly once at startup.
    initial_window_options: Option<WindowOptions>,
    /// Error from creating the first window; returned from `run`.
    initial_window_error: Option<Box<dyn Error>>,
    /// Per-window state, keyed by winit window id.
    windows: HashMap<WindowId, WindowContext, RandomState>,
    /// Handle for sending user events into the event loop.
    proxy: EventLoopProxy<Event>,
    /// GL config chosen by the first window; reused for additional windows.
    gl_config: Option<GlutinConfig>,
    /// IPC config overrides applied to all current and future windows.
    #[cfg(unix)]
    global_ipc_options: ParsedOptions,
    /// Command line options, updated on config reload.
    cli_options: CliOptions,
    /// Shared base UI configuration.
    config: Rc<UiConfig>,
}
impl Processor {
    /// Create a new event processor.
    pub fn new(
        config: UiConfig,
        cli_options: CliOptions,
        event_loop: &EventLoop<Event>,
    ) -> Processor {
        let proxy = event_loop.create_proxy();
        let scheduler = Scheduler::new(proxy.clone());
        let initial_window_options = Some(cli_options.window_options.clone());
        // Disable all device events, since we don't care about them.
        event_loop.listen_device_events(DeviceEvents::Never);
        // SAFETY: Since this takes a pointer to the winit event loop, it MUST be dropped first,
        // which is done in `loop_exiting`.
        let clipboard = unsafe { Clipboard::new(event_loop.display_handle().unwrap().as_raw()) };
        // Create a config monitor.
        //
        // The monitor watches the config file for changes and reloads it. Pending
        // config changes are processed in the main loop.
        let mut config_monitor = None;
        if config.live_config_reload() {
            config_monitor =
                ConfigMonitor::new(config.config_paths.clone(), event_loop.create_proxy());
        }
        Processor {
            initial_window_options,
            initial_window_error: None,
            cli_options,
            proxy,
            scheduler,
            gl_config: None,
            config: Rc::new(config),
            clipboard,
            windows: Default::default(),
            #[cfg(unix)]
            global_ipc_options: Default::default(),
            config_monitor,
        }
    }
    /// Create initial window and load GL platform.
    ///
    /// This will initialize the OpenGL Api and pick a config that
    /// will be used for the rest of the windows.
    pub fn create_initial_window(
        &mut self,
        event_loop: &ActiveEventLoop,
        window_options: WindowOptions,
    ) -> Result<(), Box<dyn Error>> {
        let window_context = WindowContext::initial(
            event_loop,
            self.proxy.clone(),
            self.config.clone(),
            window_options,
        )?;
        // Remember the GL config of the first window; later windows reuse it.
        self.gl_config = Some(window_context.display.gl_context().config());
        self.windows.insert(window_context.id(), window_context);
        Ok(())
    }
    /// Create a new terminal window.
    pub fn create_window(
        &mut self,
        event_loop: &ActiveEventLoop,
        options: WindowOptions,
    ) -> Result<(), Box<dyn Error>> {
        // Only valid after `create_initial_window` picked the GL config.
        let gl_config = self.gl_config.as_ref().unwrap();
        // Override config with CLI/IPC options.
        let mut config_overrides = options.config_overrides();
        #[cfg(unix)]
        config_overrides.extend_from_slice(&self.global_ipc_options);
        let mut config = self.config.clone();
        config = config_overrides.override_config_rc(config);
        let window_context = WindowContext::additional(
            gl_config,
            event_loop,
            self.proxy.clone(),
            config,
            options,
            config_overrides,
        )?;
        self.windows.insert(window_context.id(), window_context);
        Ok(())
    }
    /// Run the event loop.
    ///
    /// The result is exit code generate from the loop.
    pub fn run(&mut self, event_loop: EventLoop<Event>) -> Result<(), Box<dyn Error>> {
        let result = event_loop.run_app(self);
        // A failure to create the first window takes precedence over the loop result.
        if let Some(initial_window_error) = self.initial_window_error.take() {
            Err(initial_window_error)
        } else {
            result.map_err(Into::into)
        }
    }
    /// Check if an event is irrelevant and can be skipped.
    fn skip_window_event(event: &WindowEvent) -> bool {
        matches!(
            event,
            WindowEvent::KeyboardInput { is_synthetic: true, .. }
                | WindowEvent::ActivationTokenDone { .. }
                | WindowEvent::DoubleTapGesture { .. }
                | WindowEvent::TouchpadPressure { .. }
                | WindowEvent::RotationGesture { .. }
                | WindowEvent::CursorEntered { .. }
                | WindowEvent::PinchGesture { .. }
                | WindowEvent::AxisMotion { .. }
                | WindowEvent::PanGesture { .. }
                | WindowEvent::HoveredFileCancelled
                | WindowEvent::Destroyed
                | WindowEvent::ThemeChanged(_)
                | WindowEvent::HoveredFile(_)
                | WindowEvent::Moved(_)
        )
    }
}
impl ApplicationHandler<Event> for Processor {
    fn resumed(&mut self, _event_loop: &ActiveEventLoop) {}
    /// On loop init (unless in daemon mode), create the initial window.
    fn new_events(&mut self, event_loop: &ActiveEventLoop, cause: StartCause) {
        if cause != StartCause::Init || self.cli_options.daemon {
            return;
        }
        if let Some(window_options) = self.initial_window_options.take() {
            if let Err(err) = self.create_initial_window(event_loop, window_options) {
                self.initial_window_error = Some(err);
                event_loop.exit();
                return;
            }
        }
        info!("Initialisation complete");
    }
    /// Forward a window event to the owning `WindowContext`.
    fn window_event(
        &mut self,
        _event_loop: &ActiveEventLoop,
        window_id: WindowId,
        event: WindowEvent,
    ) {
        if self.config.debug.print_events {
            info!(target: LOG_TARGET_WINIT, "{event:?}");
        }
        // Ignore all events we do not care about.
        if Self::skip_window_event(&event) {
            return;
        }
        let window_context = match self.windows.get_mut(&window_id) {
            Some(window_context) => window_context,
            None => return,
        };
        let is_redraw = matches!(event, WindowEvent::RedrawRequested);
        window_context.handle_event(
            #[cfg(target_os = "macos")]
            _event_loop,
            &self.proxy,
            &mut self.clipboard,
            &mut self.scheduler,
            WinitEvent::WindowEvent { window_id, event },
        );
        if is_redraw {
            window_context.draw(&mut self.scheduler);
        }
    }
    /// Dispatch Alacritty's own events, globally or per window.
    fn user_event(&mut self, event_loop: &ActiveEventLoop, event: Event) {
        if self.config.debug.print_events {
            info!(target: LOG_TARGET_WINIT, "{event:?}");
        }
        // Handle events which don't mandate the WindowId.
        match (event.payload, event.window_id.as_ref()) {
            // Process IPC config update.
            #[cfg(unix)]
            (EventType::IpcConfig(ipc_config), window_id) => {
                // Try and parse options as toml.
                let mut options = ParsedOptions::from_options(&ipc_config.options);
                // Override IPC config for each window with matching ID.
                for (_, window_context) in self
                    .windows
                    .iter_mut()
                    .filter(|(id, _)| window_id.is_none() || window_id == Some(*id))
                {
                    if ipc_config.reset {
                        window_context.reset_window_config(self.config.clone());
                    } else {
                        window_context.add_window_config(self.config.clone(), &options);
                    }
                }
                // Persist global options for future windows.
                if window_id.is_none() {
                    if ipc_config.reset {
                        self.global_ipc_options.clear();
                    } else {
                        self.global_ipc_options.append(&mut options);
                    }
                }
            },
            (EventType::ConfigReload(path), _) => {
                // Clear config logs from message bar for all terminals.
                for window_context in self.windows.values_mut() {
                    if !window_context.message_buffer.is_empty() {
                        window_context.message_buffer.remove_target(LOG_TARGET_CONFIG);
                        window_context.display.pending_update.dirty = true;
                    }
                }
                // Load config and update each terminal.
                if let Ok(config) = config::reload(&path, &mut self.cli_options) {
                    self.config = Rc::new(config);
                    // Restart config monitor if imports changed.
                    if let Some(monitor) = self.config_monitor.take() {
                        let paths = &self.config.config_paths;
                        self.config_monitor = if monitor.needs_restart(paths) {
                            monitor.shutdown();
                            ConfigMonitor::new(paths.clone(), self.proxy.clone())
                        } else {
                            Some(monitor)
                        };
                    }
                    for window_context in self.windows.values_mut() {
                        window_context.update_config(self.config.clone());
                    }
                }
            },
            // Create a new terminal window.
            (EventType::CreateWindow(options), _) => {
                // XXX Ensure that no context is current when creating a new window,
                // otherwise it may lock the backing buffer of the
                // surface of current context when asking
                // e.g. EGL on Wayland to create a new context.
                for window_context in self.windows.values_mut() {
                    window_context.display.make_not_current();
                }
                if self.gl_config.is_none() {
                    // Handle initial window creation in daemon mode.
                    if let Err(err) = self.create_initial_window(event_loop, options) {
                        self.initial_window_error = Some(err);
                        event_loop.exit();
                    }
                } else if let Err(err) = self.create_window(event_loop, options) {
                    error!("Could not open window: {:?}", err);
                }
            },
            // Process events affecting all windows.
            (payload, None) => {
                let event = WinitEvent::UserEvent(Event::new(payload, None));
                for window_context in self.windows.values_mut() {
                    window_context.handle_event(
                        #[cfg(target_os = "macos")]
                        event_loop,
                        &self.proxy,
                        &mut self.clipboard,
                        &mut self.scheduler,
                        event.clone(),
                    );
                }
            },
            (EventType::Terminal(TerminalEvent::Wakeup), Some(window_id)) => {
                if let Some(window_context) = self.windows.get_mut(window_id) {
                    window_context.dirty = true;
                    if window_context.display.window.has_frame {
                        window_context.display.window.request_redraw();
                    }
                }
            },
            (EventType::Terminal(TerminalEvent::Exit), Some(window_id)) => {
                // Remove the closed terminal.
                let window_context = match self.windows.entry(*window_id) {
                    // Don't exit when terminal exits if user asked to hold the window.
                    Entry::Occupied(window_context)
                        if !window_context.get().display.window.hold =>
                    {
                        window_context.remove()
                    },
                    _ => return,
                };
                // Unschedule pending events.
                self.scheduler.unschedule_window(window_context.id());
                // Shutdown if no more terminals are open.
                if self.windows.is_empty() && !self.cli_options.daemon {
                    // Write ref tests of last window to disk.
                    if self.config.debug.ref_test {
                        window_context.write_ref_test_results();
                    }
                    event_loop.exit();
                }
            },
            // NOTE: This event bypasses batching to minimize input latency.
            (EventType::Frame, Some(window_id)) => {
                if let Some(window_context) = self.windows.get_mut(window_id) {
                    window_context.display.window.has_frame = true;
                    if window_context.dirty {
                        window_context.display.window.request_redraw();
                    }
                }
            },
            (payload, Some(window_id)) => {
                if let Some(window_context) = self.windows.get_mut(window_id) {
                    window_context.handle_event(
                        #[cfg(target_os = "macos")]
                        event_loop,
                        &self.proxy,
                        &mut self.clipboard,
                        &mut self.scheduler,
                        WinitEvent::UserEvent(Event::new(payload, *window_id)),
                    );
                }
            },
        };
    }
    /// Flush batched events to every window and recompute the wakeup deadline.
    fn about_to_wait(&mut self, event_loop: &ActiveEventLoop) {
        if self.config.debug.print_events {
            info!(target: LOG_TARGET_WINIT, "About to wait");
        }
        // Dispatch event to all windows.
        for window_context in self.windows.values_mut() {
            window_context.handle_event(
                #[cfg(target_os = "macos")]
                event_loop,
                &self.proxy,
                &mut self.clipboard,
                &mut self.scheduler,
                WinitEvent::AboutToWait,
            );
        }
        // Update the scheduler after event processing to ensure
        // the event loop deadline is as accurate as possible.
        let control_flow = match self.scheduler.update() {
            Some(instant) => ControlFlow::WaitUntil(instant),
            None => ControlFlow::Wait,
        };
        event_loop.set_control_flow(control_flow);
    }
    /// Tear down GL and clipboard state in a safe order before loop exit.
    fn exiting(&mut self, _event_loop: &ActiveEventLoop) {
        if self.config.debug.print_events {
            info!("Exiting the event loop");
        }
        match self.gl_config.take().map(|config| config.display()) {
            #[cfg(not(target_os = "macos"))]
            Some(glutin::display::Display::Egl(display)) => {
                // Ensure that all the windows are dropped, so the destructors for
                // Renderer and contexts ran.
                self.windows.clear();
                // SAFETY: the display is being destroyed after destroying all the
                // windows, thus no attempt to access the EGL state will be made.
                unsafe {
                    display.terminate();
                }
            },
            _ => (),
        }
        // SAFETY: The clipboard must be dropped before the event loop, so use the nop clipboard
        // as a safe placeholder.
        mem::swap(&mut self.clipboard, &mut Clipboard::new_nop());
    }
}
/// Alacritty events.
#[derive(Debug, Clone)]
pub struct Event {
    /// Limit event to a specific window.
    ///
    /// `None` broadcasts the event to every window.
    window_id: Option<WindowId>,
    /// Event payload.
    payload: EventType,
}
impl Event {
    /// Create an event, optionally scoped to a single window.
    pub fn new<I: Into<Option<WindowId>>>(payload: EventType, window_id: I) -> Self {
        let window_id = window_id.into();
        Self { payload, window_id }
    }
}
impl From<Event> for WinitEvent<Event> {
fn from(event: Event) -> Self {
WinitEvent::UserEvent(event)
}
}
/// Alacritty event payloads.
#[derive(Debug, Clone)]
pub enum EventType {
    /// Event emitted by the terminal emulation layer.
    Terminal(TerminalEvent),
    /// The config file at this path changed and should be reloaded.
    ConfigReload(PathBuf),
    /// Message for the message bar.
    Message(Message),
    /// Scroll request.
    Scroll(Scroll),
    /// Request to open a new terminal window with these options.
    CreateWindow(WindowOptions),
    /// Config overrides received over the IPC socket.
    #[cfg(unix)]
    IpcConfig(IpcConfig),
    // NOTE(review): the following three are handled inside the per-window
    // context (handlers not visible in this chunk); presumably cursor blink
    // timer ticks/timeout and the deferred search triggered after
    // `TYPING_SEARCH_DELAY` — confirm in `window_context`.
    BlinkCursor,
    BlinkCursorTimeout,
    SearchNext,
    /// Frame callback; the window may be redrawn again.
    Frame,
}
impl From<TerminalEvent> for EventType {
    /// Wrap a terminal event so it can travel through the winit event loop.
    fn from(event: TerminalEvent) -> Self {
        Self::Terminal(event)
    }
}
/// Regex search state.
pub struct SearchState {
    /// Search direction.
    pub direction: Direction,
    /// Current position in the search history.
    ///
    /// `None` while no search is active.
    pub history_index: Option<usize>,
    /// Change in display offset since the beginning of the search.
    display_offset_delta: i32,
    /// Search origin in viewport coordinates relative to original display offset.
    origin: Point,
    /// Focused match during active search.
    focused_match: Option<Match>,
    /// Search regex and history.
    ///
    /// During an active search, the first element is the user's current input.
    ///
    /// While going through history, the [`SearchState::history_index`] will point to the element
    /// in history which is currently being previewed.
    history: VecDeque<String>,
    /// Compiled search automatons.
    dfas: Option<RegexSearch>,
}
impl SearchState {
    /// Search regex text if a search is active.
    pub fn regex(&self) -> Option<&String> {
        let index = self.history_index?;
        self.history.get(index)
    }

    /// Direction of the search from the search origin.
    pub fn direction(&self) -> Direction {
        self.direction
    }

    /// Focused match during vi-less search.
    pub fn focused_match(&self) -> Option<&Match> {
        self.focused_match.as_ref()
    }

    /// Clear the focused match.
    pub fn clear_focused_match(&mut self) {
        self.focused_match = None;
    }

    /// Active search dfas.
    pub fn dfas(&mut self) -> Option<&mut RegexSearch> {
        self.dfas.as_mut()
    }

    /// Mutable search regex text if a search is active.
    fn regex_mut(&mut self) -> Option<&mut String> {
        let index = self.history_index?;
        self.history.get_mut(index)
    }
}
impl Default for SearchState {
fn default() -> Self {
Self {
direction: Direction::Right,
display_offset_delta: Default::default(),
focused_match: Default::default(),
history_index: Default::default(),
history: Default::default(),
origin: Default::default(),
dfas: Default::default(),
}
}
}
/// Vi inline search state.
pub struct InlineSearchState {
    /// Whether inline search is currently waiting for search character input.
    pub char_pending: bool,
    /// Character being searched for, once entered.
    pub character: Option<char>,
    // NOTE(review): consumed by the inline-search handlers (not visible in
    // this chunk); presumably the search direction and the `t`/`T`-style
    // stop-before-match flag — confirm at the use sites.
    direction: Direction,
    stop_short: bool,
}
impl Default for InlineSearchState {
    /// Idle inline search, defaulting to a rightward direction.
    fn default() -> Self {
        Self {
            direction: Direction::Right,
            char_pending: false,
            stop_short: false,
            character: None,
        }
    }
}
/// Mutable borrows of all per-window state an input action may touch.
pub struct ActionContext<'a, N, T> {
    /// Channel used to write input bytes to the PTY.
    pub notifier: &'a mut N,
    pub terminal: &'a mut Term<T>,
    pub clipboard: &'a mut Clipboard,
    pub mouse: &'a mut Mouse,
    pub touch: &'a mut TouchPurpose,
    pub modifiers: &'a mut Modifiers,
    pub display: &'a mut Display,
    pub message_buffer: &'a mut MessageBuffer,
    pub config: &'a UiConfig,
    pub cursor_blink_timed_out: &'a mut bool,
    #[cfg(target_os = "macos")]
    pub event_loop: &'a ActiveEventLoop,
    pub event_proxy: &'a EventLoopProxy<Event>,
    pub scheduler: &'a mut Scheduler,
    pub search_state: &'a mut SearchState,
    pub inline_search_state: &'a mut InlineSearchState,
    /// Set to request a redraw of this window.
    pub dirty: &'a mut bool,
    pub occluded: &'a mut bool,
    pub preserve_title: bool,
    #[cfg(not(windows))]
    pub master_fd: RawFd,
    #[cfg(not(windows))]
    pub shell_pid: u32,
}
impl<'a, N: Notify + 'a, T: EventListener> input::ActionContext<T> for ActionContext<'a, N, T> {
    /// Forward input bytes to the PTY writer.
    #[inline]
    fn write_to_pty<B: Into<Cow<'static, [u8]>>>(&self, val: B) {
        self.notifier.notify(val);
    }
    /// Request a redraw.
    #[inline]
    fn mark_dirty(&mut self) {
        // Picked up by the event loop to schedule a redraw of this window.
        *self.dirty = true;
    }
    /// Current display size metrics.
    #[inline]
    fn size_info(&self) -> SizeInfo {
        self.display.size_info
    }
    /// Scroll the display, keeping search offset, selection, and the Vi
    /// cursor in sync with the new viewport.
    fn scroll(&mut self, scroll: Scroll) {
        let old_offset = self.terminal.grid().display_offset() as i32;
        let old_vi_cursor = self.terminal.vi_mode_cursor;
        self.terminal.scroll_display(scroll);
        // Positive when the viewport moved down (offset decreased).
        let lines_changed = old_offset - self.terminal.grid().display_offset() as i32;
        // Keep track of manual display offset changes during search.
        if self.search_active() {
            self.search_state.display_offset_delta += lines_changed;
        }
        let vi_mode = self.terminal.mode().contains(TermMode::VI);
        // Update selection.
        if vi_mode && self.terminal.selection.as_ref().is_some_and(|s| !s.is_empty()) {
            self.update_selection(self.terminal.vi_mode_cursor.point, Side::Right);
        } else if self.mouse.left_button_state == ElementState::Pressed
            || self.mouse.right_button_state == ElementState::Pressed
        {
            // Mouse-driven selections track the cell under the cursor.
            let display_offset = self.terminal.grid().display_offset();
            let point = self.mouse.point(&self.size_info(), display_offset);
            self.update_selection(point, self.mouse.cell_side);
        }
        // Scrolling inside Vi mode moves the cursor, so start typing.
        if vi_mode {
            self.on_typing_start();
        }
        // Update dirty if actually scrolled or moved Vi cursor in Vi mode.
        *self.dirty |=
            lines_changed != 0 || (vi_mode && old_vi_cursor != self.terminal.vi_mode_cursor);
    }
// Copy text selection.
fn copy_selection(&mut self, ty: ClipboardType) {
let text = match self.terminal.selection_to_string().filter(|s| !s.is_empty()) {
Some(text) => text,
None => return,
};
if ty == ClipboardType::Selection && self.config.selection.save_to_clipboard {
self.clipboard.store(ClipboardType::Clipboard, text.clone());
}
self.clipboard.store(ty, text);
}
fn selection_is_empty(&self) -> bool {
self.terminal.selection.as_ref().map_or(true, Selection::is_empty)
}
fn clear_selection(&mut self) {
// Clear the selection on the terminal.
let selection = self.terminal.selection.take();
// Mark the terminal as dirty when selection wasn't empty.
*self.dirty |= selection.is_some_and(|s| !s.is_empty());
}
    /// Extend the active selection to `point`/`side`, if one exists.
    fn update_selection(&mut self, mut point: Point, side: Side) {
        let mut selection = match self.terminal.selection.take() {
            Some(selection) => selection,
            None => return,
        };
        // Treat motion over message bar like motion over the last line.
        point.line = min(point.line, self.terminal.bottommost_line());
        // Update selection.
        selection.update(point, side);
        // Move vi cursor and expand selection.
        if self.terminal.mode().contains(TermMode::VI) && !self.search_active() {
            self.terminal.vi_mode_cursor.point = point;
            selection.include_all();
        }
        self.terminal.selection = Some(selection);
        *self.dirty = true;
    }
// Begin a new selection of type `ty` at `point`.
fn start_selection(&mut self, ty: SelectionType, point: Point, side: Side) {
self.terminal.selection = Some(Selection::new(ty, point, side));
*self.dirty = true;
// Keep the selection clipboard in sync from the start.
self.copy_selection(ClipboardType::Selection);
}
// Toggle/convert a selection: same type clears it, another type converts it,
// otherwise a new selection is started.
fn toggle_selection(&mut self, ty: SelectionType, point: Point, side: Side) {
match &mut self.terminal.selection {
// Re-triggering the same selection type clears it.
Some(selection) if selection.ty == ty && !selection.is_empty() => {
self.clear_selection();
},
// A different type converts the existing selection in place.
Some(selection) if !selection.is_empty() => {
selection.ty = ty;
*self.dirty = true;
self.copy_selection(ClipboardType::Selection);
},
// No usable selection; start a fresh one.
_ => self.start_selection(ty, point, side),
}
}
// Whether mouse reporting to the application is currently active.
#[inline]
fn mouse_mode(&self) -> bool {
    // Vi mode suppresses mouse reporting entirely.
    let mode = self.terminal.mode();
    mode.intersects(TermMode::MOUSE_MODE) && !mode.contains(TermMode::VI)
}
// Mutable access to the shared mouse state.
#[inline]
fn mouse_mut(&mut self) -> &mut Mouse {
self.mouse
}
// Immutable access to the shared mouse state.
#[inline]
fn mouse(&self) -> &Mouse {
self.mouse
}
// Mutable access to the current touch gesture classification.
#[inline]
fn touch_purpose(&mut self) -> &mut TouchPurpose {
self.touch
}
// Mutable access to the tracked keyboard modifier state.
#[inline]
fn modifiers(&mut self) -> &mut Modifiers {
self.modifiers
}
// Mutable access to this context's window.
#[inline]
fn window(&mut self) -> &mut Window {
&mut self.display.window
}
// Mutable access to this context's display.
#[inline]
fn display(&mut self) -> &mut Display {
self.display
}
// Immutable access to the terminal state.
#[inline]
fn terminal(&self) -> &Term<T> {
self.terminal
}
// Mutable access to the terminal state.
#[inline]
fn terminal_mut(&mut self) -> &mut Term<T> {
self.terminal
}
// Spawn a new Alacritty instance, reusing this instance's CLI arguments.
//
// Arguments that must not be inherited are filtered out:
// - `-e`/`--command` (and everything after it), since the new instance
//   shouldn't rerun the same command;
// - `--working-directory`, since on unix `start_daemon` derives the working
//   directory from the foreground shell instead.
//
// Fix: the inline `=` forms (`--command=CMD`, `--working-directory=PATH`)
// are also accepted by the CLI parser but were previously passed through.
fn spawn_new_instance(&mut self) {
    let mut env_args = env::args();
    let alacritty = env_args.next().unwrap();

    let mut args: Vec<String> = Vec::new();

    // Reuse the arguments passed to Alacritty for the new instance.
    #[allow(clippy::while_let_on_iterator)]
    while let Some(arg) = env_args.next() {
        // New instances shouldn't inherit command, in either space- or `=`-separated form.
        if arg == "-e" || arg == "--command" || arg.starts_with("--command=") {
            break;
        }

        // On unix, the working directory of the foreground shell is used by `start_daemon`.
        #[cfg(not(windows))]
        {
            if arg == "--working-directory" {
                // Skip the separate value argument as well.
                let _ = env_args.next();
                continue;
            }

            // Drop the inline `--working-directory=PATH` form too.
            if arg.starts_with("--working-directory=") {
                continue;
            }
        }

        args.push(arg);
    }

    self.spawn_daemon(&alacritty, &args);
}
// Request creation of a new terminal window (unix).
#[cfg(not(windows))]
fn create_new_window(&mut self, #[cfg(target_os = "macos")] tabbing_id: Option<String>) {
let mut options = WindowOptions::default();
// Inherit the working directory of this window's foreground process.
options.terminal_options.working_directory =
foreground_process_path(self.master_fd, self.shell_pid).ok();
#[cfg(target_os = "macos")]
{
options.window_tabbing_id = tabbing_id;
}
// Window creation is performed centrally by the event loop.
let _ = self.event_proxy.send_event(Event::new(EventType::CreateWindow(options), None));
}
// Request creation of a new terminal window (windows): no working directory
// or tabbing support, so default options are used.
#[cfg(windows)]
fn create_new_window(&mut self) {
let _ = self
.event_proxy
.send_event(Event::new(EventType::CreateWindow(WindowOptions::default()), None));
}
// Launch `program` with `args` as a detached background process, logging the outcome.
fn spawn_daemon<I, S>(&self, program: &str, args: I)
where
I: IntoIterator<Item = S> + Debug + Copy,
S: AsRef<OsStr>,
{
// On unix the PTY master fd and shell pid are forwarded so the daemon can
// inherit the foreground shell's working directory.
#[cfg(not(windows))]
let result = spawn_daemon(program, args, self.master_fd, self.shell_pid);
#[cfg(windows)]
let result = spawn_daemon(program, args);
match result {
Ok(_) => debug!("Launched {} with args {:?}", program, args),
Err(err) => warn!("Unable to launch {program} with args {args:?}: {err}"),
}
}
// Adjust the font size by `delta` pixels.
fn change_font_size(&mut self, delta: f32) {
    // Round to pick integral px steps, since fonts look better on them.
    let rounded_px = self.display.font_size.as_px().round();
    self.display.font_size = FontSize::from_px(rounded_px + delta);

    // Queue a display update with the resized font.
    let font = self.config.font.clone().with_size(self.display.font_size);
    self.display.pending_update.set_font(font);
}
// Restore the configured font size, scaled for the current display.
fn reset_font_size(&mut self) {
    // Recompute the configured size for the window's scale factor.
    let scale_factor = self.display.window.scale_factor as f32;
    self.display.font_size = self.config.font.size().scale(scale_factor);

    // Queue a display update with the restored font.
    let font = self.config.font.clone().with_size(self.display.font_size);
    self.display.pending_update.set_font(font);
}
// Remove the topmost message from the message bar, if any.
#[inline]
fn pop_message(&mut self) {
    // Nothing to pop when the buffer holds no messages.
    if self.message_buffer.is_empty() {
        return;
    }

    self.display.pending_update.dirty = true;
    self.message_buffer.pop();
}
// Enter search mode in the given direction, setting up history, origin and IME.
#[inline]
fn start_search(&mut self, direction: Direction) {
// Only create new history entry if the previous regex wasn't empty.
if self.search_state.history.front().map_or(true, |regex| !regex.is_empty()) {
self.search_state.history.push_front(String::new());
self.search_state.history.truncate(MAX_SEARCH_HISTORY_SIZE);
}
// Index 0 is the (new) active regex slot.
self.search_state.history_index = Some(0);
self.search_state.direction = direction;
self.search_state.focused_match = None;
// Store original search position as origin and reset location.
if self.terminal.mode().contains(TermMode::VI) {
self.search_state.origin = self.terminal.vi_mode_cursor.point;
self.search_state.display_offset_delta = 0;
// Adjust origin for content moving upward on search start.
if self.terminal.grid().cursor.point.line + 1 == self.terminal.screen_lines() {
self.search_state.origin.line -= 1;
}
} else {
// Without vi mode, search starts from the edge of the current viewport.
let viewport_top = Line(-(self.terminal.grid().display_offset() as i32)) - 1;
let viewport_bottom = viewport_top + self.terminal.bottommost_line();
let last_column = self.terminal.last_column();
self.search_state.origin = match direction {
Direction::Right => Point::new(viewport_top, Column(0)),
Direction::Left => Point::new(viewport_bottom, last_column),
};
}
// Enable IME so we can input into the search bar with it if we were in Vi mode.
self.window().set_ime_allowed(true);
self.display.damage_tracker.frame().mark_fully_damaged();
self.display.pending_update.dirty = true;
}
// Run a search pre-seeded with `text`, then jump the vi cursor to the
// nearest match in `direction`.
#[inline]
fn start_seeded_search(&mut self, direction: Direction, text: String) {
let origin = self.terminal.vi_mode_cursor.point;
// Start new search.
self.clear_selection();
self.start_search(direction);
// Enter initial selection text.
for c in text.chars() {
// Escape regex metacharacters so the seed is matched literally.
if let '$' | '('..='+' | '?' | '['..='^' | '{'..='}' = c {
self.search_input('\\');
}
self.search_input(c);
}
// Leave search mode.
self.confirm_search();
if !self.terminal.mode().contains(TermMode::VI) {
return;
}
// Find the target vi cursor point by going to the next match to the right of the origin,
// then jump to the next search match in the target direction.
let target = self.search_next(origin, Direction::Right, Side::Right).and_then(|rm| {
let regex_match = match direction {
Direction::Right => {
let origin = rm.end().add(self.terminal, Boundary::None, 1);
self.search_next(origin, Direction::Right, Side::Left)?
},
Direction::Left => {
let origin = rm.start().sub(self.terminal, Boundary::None, 1);
self.search_next(origin, Direction::Left, Side::Left)?
},
};
Some(*regex_match.start())
});
// Move the vi cursor to the target position.
if let Some(target) = target {
self.terminal_mut().vi_goto_point(target);
self.mark_dirty();
}
}
// Accept the current search and leave search mode.
#[inline]
fn confirm_search(&mut self) {
// Just cancel search when not in vi mode.
if !self.terminal.mode().contains(TermMode::VI) {
self.cancel_search();
return;
}
// Force unlimited search if the previous one was interrupted.
let timer_id = TimerId::new(Topic::DelayedSearch, self.display.window.id());
if self.scheduler.scheduled(timer_id) {
// A delayed re-search is still pending; finish it without a limit first.
self.goto_match(None);
}
self.exit_search();
}
// Abort the current search, restoring pre-search state where applicable.
#[inline]
fn cancel_search(&mut self) {
if self.terminal.mode().contains(TermMode::VI) {
// Recover pre-search state in vi mode.
self.search_reset_state();
} else if let Some(focused_match) = &self.search_state.focused_match {
// Create a selection for the focused match.
let start = *focused_match.start();
let end = *focused_match.end();
self.start_selection(SelectionType::Simple, start, Side::Left);
self.update_selection(end, Side::Right);
self.copy_selection(ClipboardType::Selection);
}
// Drop the compiled regex automata and leave search mode.
self.search_state.dfas = None;
self.exit_search();
}
// Handle a single character typed into the search bar.
#[inline]
fn search_input(&mut self, c: char) {
match self.search_state.history_index {
// Already editing the active (index 0) regex slot.
Some(0) => (),
// When currently in history, replace active regex with history on change.
Some(index) => {
self.search_state.history[0] = self.search_state.history[index].clone();
self.search_state.history_index = Some(0);
},
// No active search; ignore input.
None => return,
}
let regex = &mut self.search_state.history[0];
match c {
// Handle backspace/ctrl+h.
'\x08' | '\x7f' => {
let _ = regex.pop();
},
// Add ascii and unicode text.
' '..='~' | '\u{a0}'..='\u{10ffff}' => regex.push(c),
// Ignore non-printable characters.
_ => return,
}
if !self.terminal.mode().contains(TermMode::VI) {
// Clear selection so we do not obstruct any matches.
self.terminal.selection = None;
}
self.update_search();
}
// Delete the last space-separated word from the active search regex.
#[inline]
fn search_pop_word(&mut self) {
    let regex = match self.search_state.regex_mut() {
        Some(regex) => regex,
        None => return,
    };

    // Drop trailing spaces, then cut everything after the last remaining space.
    *regex = regex.trim_end().to_owned();
    let word_start = regex.rfind(' ').map_or(0, |i| i + 1);
    regex.truncate(word_start);

    self.update_search();
}
/// Go to the previous regex in the search history.
#[inline]
fn search_history_previous(&mut self) {
    let history_len = self.search_state.history.len();
    let Some(index) = &mut self.search_state.history_index else { return };

    // Stop at the oldest history entry.
    if *index + 1 >= history_len {
        return;
    }

    *index += 1;
    self.update_search();
}
/// Go to the next regex in the search history.
#[inline]
fn search_history_next(&mut self) {
// Index 0 is the newest entry; stop there.
let index = match &mut self.search_state.history_index {
Some(0) | None => return,
Some(index) => index,
};
*index -= 1;
self.update_search();
}
// Move the search origin past the focused match and jump to the next match
// in `direction`, keeping the stored origin/offset bookkeeping consistent.
#[inline]
fn advance_search_origin(&mut self, direction: Direction) {
// Use focused match as new search origin if available.
if let Some(focused_match) = &self.search_state.focused_match {
let new_origin = match direction {
Direction::Right => focused_match.end().add(self.terminal, Boundary::None, 1),
Direction::Left => focused_match.start().sub(self.terminal, Boundary::None, 1),
};
self.terminal.scroll_to_point(new_origin);
self.search_state.display_offset_delta = 0;
self.search_state.origin = new_origin;
}
// Search for the next match using the supplied direction.
let search_direction = mem::replace(&mut self.search_state.direction, direction);
self.goto_match(None);
self.search_state.direction = search_direction;
// If we found a match, we set the search origin right in front of it to make sure that
// after modifications to the regex the search is started without moving the focused match
// around.
let focused_match = match &self.search_state.focused_match {
Some(focused_match) => focused_match,
None => return,
};
// Set new origin to the left/right of the match, depending on search direction.
let new_origin = match self.search_state.direction {
Direction::Right => *focused_match.start(),
Direction::Left => *focused_match.end(),
};
// Store the search origin with display offset by checking how far we need to scroll to it.
let old_display_offset = self.terminal.grid().display_offset() as i32;
self.terminal.scroll_to_point(new_origin);
let new_display_offset = self.terminal.grid().display_offset() as i32;
self.search_state.display_offset_delta = new_display_offset - old_display_offset;
// Store origin and scroll back to the match.
self.terminal.scroll_display(Scroll::Delta(-self.search_state.display_offset_delta));
self.search_state.origin = new_origin;
}
/// Find the next search match.
///
/// Returns `None` when no search automata are compiled or no match exists.
fn search_next(&mut self, origin: Point, direction: Direction, side: Side) -> Option<Match> {
self.search_state
.dfas
.as_mut()
.and_then(|dfas| self.terminal.search_next(dfas, origin, direction, side, None))
}
/// Direction of the active search.
#[inline]
fn search_direction(&self) -> Direction {
self.search_state.direction
}
/// Whether a search session is currently active.
///
/// `history_index` is set by `start_search` and cleared by `exit_search`.
#[inline]
fn search_active(&self) -> bool {
self.search_state.history_index.is_some()
}
/// Handle keyboard typing start.
///
/// This will temporarily disable some features like terminal cursor blinking or the mouse
/// cursor.
///
/// All features are re-enabled again automatically.
#[inline]
fn on_typing_start(&mut self) {
// Disable cursor blinking.
let timer_id = TimerId::new(Topic::BlinkCursor, self.display.window.id());
if self.scheduler.unschedule(timer_id).is_some() {
// Restart the blink interval so the cursor stays solid while typing.
self.schedule_blinking();
// Mark the cursor as visible and queue redraw if the cursor was hidden.
if mem::take(&mut self.display.cursor_hidden) {
*self.dirty = true;
}
} else if *self.cursor_blink_timed_out {
// Blinking previously stopped due to timeout; re-evaluate it.
self.update_cursor_blinking();
}
// Hide mouse cursor.
if self.config.mouse.hide_when_typing {
self.display.window.set_mouse_visible(false);
}
}
/// Process a new character for keyboard hints.
fn hint_input(&mut self, c: char) {
if let Some(hint) = self.display.hint_state.keyboard_input(self.terminal, c) {
// Keyboard-selected hints are always allowed to launch.
self.mouse.block_hint_launcher = false;
self.trigger_hint(&hint);
}
*self.dirty = true;
}
/// Trigger a hint action.
fn trigger_hint(&mut self, hint: &HintMatch) {
// Launching was suppressed for this interaction (e.g. by the mouse handler).
if self.mouse.block_hint_launcher {
return;
}
let hint_bounds = hint.bounds();
let text = match hint.text(self.terminal) {
Some(text) => text,
None => return,
};
match &hint.action() {
// Launch an external program.
HintAction::Command(command) => {
// The matched text is appended as the final program argument.
let mut args = command.args().to_vec();
args.push(text.into());
self.spawn_daemon(command.program(), &args);
},
// Copy the text to the clipboard.
HintAction::Action(HintInternalAction::Copy) => {
self.clipboard.store(ClipboardType::Clipboard, text);
},
// Write the text to the PTY/search.
HintAction::Action(HintInternalAction::Paste) => self.paste(&text, true),
// Select the text.
HintAction::Action(HintInternalAction::Select) => {
self.start_selection(SelectionType::Simple, *hint_bounds.start(), Side::Left);
self.update_selection(*hint_bounds.end(), Side::Right);
self.copy_selection(ClipboardType::Selection);
},
// Move the vi mode cursor.
HintAction::Action(HintInternalAction::MoveViModeCursor) => {
// Enter vi mode if we're not in it already.
if !self.terminal.mode().contains(TermMode::VI) {
self.terminal.toggle_vi_mode();
}
self.terminal.vi_goto_point(*hint_bounds.start());
self.mark_dirty();
},
}
}
/// Expand the selection to the current mouse cursor position.
#[inline]
fn expand_selection(&mut self) {
let control = self.modifiers().state().control_key();
// Click count determines the selection granularity; control forces block selection.
let selection_type = match self.mouse().click_state {
ClickState::None => return,
_ if control => SelectionType::Block,
ClickState::Click => SelectionType::Simple,
ClickState::DoubleClick => SelectionType::Semantic,
ClickState::TripleClick => SelectionType::Lines,
};
// Load mouse point, treating message bar and padding as the closest cell.
let display_offset = self.terminal().grid().display_offset();
let point = self.mouse().point(&self.size_info(), display_offset);
let cell_side = self.mouse().cell_side;
let selection = match &mut self.terminal_mut().selection {
Some(selection) => selection,
None => return,
};
selection.ty = selection_type;
self.update_selection(point, cell_side);
// Move vi mode cursor to mouse click position.
if self.terminal().mode().contains(TermMode::VI) && !self.search_active() {
self.terminal_mut().vi_mode_cursor.point = point;
}
}
/// Get the semantic word at the specified point.
///
/// Semantic escape characters at `point` are skipped to the right first, so
/// the returned word is the one the point logically belongs to; an empty
/// string is returned when the grid contains only escape characters.
fn semantic_word(&self, point: Point) -> String {
let terminal = self.terminal();
let grid = terminal.grid();
// Find the next semantic word boundary to the right.
let mut end = terminal.semantic_search_right(point);
// Get point at which skipping over semantic characters has led us back to the
// original character.
let start_cell = &grid[point];
let search_end = if start_cell.flags.intersects(Flags::LEADING_WIDE_CHAR_SPACER) {
// Wide characters occupy two cells (plus a possible leading spacer).
point.add(terminal, Boundary::None, 2)
} else if start_cell.flags.intersects(Flags::WIDE_CHAR) {
point.add(terminal, Boundary::None, 1)
} else {
point
};
// Keep moving until we're not on top of a semantic escape character.
let semantic_chars = terminal.semantic_escape_chars();
loop {
let cell = &grid[end];
// Get cell's character, taking wide characters into account.
let c = if cell.flags.contains(Flags::WIDE_CHAR_SPACER) {
grid[end.sub(terminal, Boundary::None, 1)].c
} else {
cell.c
};
if !semantic_chars.contains(c) {
break;
}
end = terminal.semantic_search_right(end.add(terminal, Boundary::None, 1));
// Stop if the entire grid is only semantic escape characters.
if end == search_end {
return String::new();
}
}
// Find the beginning of the semantic word.
let start = terminal.semantic_search_left(end);
terminal.bounds_to_string(start, end)
}
/// Handle beginning of terminal text input.
fn on_terminal_input_start(&mut self) {
self.on_typing_start();
self.clear_selection();
// Jump back to the bottom of the scrollback so typed input is visible.
if self.terminal().grid().display_offset() != 0 {
self.scroll(Scroll::Bottom);
}
}
/// Paste a text into the terminal.
fn paste(&mut self, text: &str, bracketed: bool) {
if self.search_active() {
// While searching, pasted text feeds the search bar instead of the PTY.
for c in text.chars() {
self.search_input(c);
}
} else if self.inline_search_state.char_pending {
// A pending vi inline search consumes the next input character.
self.inline_search_input(text);
} else if bracketed && self.terminal().mode().contains(TermMode::BRACKETED_PASTE) {
self.on_terminal_input_start();
self.write_to_pty(&b"\x1b[200~"[..]);
// Write filtered escape sequences.
//
// We remove `\x1b` to ensure it's impossible for the pasted text to write the bracketed
// paste end escape `\x1b[201~` and `\x03` since some shells incorrectly terminate
// bracketed paste when they receive it.
let filtered = text.replace(['\x1b', '\x03'], "");
self.write_to_pty(filtered.into_bytes());
self.write_to_pty(&b"\x1b[201~"[..]);
} else {
self.on_terminal_input_start();
let payload = if bracketed {
// In non-bracketed (ie: normal) mode, terminal applications cannot distinguish
// pasted data from keystrokes.
//
// In theory, we should construct the keystrokes needed to produce the data we are
// pasting... since that's neither practical nor sensible (and probably an
// impossible task to solve in a general way), we'll just replace line breaks
// (windows and unix style) with a single carriage return (\r, which is what the
// Enter key produces).
text.replace("\r\n", "\r").replace('\n', "\r").into_bytes()
} else {
// When we explicitly disable bracketed paste don't manipulate with the input,
// so we pass user input as is.
text.to_owned().into_bytes()
};
self.write_to_pty(payload);
}
}
/// Toggle the vi mode status.
#[inline]
fn toggle_vi_mode(&mut self) {
let was_in_vi_mode = self.terminal.mode().contains(TermMode::VI);
if was_in_vi_mode {
// If we had search running when leaving Vi mode we should mark terminal fully damaged
// to cleanup highlighted results.
if self.search_state.dfas.take().is_some() {
self.display.damage_tracker.frame().mark_fully_damaged();
}
} else {
// Entering Vi mode drops any active selection.
self.clear_selection();
}
if self.search_active() {
self.cancel_search();
}
// We don't want IME in Vi mode.
self.window().set_ime_allowed(was_in_vi_mode);
self.terminal.toggle_vi_mode();
*self.dirty = true;
}
/// Get mutable access to the vi inline search state.
fn inline_search_state(&mut self) -> &mut InlineSearchState {
self.inline_search_state
}
/// Start vi mode inline search.
///
/// The next character typed is captured as the search target; `stop_short`
/// makes the cursor stop one cell before the match.
fn start_inline_search(&mut self, direction: Direction, stop_short: bool) {
self.inline_search_state.stop_short = stop_short;
self.inline_search_state.direction = direction;
// Wait for the search character before jumping.
self.inline_search_state.char_pending = true;
self.inline_search_state.character = None;
}
/// Jump to the next matching character in the line.
///
/// Repeats the last inline search in its stored direction.
fn inline_search_next(&mut self) {
let direction = self.inline_search_state.direction;
self.inline_search(direction);
}
/// Jump to the previous matching character in the line.
///
/// Repeats the last inline search opposite to its stored direction.
fn inline_search_previous(&mut self) {
let direction = self.inline_search_state.direction.opposite();
self.inline_search(direction);
}
/// Process input during inline search.
fn inline_search_input(&mut self, text: &str) {
    // Ignore input with empty text, like modifier keys.
    let Some(c) = text.chars().next() else { return };

    // Capture the search character and leave the pending state.
    self.inline_search_state.char_pending = false;
    self.inline_search_state.character = Some(c);
    self.window().set_ime_allowed(false);

    // Immediately move to the captured character.
    self.inline_search_next();
}
// Topmost message of the message bar, if any.
fn message(&self) -> Option<&Message> {
self.message_buffer.message()
}
// Immutable access to the UI configuration.
fn config(&self) -> &UiConfig {
self.config
}
// Active winit event loop (macOS only).
#[cfg(target_os = "macos")]
fn event_loop(&self) -> &ActiveEventLoop {
self.event_loop
}
// Mutable access to the clipboard handle.
fn clipboard_mut(&mut self) -> &mut Clipboard {
self.clipboard
}
// Mutable access to the timer scheduler.
fn scheduler_mut(&mut self) -> &mut Scheduler {
self.scheduler
}
}
// Private search/cursor-blink helpers shared by the action handlers above.
impl<'a, N: Notify + 'a, T: EventListener> ActionContext<'a, N, T> {
// Recompile and re-run the search after the active regex changed.
fn update_search(&mut self) {
let regex = match self.search_state.regex() {
Some(regex) => regex,
None => return,
};
// Hide cursor while typing into the search bar.
if self.config.mouse.hide_when_typing {
self.display.window.set_mouse_visible(false);
}
if regex.is_empty() {
// Stop search if there's nothing to search for.
self.search_reset_state();
self.search_state.dfas = None;
} else {
// Create search dfas for the new regex string.
self.search_state.dfas = RegexSearch::new(regex).ok();
// Update search highlighting.
self.goto_match(MAX_SEARCH_WHILE_TYPING);
}
*self.dirty = true;
}
/// Reset terminal to the state before search was started.
fn search_reset_state(&mut self) {
// Unschedule pending timers.
let timer_id = TimerId::new(Topic::DelayedSearch, self.display.window.id());
self.scheduler.unschedule(timer_id);
// Clear focused match.
self.search_state.focused_match = None;
// The viewport reset logic is only needed for vi mode, since without it our origin is
// always at the current display offset instead of at the vi cursor position which we need
// to recover to.
if !self.terminal.mode().contains(TermMode::VI) {
return;
}
// Reset display offset and cursor position.
self.terminal.vi_mode_cursor.point = self.search_state.origin;
self.terminal.scroll_display(Scroll::Delta(self.search_state.display_offset_delta));
self.search_state.display_offset_delta = 0;
*self.dirty = true;
}
/// Jump to the first regex match from the search origin.
fn goto_match(&mut self, mut limit: Option<usize>) {
let dfas = match &mut self.search_state.dfas {
Some(dfas) => dfas,
None => return,
};
// Limit search only when enough lines are available to run into the limit.
limit = limit.filter(|&limit| limit <= self.terminal.total_lines());
// Jump to the next match.
let direction = self.search_state.direction;
let clamped_origin = self.search_state.origin.grid_clamp(self.terminal, Boundary::Grid);
match self.terminal.search_next(dfas, clamped_origin, direction, Side::Left, limit) {
Some(regex_match) => {
let old_offset = self.terminal.grid().display_offset() as i32;
if self.terminal.mode().contains(TermMode::VI) {
// Move vi cursor to the start of the match.
self.terminal.vi_goto_point(*regex_match.start());
} else {
// Select the match when vi mode is not active.
self.terminal.scroll_to_point(*regex_match.start());
}
// Update the focused match.
self.search_state.focused_match = Some(regex_match);
// Store number of lines the viewport had to be moved.
let display_offset = self.terminal.grid().display_offset();
self.search_state.display_offset_delta += old_offset - display_offset as i32;
// Since we found a result, we require no delayed re-search.
let timer_id = TimerId::new(Topic::DelayedSearch, self.display.window.id());
self.scheduler.unschedule(timer_id);
},
// Reset viewport only when we know there is no match, to prevent unnecessary jumping.
None if limit.is_none() => self.search_reset_state(),
None => {
// Schedule delayed search if we ran into our search limit.
let timer_id = TimerId::new(Topic::DelayedSearch, self.display.window.id());
if !self.scheduler.scheduled(timer_id) {
let event = Event::new(EventType::SearchNext, self.display.window.id());
self.scheduler.schedule(event, TYPING_SEARCH_DELAY, false, timer_id);
}
// Clear focused match.
self.search_state.focused_match = None;
},
}
*self.dirty = true;
}
/// Cleanup the search state.
fn exit_search(&mut self) {
// IME stays disabled inside Vi mode.
let vi_mode = self.terminal.mode().contains(TermMode::VI);
self.window().set_ime_allowed(!vi_mode);
self.display.damage_tracker.frame().mark_fully_damaged();
self.display.pending_update.dirty = true;
// Leaving search mode is signalled by clearing the history index.
self.search_state.history_index = None;
// Clear focused match.
self.search_state.focused_match = None;
}
/// Update the cursor blinking state.
fn update_cursor_blinking(&mut self) {
// Get config cursor style.
let mut cursor_style = self.config.cursor.style;
let vi_mode = self.terminal.mode().contains(TermMode::VI);
if vi_mode {
cursor_style = self.config.cursor.vi_mode_style.unwrap_or(cursor_style);
}
// Check terminal cursor style.
let terminal_blinking = self.terminal.cursor_style().blinking;
let mut blinking = cursor_style.blinking_override().unwrap_or(terminal_blinking);
// Only blink while the cursor is shown and no IME preedit is active.
blinking &= (vi_mode || self.terminal().mode().contains(TermMode::SHOW_CURSOR))
&& self.display().ime.preedit().is_none();
// Update cursor blinking state.
let window_id = self.display.window.id();
self.scheduler.unschedule(TimerId::new(Topic::BlinkCursor, window_id));
self.scheduler.unschedule(TimerId::new(Topic::BlinkTimeout, window_id));
// Reset blinking timeout.
*self.cursor_blink_timed_out = false;
if blinking && self.terminal.is_focused {
self.schedule_blinking();
self.schedule_blinking_timeout();
} else {
// Not blinking: ensure the cursor is visible again.
self.display.cursor_hidden = false;
*self.dirty = true;
}
}
// Schedule the repeating cursor-blink timer.
fn schedule_blinking(&mut self) {
let window_id = self.display.window.id();
let timer_id = TimerId::new(Topic::BlinkCursor, window_id);
let event = Event::new(EventType::BlinkCursor, window_id);
let blinking_interval = Duration::from_millis(self.config.cursor.blink_interval());
self.scheduler.schedule(event, blinking_interval, true, timer_id);
}
// Schedule the one-shot timer that stops blinking after the configured timeout.
fn schedule_blinking_timeout(&mut self) {
let blinking_timeout = self.config.cursor.blink_timeout();
// A zero timeout means blinking never times out.
if blinking_timeout == Duration::ZERO {
return;
}
let window_id = self.display.window.id();
let event = Event::new(EventType::BlinkCursorTimeout, window_id);
let timer_id = TimerId::new(Topic::BlinkTimeout, window_id);
self.scheduler.schedule(event, blinking_timeout, false, timer_id);
}
/// Perform vi mode inline search in the specified direction.
fn inline_search(&mut self, direction: Direction) {
let c = match self.inline_search_state.character {
Some(c) => c,
None => return,
};
let mut buf = [0; 4];
let search_character = c.encode_utf8(&mut buf);
// Find next match in this line.
let vi_point = self.terminal.vi_mode_cursor.point;
let point = match direction {
Direction::Right => self.terminal.inline_search_right(vi_point, search_character),
Direction::Left => self.terminal.inline_search_left(vi_point, search_character),
};
// Jump to point if there's a match.
if let Ok(mut point) = point {
if self.inline_search_state.stop_short {
// Stop one cell before the match (vi `t`/`T` style motion).
let grid = self.terminal.grid();
point = match direction {
Direction::Right => {
grid.iter_from(point).prev().map_or(point, |cell| cell.point)
},
Direction::Left => {
grid.iter_from(point).next().map_or(point, |cell| cell.point)
},
};
}
self.terminal.vi_goto_point(point);
self.mark_dirty();
}
}
}
/// Identified purpose of the touch input.
///
/// Uses the derived `Default` (Rust 1.62+) with `None` as the default variant,
/// replacing the previous hand-written `impl Default`.
#[derive(Debug, Default)]
pub enum TouchPurpose {
/// No active touch interaction.
#[default]
None,
/// Selection gesture, carrying the originating touch event.
Select(TouchEvent),
/// Scroll gesture, carrying the originating touch event.
Scroll(TouchEvent),
/// Two-finger zoom gesture.
Zoom(TouchZoom),
/// Potential tap gesture.
Tap(TouchEvent),
/// Gesture that should be ignored, tracked by its touch slot ids.
Invalid(HashSet<u64, RandomState>),
}
/// Touch zooming state.
#[derive(Debug)]
pub struct TouchZoom {
// The two touch slots driving the zoom gesture.
slots: (TouchEvent, TouchEvent),
// Fractional font size change left over from the last `font_delta` update.
fractions: f32,
}
impl TouchZoom {
// Create a zoom gesture from its two initial touch slots.
pub fn new(slots: (TouchEvent, TouchEvent)) -> Self {
Self { slots, fractions: Default::default() }
}
/// Get slot distance change since last update.
pub fn font_delta(&mut self, slot: TouchEvent) -> f32 {
let old_distance = self.distance();
// Update touch slots.
if slot.id == self.slots.0.id {
self.slots.0 = slot;
} else {
self.slots.1 = slot;
}
// Calculate font change in `FONT_SIZE_STEP` increments.
let delta = (self.distance() - old_distance) * TOUCH_ZOOM_FACTOR + self.fractions;
let font_delta = (delta.abs() / FONT_SIZE_STEP).floor() * FONT_SIZE_STEP * delta.signum();
// Carry the sub-step remainder over to the next update.
self.fractions = delta - font_delta;
font_delta
}
/// Get active touch slots.
pub fn slots(&self) -> HashSet<u64, RandomState> {
let mut set = HashSet::default();
set.insert(self.slots.0.id);
set.insert(self.slots.1.id);
set
}
/// Calculate distance between slots.
fn distance(&self) -> f32 {
let delta_x = self.slots.0.location.x - self.slots.1.location.x;
let delta_y = self.slots.0.location.y - self.slots.1.location.y;
delta_x.hypot(delta_y) as f32
}
}
/// State of the mouse.
#[derive(Debug)]
pub struct Mouse {
/// Pressed/released state of the left mouse button.
pub left_button_state: ElementState,
/// Pressed/released state of the middle mouse button.
pub middle_button_state: ElementState,
/// Pressed/released state of the right mouse button.
pub right_button_state: ElementState,
/// Time of the last click; presumably used for multi-click detection.
pub last_click_timestamp: Instant,
/// Button pressed during the last click.
pub last_click_button: MouseButton,
/// Single/double/triple click state.
pub click_state: ClickState,
/// Unprocessed scroll accumulated from pointer events.
pub accumulated_scroll: AccumulatedScroll,
/// Side of the hovered cell the pointer is on.
pub cell_side: Side,
/// Whether hint launching is suppressed for the current interaction.
pub block_hint_launcher: bool,
/// Whether hint highlighting needs to be recomputed.
pub hint_highlight_dirty: bool,
/// Whether the pointer is inside the text area.
pub inside_text_area: bool,
/// Pointer x position in window pixels (see `Mouse::point`).
pub x: usize,
/// Pointer y position in window pixels (see `Mouse::point`).
pub y: usize,
}
impl Default for Mouse {
    fn default() -> Mouse {
        Mouse {
            // All buttons start released with no registered clicks.
            left_button_state: ElementState::Released,
            middle_button_state: ElementState::Released,
            right_button_state: ElementState::Released,
            click_state: ClickState::None,
            last_click_timestamp: Instant::now(),
            last_click_button: MouseButton::Left,
            cell_side: Side::Left,
            // Remaining fields use their type defaults.
            accumulated_scroll: Default::default(),
            hint_highlight_dirty: Default::default(),
            block_hint_launcher: Default::default(),
            inside_text_area: Default::default(),
            x: Default::default(),
            y: Default::default(),
        }
    }
}
impl Mouse {
/// Convert mouse pixel coordinates to viewport point.
///
/// If the coordinates are outside of the terminal grid, like positions inside the padding, the
/// coordinates will be clamped to the closest grid coordinates.
#[inline]
pub fn point(&self, size: &SizeInfo, display_offset: usize) -> Point {
// Subtract padding, divide by cell size, then clamp to the grid dimensions.
let col = self.x.saturating_sub(size.padding_x() as usize) / (size.cell_width() as usize);
let col = min(Column(col), size.last_column());
let line = self.y.saturating_sub(size.padding_y() as usize) / (size.cell_height() as usize);
let line = min(line, size.bottommost_line().0 as usize);
// Translate from viewport-relative to grid coordinates.
term::viewport_to_point(display_offset, Point::new(line, col))
}
}
/// Multi-click state of the mouse.
#[derive(Debug, Eq, PartialEq)]
pub enum ClickState {
/// No click registered.
None,
/// Single click.
Click,
/// Second click of a double click.
DoubleClick,
/// Third click of a triple click.
TripleClick,
}
/// The amount of scroll accumulated from the pointer events.
///
/// NOTE(review): fractional remainders appear to be carried here between
/// events — confirm against the scroll handling code.
#[derive(Default, Debug)]
pub struct AccumulatedScroll {
/// Scroll we should perform along `x` axis.
pub x: f64,
/// Scroll we should perform along `y` axis.
pub y: f64,
}
impl input::Processor<EventProxy, ActionContext<'_, Notifier, EventProxy>> {
/// Handle events from winit.
pub fn handle_event(&mut self, event: WinitEvent<Event>) {
match event {
WinitEvent::UserEvent(Event { payload, .. }) => match payload {
EventType::SearchNext => self.ctx.goto_match(None),
EventType::Scroll(scroll) => self.ctx.scroll(scroll),
EventType::BlinkCursor => {
// Only change state when timeout isn't reached, since we could get
// BlinkCursor and BlinkCursorTimeout events at the same time.
if !*self.ctx.cursor_blink_timed_out {
self.ctx.display.cursor_hidden ^= true;
*self.ctx.dirty = true;
}
},
EventType::BlinkCursorTimeout => {
// Disable blinking after timeout reached.
let timer_id = TimerId::new(Topic::BlinkCursor, self.ctx.display.window.id());
self.ctx.scheduler.unschedule(timer_id);
*self.ctx.cursor_blink_timed_out = true;
self.ctx.display.cursor_hidden = false;
*self.ctx.dirty = true;
},
// Add message only if it's not already queued.
EventType::Message(message) if !self.ctx.message_buffer.is_queued(&message) => {
self.ctx.message_buffer.push(message);
self.ctx.display.pending_update.dirty = true;
},
EventType::Terminal(event) => match event {
TerminalEvent::Title(title) => {
if !self.ctx.preserve_title && self.ctx.config.window.dynamic_title {
self.ctx.window().set_title(title);
}
},
TerminalEvent::ResetTitle => {
let window_config = &self.ctx.config.window;
if !self.ctx.preserve_title && window_config.dynamic_title {
self.ctx.display.window.set_title(window_config.identity.title.clone());
}
},
TerminalEvent::Bell => {
// Set window urgency hint when window is not focused.
let focused = self.ctx.terminal.is_focused;
if !focused && self.ctx.terminal.mode().contains(TermMode::URGENCY_HINTS) {
self.ctx.window().set_urgent(true);
}
// Ring visual bell.
self.ctx.display.visual_bell.ring();
// Execute bell command.
if let Some(bell_command) = &self.ctx.config.bell.command {
self.ctx.spawn_daemon(bell_command.program(), bell_command.args());
}
},
TerminalEvent::ClipboardStore(clipboard_type, content) => {
if self.ctx.terminal.is_focused {
self.ctx.clipboard.store(clipboard_type, content);
}
},
TerminalEvent::ClipboardLoad(clipboard_type, format) => {
if self.ctx.terminal.is_focused {
let text = format(self.ctx.clipboard.load(clipboard_type).as_str());
self.ctx.write_to_pty(text.into_bytes());
}
},
TerminalEvent::ColorRequest(index, format) => {
let color = match self.ctx.terminal().colors()[index] {
Some(color) => Rgb(color),
// Ignore cursor color requests unless it was changed.
None if index == NamedColor::Cursor as usize => return,
None => self.ctx.display.colors[index],
};
self.ctx.write_to_pty(format(color.0).into_bytes());
},
TerminalEvent::TextAreaSizeRequest(format) => {
let text = format(self.ctx.size_info().into());
self.ctx.write_to_pty(text.into_bytes());
},
TerminalEvent::PtyWrite(text) => self.ctx.write_to_pty(text.into_bytes()),
TerminalEvent::MouseCursorDirty => self.reset_mouse_cursor(),
TerminalEvent::CursorBlinkingChange => self.ctx.update_cursor_blinking(),
TerminalEvent::Exit | TerminalEvent::ChildExit(_) | TerminalEvent::Wakeup => (),
},
#[cfg(unix)]
EventType::IpcConfig(_) => (),
EventType::Message(_)
| EventType::ConfigReload(_)
| EventType::CreateWindow(_)
| EventType::Frame => (),
},
WinitEvent::WindowEvent { event, .. } => {
match event {
WindowEvent::CloseRequested => {
// User asked to close the window, so no need to hold it.
self.ctx.window().hold = false;
self.ctx.terminal.exit();
},
WindowEvent::ScaleFactorChanged { scale_factor, .. } => {
let old_scale_factor =
mem::replace(&mut self.ctx.window().scale_factor, scale_factor);
let display_update_pending = &mut self.ctx.display.pending_update;
// Rescale font size for the new factor.
let font_scale = scale_factor as f32 / old_scale_factor as f32;
self.ctx.display.font_size = self.ctx.display.font_size.scale(font_scale);
let font = self.ctx.config.font.clone();
display_update_pending.set_font(font.with_size(self.ctx.display.font_size));
},
WindowEvent::Resized(size) => {
// Ignore resize events to zero in any dimension, to avoid issues with Winit
// and the ConPTY. A 0x0 resize will also occur when the window is minimized
// on Windows.
if size.width == 0 || size.height == 0 {
return;
}
self.ctx.display.pending_update.set_dimensions(size);
},
WindowEvent::KeyboardInput { event, is_synthetic: false, .. } => {
self.key_input(event);
},
WindowEvent::ModifiersChanged(modifiers) => self.modifiers_input(modifiers),
WindowEvent::MouseInput { state, button, .. } => {
self.ctx.window().set_mouse_visible(true);
self.mouse_input(state, button);
},
WindowEvent::CursorMoved { position, .. } => {
self.ctx.window().set_mouse_visible(true);
self.mouse_moved(position);
},
WindowEvent::MouseWheel { delta, phase, .. } => {
self.ctx.window().set_mouse_visible(true);
self.mouse_wheel_input(delta, phase);
},
WindowEvent::Touch(touch) => self.touch(touch),
WindowEvent::Focused(is_focused) => {
self.ctx.terminal.is_focused = is_focused;
// When the unfocused hollow is used we must redraw on focus change.
if self.ctx.config.cursor.unfocused_hollow {
*self.ctx.dirty = true;
}
// Reset the urgency hint when gaining focus.
if is_focused {
self.ctx.window().set_urgent(false);
}
self.ctx.update_cursor_blinking();
self.on_focus_change(is_focused);
},
WindowEvent::Occluded(occluded) => {
*self.ctx.occluded = occluded;
},
WindowEvent::DroppedFile(path) => {
let path: String = path.to_string_lossy().into();
self.ctx.paste(&(path + " "), true);
},
WindowEvent::CursorLeft { .. } => {
self.ctx.mouse.inside_text_area = false;
if self.ctx.display().highlighted_hint.is_some() {
*self.ctx.dirty = true;
}
},
WindowEvent::Ime(ime) => match ime {
Ime::Commit(text) => {
*self.ctx.dirty = true;
// Don't use bracketed paste for single char input.
self.ctx.paste(&text, text.chars().count() > 1);
self.ctx.update_cursor_blinking();
},
Ime::Preedit(text, cursor_offset) => {
let preedit =
(!text.is_empty()).then(|| Preedit::new(text, cursor_offset));
if self.ctx.display.ime.preedit() != preedit.as_ref() {
self.ctx.display.ime.set_preedit(preedit);
self.ctx.update_cursor_blinking();
*self.ctx.dirty = true;
}
},
Ime::Enabled => {
self.ctx.display.ime.set_enabled(true);
*self.ctx.dirty = true;
},
Ime::Disabled => {
self.ctx.display.ime.set_enabled(false);
*self.ctx.dirty = true;
},
},
WindowEvent::KeyboardInput { is_synthetic: true, .. }
| WindowEvent::ActivationTokenDone { .. }
| WindowEvent::DoubleTapGesture { .. }
| WindowEvent::TouchpadPressure { .. }
| WindowEvent::RotationGesture { .. }
| WindowEvent::CursorEntered { .. }
| WindowEvent::PinchGesture { .. }
| WindowEvent::AxisMotion { .. }
| WindowEvent::PanGesture { .. }
| WindowEvent::HoveredFileCancelled
| WindowEvent::Destroyed
| WindowEvent::ThemeChanged(_)
| WindowEvent::HoveredFile(_)
| WindowEvent::RedrawRequested
| WindowEvent::Moved(_) => (),
}
},
WinitEvent::Suspended
| WinitEvent::NewEvents { .. }
| WinitEvent::DeviceEvent { .. }
| WinitEvent::LoopExiting
| WinitEvent::Resumed
| WinitEvent::MemoryWarning
| WinitEvent::AboutToWait => (),
}
}
}
/// Proxy for injecting Alacritty [`Event`]s into the winit event loop.
#[derive(Debug, Clone)]
pub struct EventProxy {
    // Winit event loop handle used to deliver the user events.
    proxy: EventLoopProxy<Event>,
    // Window the forwarded events are addressed to.
    window_id: WindowId,
}
impl EventProxy {
pub fn new(proxy: EventLoopProxy<Event>, window_id: WindowId) -> Self {
Self { proxy, window_id }
}
/// Send an event to the event loop.
pub fn send_event(&self, event: EventType) {
let _ = self.proxy.send_event(Event::new(event, self.window_id));
}
}
impl EventListener for EventProxy {
fn send_event(&self, event: TerminalEvent) {
let _ = self.proxy.send_event(Event::new(event.into(), self.window_id));
}
}
| rust | {
"argument_definitions": [
{
"definitions": [
"pub struct UiConfig {\n /// Miscellaneous configuration options.\n pub general: General,\n\n /// Extra environment variables.\n pub env: HashMap<String, String>,\n\n /// How much scrolling history to keep.\n pub scrolling: Scrolling,\n\n /// Cursor configuration.\n pub cursor: Cursor,\n\n /// Selection configuration.\n pub selection: Selection,\n\n /// Font configuration.\n pub font: Font,\n\n /// Window configuration.\n pub window: WindowConfig,\n\n /// Mouse configuration.\n pub mouse: Mouse,\n\n /// Debug options.\n pub debug: Debug,\n\n /// Bell configuration.\n pub bell: BellConfig,\n\n /// RGB values for colors.\n pub colors: Colors,\n\n /// Path where config was loaded from.\n #[config(skip)]\n pub config_paths: Vec<PathBuf>,\n\n /// Regex hints for interacting with terminal content.\n pub hints: Hints,\n\n /// Config for the alacritty_terminal itself.\n pub terminal: Terminal,\n\n /// Keyboard configuration.\n keyboard: Keyboard,\n\n /// Path to a shell program to run on startup.\n #[config(deprecated = \"use terminal.shell instead\")]\n shell: Option<Program>,\n\n /// Configuration file imports.\n ///\n /// This is never read since the field is directly accessed through the config's\n /// [`toml::Value`], but still present to prevent unused field warnings.\n #[config(deprecated = \"use general.import instead\")]\n import: Option<Vec<String>>,\n\n /// Shell startup directory.\n #[config(deprecated = \"use general.working_directory instead\")]\n working_directory: Option<PathBuf>,\n\n /// Live config reload.\n #[config(deprecated = \"use general.live_config_reload instead\")]\n live_config_reload: Option<bool>,\n\n /// Offer IPC through a unix socket.\n #[cfg(unix)]\n #[config(deprecated = \"use general.ipc_socket instead\")]\n pub ipc_socket: Option<bool>,\n}"
],
"name": "config",
"type": "UiConfig"
},
{
"definitions": [
"pub struct Options {\n /// Print all events to STDOUT.\n #[clap(long)]\n pub print_events: bool,\n\n /// Generates ref test.\n #[clap(long, conflicts_with(\"daemon\"))]\n pub ref_test: bool,\n\n /// X11 window ID to embed Alacritty within (decimal or hexadecimal with \"0x\" prefix).\n #[clap(long)]\n pub embed: Option<String>,\n\n /// Specify alternative configuration file [default:\n /// $XDG_CONFIG_HOME/alacritty/alacritty.toml].\n #[cfg(not(any(target_os = \"macos\", windows)))]\n #[clap(long, value_hint = ValueHint::FilePath)]\n pub config_file: Option<PathBuf>,\n\n /// Specify alternative configuration file [default: %APPDATA%\\alacritty\\alacritty.toml].\n #[cfg(windows)]\n #[clap(long, value_hint = ValueHint::FilePath)]\n pub config_file: Option<PathBuf>,\n\n /// Specify alternative configuration file [default: $HOME/.config/alacritty/alacritty.toml].\n #[cfg(target_os = \"macos\")]\n #[clap(long, value_hint = ValueHint::FilePath)]\n pub config_file: Option<PathBuf>,\n\n /// Path for IPC socket creation.\n #[cfg(unix)]\n #[clap(long, value_hint = ValueHint::FilePath)]\n pub socket: Option<PathBuf>,\n\n /// Reduces the level of verbosity (the min level is -qq).\n #[clap(short, conflicts_with(\"verbose\"), action = ArgAction::Count)]\n quiet: u8,\n\n /// Increases the level of verbosity (the max level is -vvv).\n #[clap(short, conflicts_with(\"quiet\"), action = ArgAction::Count)]\n verbose: u8,\n\n /// Do not spawn an initial window.\n #[clap(long)]\n pub daemon: bool,\n\n /// CLI options for config overrides.\n #[clap(skip)]\n pub config_options: ParsedOptions,\n\n /// Options which can be passed via IPC.\n #[clap(flatten)]\n pub window_options: WindowOptions,\n\n /// Subcommand passed to the CLI.\n #[clap(subcommand)]\n pub subcommands: Option<Subcommands>,\n}"
],
"name": "cli_options",
"type": "CliOptions"
},
{
"definitions": [
"pub struct EventLoop<T: 'static> {\n pub(crate) event_loop: platform_impl::EventLoop<T>,\n pub(crate) _marker: PhantomData<*mut ()>, // Not Send nor Sync\n}",
"pub struct EventLoop<T: 'static> {\n pub(crate) event_loop: platform_impl::EventLoop<T>,\n pub(crate) _marker: PhantomData<*mut ()>, // Not Send nor Sync\n}"
],
"name": "event_loop",
"type": "&EventLoop<Event>"
}
],
"end_line": 136,
"name": "new",
"signature": "pub fn new(\n config: UiConfig,\n cli_options: CliOptions,\n event_loop: &EventLoop<Event>,\n ) -> Processor",
"start_line": 96
} | {
"class_name": "impl Processor {\n /// Create a new event processor.\n pub fn new(\n config: UiConfig,\n cli_options: CliOptions,\n event_loop: &EventLoop<Event>,\n ) -> Processor {\n let proxy = event_loop.create_proxy();\n let scheduler = Scheduler::new(proxy.clone());\n let initial_window_options = Some(cli_options.window_options.clone());\n\n // Disable all device events, since we don't care about them.\n event_loop.listen_device_events(DeviceEvents::Never);\n\n // SAFETY: Since this takes a pointer to the winit event loop, it MUST be dropped first,\n // which is done in `loop_exiting`.\n let clipboard = unsafe { Clipboard::new(event_loop.display_handle().unwrap().as_raw()) };\n\n // Create a config monitor.\n //\n // The monitor watches the config file for changes and reloads it. Pending\n // config changes are processed in the main loop.\n let mut config_monitor = None;\n if config.live_config_reload() {\n config_monitor =\n ConfigMonitor::new(config.config_paths.clone(), event_loop.create_proxy());\n }\n\n Processor {\n initial_window_options,\n initial_window_error: None,\n cli_options,\n proxy,\n scheduler,\n gl_config: None,\n config: Rc::new(config),\n clipboard,\n windows: Default::default(),\n #[cfg(unix)]\n global_ipc_options: Default::default(),\n config_monitor,\n }\n }\n\n /// Create initial window and load GL platform.\n ///\n /// This will initialize the OpenGL Api and pick a config that\n /// will be used for the rest of the windows.\n pub fn create_initial_window(\n &mut self,\n event_loop: &ActiveEventLoop,\n window_options: WindowOptions,\n ) -> Result<(), Box<dyn Error>> {\n let window_context = WindowContext::initial(\n event_loop,\n self.proxy.clone(),\n self.config.clone(),\n window_options,\n )?;\n\n self.gl_config = Some(window_context.display.gl_context().config());\n self.windows.insert(window_context.id(), window_context);\n\n Ok(())\n }\n\n /// Create a new terminal window.\n pub fn create_window(\n &mut self,\n event_loop: 
&ActiveEventLoop,\n options: WindowOptions,\n ) -> Result<(), Box<dyn Error>> {\n let gl_config = self.gl_config.as_ref().unwrap();\n\n // Override config with CLI/IPC options.\n let mut config_overrides = options.config_overrides();\n #[cfg(unix)]\n config_overrides.extend_from_slice(&self.global_ipc_options);\n let mut config = self.config.clone();\n config = config_overrides.override_config_rc(config);\n\n let window_context = WindowContext::additional(\n gl_config,\n event_loop,\n self.proxy.clone(),\n config,\n options,\n config_overrides,\n )?;\n\n self.windows.insert(window_context.id(), window_context);\n Ok(())\n }\n\n /// Run the event loop.\n ///\n /// The result is exit code generate from the loop.\n pub fn run(&mut self, event_loop: EventLoop<Event>) -> Result<(), Box<dyn Error>> {\n let result = event_loop.run_app(self);\n if let Some(initial_window_error) = self.initial_window_error.take() {\n Err(initial_window_error)\n } else {\n result.map_err(Into::into)\n }\n }\n\n /// Check if an event is irrelevant and can be skipped.\n fn skip_window_event(event: &WindowEvent) -> bool {\n matches!(\n event,\n WindowEvent::KeyboardInput { is_synthetic: true, .. }\n | WindowEvent::ActivationTokenDone { .. }\n | WindowEvent::DoubleTapGesture { .. }\n | WindowEvent::TouchpadPressure { .. }\n | WindowEvent::RotationGesture { .. }\n | WindowEvent::CursorEntered { .. }\n | WindowEvent::PinchGesture { .. }\n | WindowEvent::AxisMotion { .. }\n | WindowEvent::PanGesture { .. }\n | WindowEvent::HoveredFileCancelled\n | WindowEvent::Destroyed\n | WindowEvent::ThemeChanged(_)\n | WindowEvent::HoveredFile(_)\n | WindowEvent::Moved(_)\n )\n }\n}",
"class_signature": "impl Processor"
} |
// Extract the semantic "word" under `point`: first skip rightwards over any
// semantic escape characters (word separators), then expand to the word's
// boundaries and return its text.
semantic_word | alacritty-master/alacritty/src/event.rs | fn semantic_word(&self, point: Point) -> String {
    let terminal = self.terminal();
    let grid = terminal.grid();
    // Find the next semantic word boundary to the right.
    let mut end = terminal.semantic_search_right(point);
    // Get point at which skipping over semantic characters has led us back to the
    // original character.
    let start_cell = &grid[point];
    // Wide characters span multiple cells, so the wrap-around sentinel must be
    // shifted past the spacer cells for the equality check below to fire.
    let search_end = if start_cell.flags.intersects(Flags::LEADING_WIDE_CHAR_SPACER) {
        point.add(terminal, Boundary::None, 2)
    } else if start_cell.flags.intersects(Flags::WIDE_CHAR) {
        point.add(terminal, Boundary::None, 1)
    } else {
        point
    };
    // Keep moving until we're not on top of a semantic escape character.
    let semantic_chars = terminal.semantic_escape_chars();
    loop {
        let cell = &grid[end];
        // Get cell's character, taking wide characters into account: a spacer
        // cell holds no character of its own, so read the preceding cell.
        let c = if cell.flags.contains(Flags::WIDE_CHAR_SPACER) {
            grid[end.sub(terminal, Boundary::None, 1)].c
        } else {
            cell.c
        };
        if !semantic_chars.contains(c) {
            break;
        }
        end = terminal.semantic_search_right(end.add(terminal, Boundary::None, 1));
        // Stop if the entire grid is only semantic escape characters.
        if end == search_end {
            return String::new();
        }
    }
    // Find the beginning of the semantic word.
    let start = terminal.semantic_search_left(end);
    terminal.bounds_to_string(start, end)
}
} | //! Process window events.
use crate::ConfigMonitor;
use glutin::config::GetGlConfig;
use std::borrow::Cow;
use std::cmp::min;
use std::collections::hash_map::Entry;
use std::collections::{HashMap, HashSet, VecDeque};
use std::error::Error;
use std::ffi::OsStr;
use std::fmt::Debug;
#[cfg(not(windows))]
use std::os::unix::io::RawFd;
use std::path::PathBuf;
use std::rc::Rc;
use std::time::{Duration, Instant};
use std::{env, f32, mem};
use ahash::RandomState;
use crossfont::Size as FontSize;
use glutin::config::Config as GlutinConfig;
use glutin::display::GetGlDisplay;
use log::{debug, error, info, warn};
use winit::application::ApplicationHandler;
use winit::event::{
ElementState, Event as WinitEvent, Ime, Modifiers, MouseButton, StartCause,
Touch as TouchEvent, WindowEvent,
};
use winit::event_loop::{ActiveEventLoop, ControlFlow, DeviceEvents, EventLoop, EventLoopProxy};
use winit::raw_window_handle::HasDisplayHandle;
use winit::window::WindowId;
use alacritty_terminal::event::{Event as TerminalEvent, EventListener, Notify};
use alacritty_terminal::event_loop::Notifier;
use alacritty_terminal::grid::{BidirectionalIterator, Dimensions, Scroll};
use alacritty_terminal::index::{Boundary, Column, Direction, Line, Point, Side};
use alacritty_terminal::selection::{Selection, SelectionType};
use alacritty_terminal::term::cell::Flags;
use alacritty_terminal::term::search::{Match, RegexSearch};
use alacritty_terminal::term::{self, ClipboardType, Term, TermMode};
use alacritty_terminal::vte::ansi::NamedColor;
#[cfg(unix)]
use crate::cli::{IpcConfig, ParsedOptions};
use crate::cli::{Options as CliOptions, WindowOptions};
use crate::clipboard::Clipboard;
use crate::config::ui_config::{HintAction, HintInternalAction};
use crate::config::{self, UiConfig};
#[cfg(not(windows))]
use crate::daemon::foreground_process_path;
use crate::daemon::spawn_daemon;
use crate::display::color::Rgb;
use crate::display::hint::HintMatch;
use crate::display::window::Window;
use crate::display::{Display, Preedit, SizeInfo};
use crate::input::{self, ActionContext as _, FONT_SIZE_STEP};
use crate::logging::{LOG_TARGET_CONFIG, LOG_TARGET_WINIT};
use crate::message_bar::{Message, MessageBuffer};
use crate::scheduler::{Scheduler, TimerId, Topic};
use crate::window_context::WindowContext;
/// Duration after the last user input until an unlimited search is performed.
pub const TYPING_SEARCH_DELAY: Duration = Duration::from_millis(500);

/// Maximum number of lines for the blocking search while still typing the search regex.
const MAX_SEARCH_WHILE_TYPING: Option<usize> = Some(1000);

/// Maximum number of search terms stored in the history.
const MAX_SEARCH_HISTORY_SIZE: usize = 255;

/// Touch zoom speed.
///
/// NOTE(review): presumably scales font-size change per pixel of pinch
/// distance — confirm at the touch-handling use site.
const TOUCH_ZOOM_FACTOR: f32 = 0.01;
/// The event processor.
///
/// Stores some state from received events and dispatches actions when they are
/// triggered.
pub struct Processor {
    // Config-file watcher; `None` when live config reload is disabled.
    pub config_monitor: Option<ConfigMonitor>,
    // System clipboard handle; must be dropped before the winit event loop
    // (swapped for a nop clipboard in `exiting`).
    clipboard: Clipboard,
    // Timer queue for scheduled events (e.g. cursor blinking).
    scheduler: Scheduler,
    // CLI window options, consumed when the initial window is created.
    initial_window_options: Option<WindowOptions>,
    // Error from initial window creation, reported after the loop exits.
    initial_window_error: Option<Box<dyn Error>>,
    // All live terminal windows, keyed by winit window id.
    windows: HashMap<WindowId, WindowContext, RandomState>,
    // Proxy for injecting user events into the event loop.
    proxy: EventLoopProxy<Event>,
    // GL config picked by the first window and shared with later ones.
    gl_config: Option<GlutinConfig>,
    // Config overrides received over IPC without a window id; applied to
    // windows created in the future.
    #[cfg(unix)]
    global_ipc_options: ParsedOptions,
    // Options parsed from the command line.
    cli_options: CliOptions,
    // Base configuration shared by all windows.
    config: Rc<UiConfig>,
}
impl Processor {
    /// Create a new event processor.
    ///
    /// Sets up the scheduler, clipboard and (when live reload is enabled) the
    /// config monitor, but does not open any windows yet.
    pub fn new(
        config: UiConfig,
        cli_options: CliOptions,
        event_loop: &EventLoop<Event>,
    ) -> Processor {
        let proxy = event_loop.create_proxy();
        let scheduler = Scheduler::new(proxy.clone());
        // Keep the CLI window options around until the initial window exists.
        let initial_window_options = Some(cli_options.window_options.clone());
        // Disable all device events, since we don't care about them.
        event_loop.listen_device_events(DeviceEvents::Never);
        // SAFETY: Since this takes a pointer to the winit event loop, it MUST be dropped first,
        // which is done in `loop_exiting`.
        let clipboard = unsafe { Clipboard::new(event_loop.display_handle().unwrap().as_raw()) };
        // Create a config monitor.
        //
        // The monitor watches the config file for changes and reloads it. Pending
        // config changes are processed in the main loop.
        let mut config_monitor = None;
        if config.live_config_reload() {
            config_monitor =
                ConfigMonitor::new(config.config_paths.clone(), event_loop.create_proxy());
        }
        Processor {
            initial_window_options,
            initial_window_error: None,
            cli_options,
            proxy,
            scheduler,
            gl_config: None,
            config: Rc::new(config),
            clipboard,
            windows: Default::default(),
            #[cfg(unix)]
            global_ipc_options: Default::default(),
            config_monitor,
        }
    }

    /// Create initial window and load GL platform.
    ///
    /// This will initialize the OpenGL Api and pick a config that
    /// will be used for the rest of the windows.
    pub fn create_initial_window(
        &mut self,
        event_loop: &ActiveEventLoop,
        window_options: WindowOptions,
    ) -> Result<(), Box<dyn Error>> {
        let window_context = WindowContext::initial(
            event_loop,
            self.proxy.clone(),
            self.config.clone(),
            window_options,
        )?;
        // Remember the GL config so additional windows can share it.
        self.gl_config = Some(window_context.display.gl_context().config());
        self.windows.insert(window_context.id(), window_context);
        Ok(())
    }

    /// Create a new terminal window.
    ///
    /// Expects `create_initial_window` to have succeeded, since the GL config
    /// picked by the first window is reused (panics otherwise).
    pub fn create_window(
        &mut self,
        event_loop: &ActiveEventLoop,
        options: WindowOptions,
    ) -> Result<(), Box<dyn Error>> {
        let gl_config = self.gl_config.as_ref().unwrap();
        // Override config with CLI/IPC options.
        let mut config_overrides = options.config_overrides();
        #[cfg(unix)]
        config_overrides.extend_from_slice(&self.global_ipc_options);
        let mut config = self.config.clone();
        config = config_overrides.override_config_rc(config);
        let window_context = WindowContext::additional(
            gl_config,
            event_loop,
            self.proxy.clone(),
            config,
            options,
            config_overrides,
        )?;
        self.windows.insert(window_context.id(), window_context);
        Ok(())
    }

    /// Run the event loop.
    ///
    /// The result is the exit status produced by the loop; a failure while
    /// creating the initial window takes precedence over the loop's own result.
    pub fn run(&mut self, event_loop: EventLoop<Event>) -> Result<(), Box<dyn Error>> {
        let result = event_loop.run_app(self);
        if let Some(initial_window_error) = self.initial_window_error.take() {
            Err(initial_window_error)
        } else {
            result.map_err(Into::into)
        }
    }

    /// Check if an event is irrelevant and can be skipped.
    fn skip_window_event(event: &WindowEvent) -> bool {
        matches!(
            event,
            WindowEvent::KeyboardInput { is_synthetic: true, .. }
                | WindowEvent::ActivationTokenDone { .. }
                | WindowEvent::DoubleTapGesture { .. }
                | WindowEvent::TouchpadPressure { .. }
                | WindowEvent::RotationGesture { .. }
                | WindowEvent::CursorEntered { .. }
                | WindowEvent::PinchGesture { .. }
                | WindowEvent::AxisMotion { .. }
                | WindowEvent::PanGesture { .. }
                | WindowEvent::HoveredFileCancelled
                | WindowEvent::Destroyed
                | WindowEvent::ThemeChanged(_)
                | WindowEvent::HoveredFile(_)
                | WindowEvent::Moved(_)
        )
    }
}
impl ApplicationHandler<Event> for Processor {
    fn resumed(&mut self, _event_loop: &ActiveEventLoop) {}

    /// Create the initial window once, on event loop startup.
    fn new_events(&mut self, event_loop: &ActiveEventLoop, cause: StartCause) {
        // Only act on loop initialization, and never in daemon mode.
        if cause != StartCause::Init || self.cli_options.daemon {
            return;
        }
        if let Some(window_options) = self.initial_window_options.take() {
            if let Err(err) = self.create_initial_window(event_loop, window_options) {
                // Remember the error so `run` can report it after the loop exits.
                self.initial_window_error = Some(err);
                event_loop.exit();
                return;
            }
        }
        info!("Initialisation complete");
    }

    /// Dispatch a window event to the window context it belongs to.
    fn window_event(
        &mut self,
        _event_loop: &ActiveEventLoop,
        window_id: WindowId,
        event: WindowEvent,
    ) {
        if self.config.debug.print_events {
            info!(target: LOG_TARGET_WINIT, "{event:?}");
        }
        // Ignore all events we do not care about.
        if Self::skip_window_event(&event) {
            return;
        }
        // Drop events for unknown (e.g. already closed) windows.
        let window_context = match self.windows.get_mut(&window_id) {
            Some(window_context) => window_context,
            None => return,
        };
        let is_redraw = matches!(event, WindowEvent::RedrawRequested);
        window_context.handle_event(
            #[cfg(target_os = "macos")]
            _event_loop,
            &self.proxy,
            &mut self.clipboard,
            &mut self.scheduler,
            WinitEvent::WindowEvent { window_id, event },
        );
        // Draw after event handling, so the frame reflects the latest state.
        if is_redraw {
            window_context.draw(&mut self.scheduler);
        }
    }

    /// Handle Alacritty's own user events, routed by their optional window id.
    fn user_event(&mut self, event_loop: &ActiveEventLoop, event: Event) {
        if self.config.debug.print_events {
            info!(target: LOG_TARGET_WINIT, "{event:?}");
        }
        // Handle events which don't mandate the WindowId.
        match (event.payload, event.window_id.as_ref()) {
            // Process IPC config update.
            #[cfg(unix)]
            (EventType::IpcConfig(ipc_config), window_id) => {
                // Try and parse options as toml.
                let mut options = ParsedOptions::from_options(&ipc_config.options);
                // Override IPC config for each window with matching ID.
                for (_, window_context) in self
                    .windows
                    .iter_mut()
                    .filter(|(id, _)| window_id.is_none() || window_id == Some(*id))
                {
                    if ipc_config.reset {
                        window_context.reset_window_config(self.config.clone());
                    } else {
                        window_context.add_window_config(self.config.clone(), &options);
                    }
                }
                // Persist global options for future windows.
                if window_id.is_none() {
                    if ipc_config.reset {
                        self.global_ipc_options.clear();
                    } else {
                        self.global_ipc_options.append(&mut options);
                    }
                }
            },
            (EventType::ConfigReload(path), _) => {
                // Clear config logs from message bar for all terminals.
                for window_context in self.windows.values_mut() {
                    if !window_context.message_buffer.is_empty() {
                        window_context.message_buffer.remove_target(LOG_TARGET_CONFIG);
                        window_context.display.pending_update.dirty = true;
                    }
                }
                // Load config and update each terminal.
                if let Ok(config) = config::reload(&path, &mut self.cli_options) {
                    self.config = Rc::new(config);
                    // Restart config monitor if imports changed.
                    if let Some(monitor) = self.config_monitor.take() {
                        let paths = &self.config.config_paths;
                        self.config_monitor = if monitor.needs_restart(paths) {
                            monitor.shutdown();
                            ConfigMonitor::new(paths.clone(), self.proxy.clone())
                        } else {
                            Some(monitor)
                        };
                    }
                    for window_context in self.windows.values_mut() {
                        window_context.update_config(self.config.clone());
                    }
                }
            },
            // Create a new terminal window.
            (EventType::CreateWindow(options), _) => {
                // XXX Ensure that no context is current when creating a new window,
                // otherwise it may lock the backing buffer of the
                // surface of current context when asking
                // e.g. EGL on Wayland to create a new context.
                for window_context in self.windows.values_mut() {
                    window_context.display.make_not_current();
                }
                if self.gl_config.is_none() {
                    // Handle initial window creation in daemon mode.
                    if let Err(err) = self.create_initial_window(event_loop, options) {
                        self.initial_window_error = Some(err);
                        event_loop.exit();
                    }
                } else if let Err(err) = self.create_window(event_loop, options) {
                    error!("Could not open window: {:?}", err);
                }
            },
            // Process events affecting all windows.
            (payload, None) => {
                let event = WinitEvent::UserEvent(Event::new(payload, None));
                for window_context in self.windows.values_mut() {
                    window_context.handle_event(
                        #[cfg(target_os = "macos")]
                        event_loop,
                        &self.proxy,
                        &mut self.clipboard,
                        &mut self.scheduler,
                        event.clone(),
                    );
                }
            },
            (EventType::Terminal(TerminalEvent::Wakeup), Some(window_id)) => {
                if let Some(window_context) = self.windows.get_mut(window_id) {
                    window_context.dirty = true;
                    // Only redraw while the frame callback permits it; otherwise
                    // the redraw is requested when `EventType::Frame` arrives.
                    if window_context.display.window.has_frame {
                        window_context.display.window.request_redraw();
                    }
                }
            },
            (EventType::Terminal(TerminalEvent::Exit), Some(window_id)) => {
                // Remove the closed terminal.
                let window_context = match self.windows.entry(*window_id) {
                    // Don't exit when terminal exits if user asked to hold the window.
                    Entry::Occupied(window_context)
                        if !window_context.get().display.window.hold =>
                    {
                        window_context.remove()
                    },
                    _ => return,
                };
                // Unschedule pending events.
                self.scheduler.unschedule_window(window_context.id());
                // Shutdown if no more terminals are open.
                if self.windows.is_empty() && !self.cli_options.daemon {
                    // Write ref tests of last window to disk.
                    if self.config.debug.ref_test {
                        window_context.write_ref_test_results();
                    }
                    event_loop.exit();
                }
            },
            // NOTE: This event bypasses batching to minimize input latency.
            (EventType::Frame, Some(window_id)) => {
                if let Some(window_context) = self.windows.get_mut(window_id) {
                    window_context.display.window.has_frame = true;
                    if window_context.dirty {
                        window_context.display.window.request_redraw();
                    }
                }
            },
            (payload, Some(window_id)) => {
                if let Some(window_context) = self.windows.get_mut(window_id) {
                    window_context.handle_event(
                        #[cfg(target_os = "macos")]
                        event_loop,
                        &self.proxy,
                        &mut self.clipboard,
                        &mut self.scheduler,
                        WinitEvent::UserEvent(Event::new(payload, *window_id)),
                    );
                }
            },
        };
    }

    /// Flush per-window work and compute the next event loop deadline.
    fn about_to_wait(&mut self, event_loop: &ActiveEventLoop) {
        if self.config.debug.print_events {
            info!(target: LOG_TARGET_WINIT, "About to wait");
        }
        // Dispatch event to all windows.
        for window_context in self.windows.values_mut() {
            window_context.handle_event(
                #[cfg(target_os = "macos")]
                event_loop,
                &self.proxy,
                &mut self.clipboard,
                &mut self.scheduler,
                WinitEvent::AboutToWait,
            );
        }
        // Update the scheduler after event processing to ensure
        // the event loop deadline is as accurate as possible.
        let control_flow = match self.scheduler.update() {
            Some(instant) => ControlFlow::WaitUntil(instant),
            None => ControlFlow::Wait,
        };
        event_loop.set_control_flow(control_flow);
    }

    /// Tear down GL and clipboard state in a safe order before the loop dies.
    fn exiting(&mut self, _event_loop: &ActiveEventLoop) {
        if self.config.debug.print_events {
            info!("Exiting the event loop");
        }
        match self.gl_config.take().map(|config| config.display()) {
            #[cfg(not(target_os = "macos"))]
            Some(glutin::display::Display::Egl(display)) => {
                // Ensure that all the windows are dropped, so the destructors for
                // Renderer and contexts ran.
                self.windows.clear();
                // SAFETY: the display is being destroyed after destroying all the
                // windows, thus no attempt to access the EGL state will be made.
                unsafe {
                    display.terminate();
                }
            },
            _ => (),
        }
        // SAFETY: The clipboard must be dropped before the event loop, so use the nop clipboard
        // as a safe placeholder.
        mem::swap(&mut self.clipboard, &mut Clipboard::new_nop());
    }
}
/// Alacritty events.
#[derive(Debug, Clone)]
pub struct Event {
    /// Limit event to a specific window.
    ///
    /// `None` broadcasts the event to every window.
    window_id: Option<WindowId>,
    /// Event payload.
    payload: EventType,
}
impl Event {
pub fn new<I: Into<Option<WindowId>>>(payload: EventType, window_id: I) -> Self {
Self { window_id: window_id.into(), payload }
}
}
impl From<Event> for WinitEvent<Event> {
fn from(event: Event) -> Self {
WinitEvent::UserEvent(event)
}
}
/// Alacritty events.
#[derive(Debug, Clone)]
pub enum EventType {
    /// Event reported by the terminal emulator itself.
    Terminal(TerminalEvent),
    /// Configuration file changed on disk; carries the changed path.
    ConfigReload(PathBuf),
    /// Show a message in the message bar.
    Message(Message),
    /// Scroll the terminal display.
    Scroll(Scroll),
    /// Request creation of a new terminal window.
    CreateWindow(WindowOptions),
    /// Config update received through the IPC socket.
    #[cfg(unix)]
    IpcConfig(IpcConfig),
    /// Toggle cursor visibility for blinking.
    BlinkCursor,
    /// Blinking timeout reached; stop blinking and show the cursor.
    BlinkCursorTimeout,
    /// Advance the active search to the next match.
    SearchNext,
    /// Frame callback; the window may request a redraw again.
    Frame,
}
impl From<TerminalEvent> for EventType {
fn from(event: TerminalEvent) -> Self {
Self::Terminal(event)
}
}
/// Regex search state.
pub struct SearchState {
    /// Search direction.
    pub direction: Direction,
    /// Current position in the search history.
    ///
    /// `None` when no history entry is selected (see [`SearchState::regex`]).
    pub history_index: Option<usize>,
    /// Change in display offset since the beginning of the search.
    display_offset_delta: i32,
    /// Search origin in viewport coordinates relative to original display offset.
    origin: Point,
    /// Focused match during active search.
    focused_match: Option<Match>,
    /// Search regex and history.
    ///
    /// During an active search, the first element is the user's current input.
    ///
    /// While going through history, the [`SearchState::history_index`] will point to the element
    /// in history which is currently being previewed.
    history: VecDeque<String>,
    /// Compiled search automatons.
    dfas: Option<RegexSearch>,
}
impl SearchState {
    /// Search regex text if a search is active.
    pub fn regex(&self) -> Option<&String> {
        let index = self.history_index?;
        self.history.get(index)
    }

    /// Direction of the search from the search origin.
    pub fn direction(&self) -> Direction {
        self.direction
    }

    /// Focused match during vi-less search.
    pub fn focused_match(&self) -> Option<&Match> {
        self.focused_match.as_ref()
    }

    /// Clear the focused match.
    pub fn clear_focused_match(&mut self) {
        let _ = self.focused_match.take();
    }

    /// Active search dfas.
    pub fn dfas(&mut self) -> Option<&mut RegexSearch> {
        self.dfas.as_mut()
    }

    /// Mutable access to the active search regex, if a search is active.
    fn regex_mut(&mut self) -> Option<&mut String> {
        let index = self.history_index?;
        self.history.get_mut(index)
    }
}
impl Default for SearchState {
fn default() -> Self {
Self {
direction: Direction::Right,
display_offset_delta: Default::default(),
focused_match: Default::default(),
history_index: Default::default(),
history: Default::default(),
origin: Default::default(),
dfas: Default::default(),
}
}
}
/// Vi inline search state.
pub struct InlineSearchState {
    /// Whether inline search is currently waiting for search character input.
    pub char_pending: bool,
    // Most recent search character, if any.
    // NOTE(review): presumably kept for repeating the motion — confirm at use site.
    pub character: Option<char>,
    // Direction of the inline search.
    direction: Direction,
    // NOTE(review): name suggests the cursor stops one cell short of the match
    // (vi 't'/'T'-style motions) — confirm at use site, which is outside this view.
    stop_short: bool,
}
impl Default for InlineSearchState {
fn default() -> Self {
Self {
direction: Direction::Right,
char_pending: Default::default(),
stop_short: Default::default(),
character: Default::default(),
}
}
}
/// Borrowed state handed to input actions while a single event is processed.
pub struct ActionContext<'a, N, T> {
    // Channel used to write to the PTY (see `write_to_pty`).
    pub notifier: &'a mut N,
    pub terminal: &'a mut Term<T>,
    pub clipboard: &'a mut Clipboard,
    pub mouse: &'a mut Mouse,
    pub touch: &'a mut TouchPurpose,
    pub modifiers: &'a mut Modifiers,
    pub display: &'a mut Display,
    pub message_buffer: &'a mut MessageBuffer,
    pub config: &'a UiConfig,
    // Set once the blink timeout fired; suppresses further blink toggles.
    pub cursor_blink_timed_out: &'a mut bool,
    #[cfg(target_os = "macos")]
    pub event_loop: &'a ActiveEventLoop,
    pub event_proxy: &'a EventLoopProxy<Event>,
    pub scheduler: &'a mut Scheduler,
    pub search_state: &'a mut SearchState,
    pub inline_search_state: &'a mut InlineSearchState,
    // Set to request a redraw of this window.
    pub dirty: &'a mut bool,
    pub occluded: &'a mut bool,
    // When set, dynamic title updates from the terminal are ignored.
    pub preserve_title: bool,
    #[cfg(not(windows))]
    pub master_fd: RawFd,
    #[cfg(not(windows))]
    pub shell_pid: u32,
}
impl<'a, N: Notify + 'a, T: EventListener> input::ActionContext<T> for ActionContext<'a, N, T> {
    /// Send input bytes to the PTY.
    #[inline]
    fn write_to_pty<B: Into<Cow<'static, [u8]>>>(&self, val: B) {
        self.notifier.notify(val);
    }
    /// Request a redraw.
    #[inline]
    fn mark_dirty(&mut self) {
        *self.dirty = true;
    }
    /// Size of the terminal display.
    #[inline]
    fn size_info(&self) -> SizeInfo {
        self.display.size_info
    }
    /// Scroll the terminal display, keeping search/selection state in sync.
    fn scroll(&mut self, scroll: Scroll) {
        let old_offset = self.terminal.grid().display_offset() as i32;
        let old_vi_cursor = self.terminal.vi_mode_cursor;
        self.terminal.scroll_display(scroll);
        let lines_changed = old_offset - self.terminal.grid().display_offset() as i32;
        // Keep track of manual display offset changes during search.
        if self.search_active() {
            self.search_state.display_offset_delta += lines_changed;
        }
        let vi_mode = self.terminal.mode().contains(TermMode::VI);
        // Update selection.
        if vi_mode && self.terminal.selection.as_ref().is_some_and(|s| !s.is_empty()) {
            self.update_selection(self.terminal.vi_mode_cursor.point, Side::Right);
        } else if self.mouse.left_button_state == ElementState::Pressed
            || self.mouse.right_button_state == ElementState::Pressed
        {
            let display_offset = self.terminal.grid().display_offset();
            let point = self.mouse.point(&self.size_info(), display_offset);
            self.update_selection(point, self.mouse.cell_side);
        }
        // Scrolling inside Vi mode moves the cursor, so start typing.
        if vi_mode {
            self.on_typing_start();
        }
        // Update dirty if actually scrolled or moved Vi cursor in Vi mode.
        *self.dirty |=
            lines_changed != 0 || (vi_mode && old_vi_cursor != self.terminal.vi_mode_cursor);
    }
// Copy text selection.
fn copy_selection(&mut self, ty: ClipboardType) {
let text = match self.terminal.selection_to_string().filter(|s| !s.is_empty()) {
Some(text) => text,
None => return,
};
if ty == ClipboardType::Selection && self.config.selection.save_to_clipboard {
self.clipboard.store(ClipboardType::Clipboard, text.clone());
}
self.clipboard.store(ty, text);
}
fn selection_is_empty(&self) -> bool {
self.terminal.selection.as_ref().map_or(true, Selection::is_empty)
}
    /// Drop the active selection, requesting a redraw if one existed.
    fn clear_selection(&mut self) {
        // Clear the selection on the terminal.
        let selection = self.terminal.selection.take();
        // Mark the terminal as dirty when selection wasn't empty.
        *self.dirty |= selection.is_some_and(|s| !s.is_empty());
    }
    /// Update the active selection to a new point, syncing the vi cursor to it.
    fn update_selection(&mut self, mut point: Point, side: Side) {
        let mut selection = match self.terminal.selection.take() {
            Some(selection) => selection,
            None => return,
        };
        // Treat motion over message bar like motion over the last line.
        point.line = min(point.line, self.terminal.bottommost_line());
        // Update selection.
        selection.update(point, side);
        // Move vi cursor and expand selection.
        if self.terminal.mode().contains(TermMode::VI) && !self.search_active() {
            self.terminal.vi_mode_cursor.point = point;
            selection.include_all();
        }
        self.terminal.selection = Some(selection);
        *self.dirty = true;
    }
    /// Begin a new selection of type `ty` at `point`.
    fn start_selection(&mut self, ty: SelectionType, point: Point, side: Side) {
        self.terminal.selection = Some(Selection::new(ty, point, side));
        *self.dirty = true;
        self.copy_selection(ClipboardType::Selection);
    }
    /// Toggle the selection type, clearing it when toggling the active type.
    fn toggle_selection(&mut self, ty: SelectionType, point: Point, side: Side) {
        match &mut self.terminal.selection {
            // Toggling the already-active type clears the selection.
            Some(selection) if selection.ty == ty && !selection.is_empty() => {
                self.clear_selection();
            },
            // Switching type keeps the existing selection region.
            Some(selection) if !selection.is_empty() => {
                selection.ty = ty;
                *self.dirty = true;
                self.copy_selection(ClipboardType::Selection);
            },
            _ => self.start_selection(ty, point, side),
        }
    }
    /// Whether mouse events should be reported to the terminal application.
    #[inline]
    fn mouse_mode(&self) -> bool {
        self.terminal.mode().intersects(TermMode::MOUSE_MODE)
            && !self.terminal.mode().contains(TermMode::VI)
    }
    /// Mutable access to the mouse state.
    #[inline]
    fn mouse_mut(&mut self) -> &mut Mouse {
        self.mouse
    }
    /// Shared access to the mouse state.
    #[inline]
    fn mouse(&self) -> &Mouse {
        self.mouse
    }
    /// Purpose of the ongoing touch input.
    #[inline]
    fn touch_purpose(&mut self) -> &mut TouchPurpose {
        self.touch
    }
    /// Active keyboard modifiers.
    #[inline]
    fn modifiers(&mut self) -> &mut Modifiers {
        self.modifiers
    }
    /// Window of this terminal's display.
    #[inline]
    fn window(&mut self) -> &mut Window {
        &mut self.display.window
    }
    /// Display this context belongs to.
    #[inline]
    fn display(&mut self) -> &mut Display {
        self.display
    }
    /// Shared access to the terminal.
    #[inline]
    fn terminal(&self) -> &Term<T> {
        self.terminal
    }
    /// Mutable access to the terminal.
    #[inline]
    fn terminal_mut(&mut self) -> &mut Term<T> {
        self.terminal
    }
    /// Spawn a new Alacritty process, reusing this process' CLI arguments.
    fn spawn_new_instance(&mut self) {
        let mut env_args = env::args();
        let alacritty = env_args.next().unwrap();
        let mut args: Vec<String> = Vec::new();
        // Reuse the arguments passed to Alacritty for the new instance.
        #[allow(clippy::while_let_on_iterator)]
        while let Some(arg) = env_args.next() {
            // New instances shouldn't inherit command.
            if arg == "-e" || arg == "--command" {
                break;
            }
            // On unix, the working directory of the foreground shell is used by `start_daemon`.
            #[cfg(not(windows))]
            if arg == "--working-directory" {
                // Skip the flag's value as well.
                let _ = env_args.next();
                continue;
            }
            args.push(arg);
        }
        self.spawn_daemon(&alacritty, &args);
    }
    /// Request a new window in this process, inheriting the foreground shell's
    /// working directory when it can be determined.
    #[cfg(not(windows))]
    fn create_new_window(&mut self, #[cfg(target_os = "macos")] tabbing_id: Option<String>) {
        let mut options = WindowOptions::default();
        options.terminal_options.working_directory =
            foreground_process_path(self.master_fd, self.shell_pid).ok();
        #[cfg(target_os = "macos")]
        {
            options.window_tabbing_id = tabbing_id;
        }
        let _ = self.event_proxy.send_event(Event::new(EventType::CreateWindow(options), None));
    }
    /// Request a new window in this process with default options.
    #[cfg(windows)]
    fn create_new_window(&mut self) {
        let _ = self
            .event_proxy
            .send_event(Event::new(EventType::CreateWindow(WindowOptions::default()), None));
    }
    /// Launch `program` with `args` as a detached background process.
    fn spawn_daemon<I, S>(&self, program: &str, args: I)
    where
        I: IntoIterator<Item = S> + Debug + Copy,
        S: AsRef<OsStr>,
    {
        #[cfg(not(windows))]
        let result = spawn_daemon(program, args, self.master_fd, self.shell_pid);
        #[cfg(windows)]
        let result = spawn_daemon(program, args);
        match result {
            Ok(_) => debug!("Launched {} with args {:?}", program, args),
            Err(err) => warn!("Unable to launch {program} with args {args:?}: {err}"),
        }
    }
fn change_font_size(&mut self, delta: f32) {
// Round to pick integral px steps, since fonts look better on them.
let new_size = self.display.font_size.as_px().round() + delta;
self.display.font_size = FontSize::from_px(new_size);
let font = self.config.font.clone().with_size(self.display.font_size);
self.display.pending_update.set_font(font);
}
fn reset_font_size(&mut self) {
let scale_factor = self.display.window.scale_factor as f32;
self.display.font_size = self.config.font.size().scale(scale_factor);
self.display
.pending_update
.set_font(self.config.font.clone().with_size(self.display.font_size));
}
#[inline]
fn pop_message(&mut self) {
if !self.message_buffer.is_empty() {
self.display.pending_update.dirty = true;
self.message_buffer.pop();
}
}
    /// Enter search mode, pushing a fresh history entry and recording the origin.
    #[inline]
    fn start_search(&mut self, direction: Direction) {
        // Only create new history entry if the previous regex wasn't empty.
        if self.search_state.history.front().map_or(true, |regex| !regex.is_empty()) {
            self.search_state.history.push_front(String::new());
            self.search_state.history.truncate(MAX_SEARCH_HISTORY_SIZE);
        }
        self.search_state.history_index = Some(0);
        self.search_state.direction = direction;
        self.search_state.focused_match = None;
        // Store original search position as origin and reset location.
        if self.terminal.mode().contains(TermMode::VI) {
            self.search_state.origin = self.terminal.vi_mode_cursor.point;
            self.search_state.display_offset_delta = 0;
            // Adjust origin for content moving upward on search start.
            if self.terminal.grid().cursor.point.line + 1 == self.terminal.screen_lines() {
                self.search_state.origin.line -= 1;
            }
        } else {
            let viewport_top = Line(-(self.terminal.grid().display_offset() as i32)) - 1;
            let viewport_bottom = viewport_top + self.terminal.bottommost_line();
            let last_column = self.terminal.last_column();
            self.search_state.origin = match direction {
                Direction::Right => Point::new(viewport_top, Column(0)),
                Direction::Left => Point::new(viewport_bottom, last_column),
            };
        }
        // Enable IME so we can input into the search bar with it if we were in Vi mode.
        self.window().set_ime_allowed(true);
        self.display.damage_tracker.frame().mark_fully_damaged();
        self.display.pending_update.dirty = true;
    }
    /// Start a search pre-seeded with `text`, escaping regex metacharacters.
    #[inline]
    fn start_seeded_search(&mut self, direction: Direction, text: String) {
        let origin = self.terminal.vi_mode_cursor.point;
        // Start new search.
        self.clear_selection();
        self.start_search(direction);
        // Enter initial selection text.
        for c in text.chars() {
            // Escape regex metacharacters: $ ( ) * + ? [ \ ] ^ { | }.
            if let '$' | '('..='+' | '?' | '['..='^' | '{'..='}' = c {
                self.search_input('\\');
            }
            self.search_input(c);
        }
        // Leave search mode.
        self.confirm_search();
        if !self.terminal.mode().contains(TermMode::VI) {
            return;
        }
        // Find the target vi cursor point by going to the next match to the right of the origin,
        // then jump to the next search match in the target direction.
        let target = self.search_next(origin, Direction::Right, Side::Right).and_then(|rm| {
            let regex_match = match direction {
                Direction::Right => {
                    let origin = rm.end().add(self.terminal, Boundary::None, 1);
                    self.search_next(origin, Direction::Right, Side::Left)?
                },
                Direction::Left => {
                    let origin = rm.start().sub(self.terminal, Boundary::None, 1);
                    self.search_next(origin, Direction::Left, Side::Left)?
                },
            };
            Some(*regex_match.start())
        });
        // Move the vi cursor to the target position.
        if let Some(target) = target {
            self.terminal_mut().vi_goto_point(target);
            self.mark_dirty();
        }
    }
    /// Confirm the active search, leaving the vi cursor on the focused match.
    #[inline]
    fn confirm_search(&mut self) {
        // Just cancel search when not in vi mode.
        if !self.terminal.mode().contains(TermMode::VI) {
            self.cancel_search();
            return;
        }
        // Force unlimited search if the previous one was interrupted.
        let timer_id = TimerId::new(Topic::DelayedSearch, self.display.window.id());
        if self.scheduler.scheduled(timer_id) {
            self.goto_match(None);
        }
        self.exit_search();
    }
    /// Abort the active search, restoring pre-search state where possible.
    #[inline]
    fn cancel_search(&mut self) {
        if self.terminal.mode().contains(TermMode::VI) {
            // Recover pre-search state in vi mode.
            self.search_reset_state();
        } else if let Some(focused_match) = &self.search_state.focused_match {
            // Create a selection for the focused match.
            let start = *focused_match.start();
            let end = *focused_match.end();
            self.start_selection(SelectionType::Simple, start, Side::Left);
            self.update_selection(end, Side::Right);
            self.copy_selection(ClipboardType::Selection);
        }
        self.search_state.dfas = None;
        self.exit_search();
    }
    /// Feed one character of input into the active search regex.
    #[inline]
    fn search_input(&mut self, c: char) {
        match self.search_state.history_index {
            Some(0) => (),
            // When currently in history, replace active regex with history on change.
            Some(index) => {
                self.search_state.history[0] = self.search_state.history[index].clone();
                self.search_state.history_index = Some(0);
            },
            // Ignore input while no search is active.
            None => return,
        }
        let regex = &mut self.search_state.history[0];
        match c {
            // Handle backspace/ctrl+h.
            '\x08' | '\x7f' => {
                let _ = regex.pop();
            },
            // Add ascii and unicode text.
            ' '..='~' | '\u{a0}'..='\u{10ffff}' => regex.push(c),
            // Ignore non-printable characters.
            _ => return,
        }
        if !self.terminal.mode().contains(TermMode::VI) {
            // Clear selection so we do not obstruct any matches.
            self.terminal.selection = None;
        }
        self.update_search();
    }
    /// Remove the last word from the active search regex.
    #[inline]
    fn search_pop_word(&mut self) {
        if let Some(regex) = self.search_state.regex_mut() {
            *regex = regex.trim_end().to_owned();
            // Truncate up to (and including) the last remaining space.
            regex.truncate(regex.rfind(' ').map_or(0, |i| i + 1));
            self.update_search();
        }
    }
    /// Go to the previous regex in the search history.
    #[inline]
    fn search_history_previous(&mut self) {
        let index = match &mut self.search_state.history_index {
            None => return,
            // Don't walk past the oldest history entry.
            Some(index) if *index + 1 >= self.search_state.history.len() => return,
            Some(index) => index,
        };
        *index += 1;
        self.update_search();
    }
    /// Go to the next regex in the search history.
    #[inline]
    fn search_history_next(&mut self) {
        let index = match &mut self.search_state.history_index {
            Some(0) | None => return,
            Some(index) => index,
        };
        *index -= 1;
        self.update_search();
    }
    /// Move the search origin past the focused match in the given direction.
    #[inline]
    fn advance_search_origin(&mut self, direction: Direction) {
        // Use focused match as new search origin if available.
        if let Some(focused_match) = &self.search_state.focused_match {
            let new_origin = match direction {
                Direction::Right => focused_match.end().add(self.terminal, Boundary::None, 1),
                Direction::Left => focused_match.start().sub(self.terminal, Boundary::None, 1),
            };
            self.terminal.scroll_to_point(new_origin);
            self.search_state.display_offset_delta = 0;
            self.search_state.origin = new_origin;
        }
        // Search for the next match using the supplied direction.
        let search_direction = mem::replace(&mut self.search_state.direction, direction);
        self.goto_match(None);
        self.search_state.direction = search_direction;
        // If we found a match, we set the search origin right in front of it to make sure that
        // after modifications to the regex the search is started without moving the focused match
        // around.
        let focused_match = match &self.search_state.focused_match {
            Some(focused_match) => focused_match,
            None => return,
        };
        // Set new origin to the left/right of the match, depending on search direction.
        let new_origin = match self.search_state.direction {
            Direction::Right => *focused_match.start(),
            Direction::Left => *focused_match.end(),
        };
        // Store the search origin with display offset by checking how far we need to scroll to it.
        let old_display_offset = self.terminal.grid().display_offset() as i32;
        self.terminal.scroll_to_point(new_origin);
        let new_display_offset = self.terminal.grid().display_offset() as i32;
        self.search_state.display_offset_delta = new_display_offset - old_display_offset;
        // Store origin and scroll back to the match.
        self.terminal.scroll_display(Scroll::Delta(-self.search_state.display_offset_delta));
        self.search_state.origin = new_origin;
    }
    /// Find the next search match.
    fn search_next(&mut self, origin: Point, direction: Direction, side: Side) -> Option<Match> {
        self.search_state
            .dfas
            .as_mut()
            .and_then(|dfas| self.terminal.search_next(dfas, origin, direction, side, None))
    }
    /// Direction of the active search.
    #[inline]
    fn search_direction(&self) -> Direction {
        self.search_state.direction
    }
    /// Whether a search is currently in progress.
    #[inline]
    fn search_active(&self) -> bool {
        self.search_state.history_index.is_some()
    }
    /// Handle keyboard typing start.
    ///
    /// This will temporarily disable some features like terminal cursor blinking or the mouse
    /// cursor.
    ///
    /// All features are re-enabled again automatically.
    #[inline]
    fn on_typing_start(&mut self) {
        // Disable cursor blinking.
        let timer_id = TimerId::new(Topic::BlinkCursor, self.display.window.id());
        if self.scheduler.unschedule(timer_id).is_some() {
            // Restart the blink timer so the cursor stays solid while typing.
            self.schedule_blinking();
            // Mark the cursor as visible and queue redraw if the cursor was hidden.
            if mem::take(&mut self.display.cursor_hidden) {
                *self.dirty = true;
            }
        } else if *self.cursor_blink_timed_out {
            self.update_cursor_blinking();
        }
        // Hide mouse cursor.
        if self.config.mouse.hide_when_typing {
            self.display.window.set_mouse_visible(false);
        }
    }
    /// Process a new character for keyboard hints.
    fn hint_input(&mut self, c: char) {
        if let Some(hint) = self.display.hint_state.keyboard_input(self.terminal, c) {
            self.mouse.block_hint_launcher = false;
            self.trigger_hint(&hint);
        }
        *self.dirty = true;
    }
    /// Trigger a hint action.
    fn trigger_hint(&mut self, hint: &HintMatch) {
        if self.mouse.block_hint_launcher {
            return;
        }
        let hint_bounds = hint.bounds();
        let text = match hint.text(self.terminal) {
            Some(text) => text,
            None => return,
        };
        match &hint.action() {
            // Launch an external program.
            HintAction::Command(command) => {
                let mut args = command.args().to_vec();
                args.push(text.into());
                self.spawn_daemon(command.program(), &args);
            },
            // Copy the text to the clipboard.
            HintAction::Action(HintInternalAction::Copy) => {
                self.clipboard.store(ClipboardType::Clipboard, text);
            },
            // Write the text to the PTY/search.
            HintAction::Action(HintInternalAction::Paste) => self.paste(&text, true),
            // Select the text.
            HintAction::Action(HintInternalAction::Select) => {
                self.start_selection(SelectionType::Simple, *hint_bounds.start(), Side::Left);
                self.update_selection(*hint_bounds.end(), Side::Right);
                self.copy_selection(ClipboardType::Selection);
            },
            // Move the vi mode cursor.
            HintAction::Action(HintInternalAction::MoveViModeCursor) => {
                // Enter vi mode if we're not in it already.
                if !self.terminal.mode().contains(TermMode::VI) {
                    self.terminal.toggle_vi_mode();
                }
                self.terminal.vi_goto_point(*hint_bounds.start());
                self.mark_dirty();
            },
        }
    }
    /// Expand the selection to the current mouse cursor position.
    #[inline]
    fn expand_selection(&mut self) {
        let control = self.modifiers().state().control_key();
        // Map click count to selection type; control forces block selection.
        let selection_type = match self.mouse().click_state {
            ClickState::None => return,
            _ if control => SelectionType::Block,
            ClickState::Click => SelectionType::Simple,
            ClickState::DoubleClick => SelectionType::Semantic,
            ClickState::TripleClick => SelectionType::Lines,
        };
        // Load mouse point, treating message bar and padding as the closest cell.
        let display_offset = self.terminal().grid().display_offset();
        let point = self.mouse().point(&self.size_info(), display_offset);
        let cell_side = self.mouse().cell_side;
        let selection = match &mut self.terminal_mut().selection {
            Some(selection) => selection,
            None => return,
        };
        selection.ty = selection_type;
        self.update_selection(point, cell_side);
        // Move vi mode cursor to mouse click position.
        if self.terminal().mode().contains(TermMode::VI) && !self.search_active() {
            self.terminal_mut().vi_mode_cursor.point = point;
        }
    }
    /// Get the semantic word at the specified point.
    fn semantic_word(&self, point: Point) -> String {
        let terminal = self.terminal();
        let grid = terminal.grid();
        // Find the next semantic word boundary to the right.
        let mut end = terminal.semantic_search_right(point);
        // Get point at which skipping over semantic characters has led us back to the
        // original character.
        let start_cell = &grid[point];
        let search_end = if start_cell.flags.intersects(Flags::LEADING_WIDE_CHAR_SPACER) {
            point.add(terminal, Boundary::None, 2)
        } else if start_cell.flags.intersects(Flags::WIDE_CHAR) {
            point.add(terminal, Boundary::None, 1)
        } else {
            point
        };
        // Keep moving until we're not on top of a semantic escape character.
        let semantic_chars = terminal.semantic_escape_chars();
        loop {
            let cell = &grid[end];
            // Get cell's character, taking wide characters into account.
            let c = if cell.flags.contains(Flags::WIDE_CHAR_SPACER) {
                grid[end.sub(terminal, Boundary::None, 1)].c
            } else {
                cell.c
            };
            if !semantic_chars.contains(c) {
                break;
            }
            end = terminal.semantic_search_right(end.add(terminal, Boundary::None, 1));
            // Stop if the entire grid is only semantic escape characters.
            if end == search_end {
                return String::new();
            }
        }
        // Find the beginning of the semantic word.
        let start = terminal.semantic_search_left(end);
        terminal.bounds_to_string(start, end)
    }
/// Handle beginning of terminal text input.
fn on_terminal_input_start(&mut self) {
self.on_typing_start();
self.clear_selection();
if self.terminal().grid().display_offset() != 0 {
self.scroll(Scroll::Bottom);
}
}
    /// Paste a text into the terminal.
    fn paste(&mut self, text: &str, bracketed: bool) {
        if self.search_active() {
            // Route the pasted text into the search bar instead of the PTY.
            for c in text.chars() {
                self.search_input(c);
            }
        } else if self.inline_search_state.char_pending {
            self.inline_search_input(text);
        } else if bracketed && self.terminal().mode().contains(TermMode::BRACKETED_PASTE) {
            self.on_terminal_input_start();
            self.write_to_pty(&b"\x1b[200~"[..]);
            // Write filtered escape sequences.
            //
            // We remove `\x1b` to ensure it's impossible for the pasted text to write the bracketed
            // paste end escape `\x1b[201~` and `\x03` since some shells incorrectly terminate
            // bracketed paste when they receive it.
            let filtered = text.replace(['\x1b', '\x03'], "");
            self.write_to_pty(filtered.into_bytes());
            self.write_to_pty(&b"\x1b[201~"[..]);
        } else {
            self.on_terminal_input_start();
            let payload = if bracketed {
                // In non-bracketed (ie: normal) mode, terminal applications cannot distinguish
                // pasted data from keystrokes.
                //
                // In theory, we should construct the keystrokes needed to produce the data we are
                // pasting... since that's neither practical nor sensible (and probably an
                // impossible task to solve in a general way), we'll just replace line breaks
                // (windows and unix style) with a single carriage return (\r, which is what the
                // Enter key produces).
                text.replace("\r\n", "\r").replace('\n', "\r").into_bytes()
            } else {
                // When we explicitly disable bracketed paste don't manipulate with the input,
                // so we pass user input as is.
                text.to_owned().into_bytes()
            };
            self.write_to_pty(payload);
        }
    }
    /// Toggle the vi mode status.
    #[inline]
    fn toggle_vi_mode(&mut self) {
        let was_in_vi_mode = self.terminal.mode().contains(TermMode::VI);
        if was_in_vi_mode {
            // If we had search running when leaving Vi mode we should mark terminal fully damaged
            // to cleanup highlighted results.
            if self.search_state.dfas.take().is_some() {
                self.display.damage_tracker.frame().mark_fully_damaged();
            }
        } else {
            self.clear_selection();
        }
        if self.search_active() {
            self.cancel_search();
        }
        // We don't want IME in Vi mode.
        self.window().set_ime_allowed(was_in_vi_mode);
        self.terminal.toggle_vi_mode();
        *self.dirty = true;
    }
    /// Get vi inline search state.
    fn inline_search_state(&mut self) -> &mut InlineSearchState {
        self.inline_search_state
    }
    /// Start vi mode inline search.
    fn start_inline_search(&mut self, direction: Direction, stop_short: bool) {
        self.inline_search_state.stop_short = stop_short;
        self.inline_search_state.direction = direction;
        // Wait for the next character input before searching.
        self.inline_search_state.char_pending = true;
        self.inline_search_state.character = None;
    }
/// Jump to the next matching character in the line.
fn inline_search_next(&mut self) {
let direction = self.inline_search_state.direction;
self.inline_search(direction);
}
/// Jump to the next matching character in the line.
fn inline_search_previous(&mut self) {
let direction = self.inline_search_state.direction.opposite();
self.inline_search(direction);
}
    /// Process input during inline search.
    fn inline_search_input(&mut self, text: &str) {
        // Ignore input with empty text, like modifier keys.
        let c = match text.chars().next() {
            Some(c) => c,
            None => return,
        };
        self.inline_search_state.char_pending = false;
        self.inline_search_state.character = Some(c);
        // Disable IME again now that character input is done.
        self.window().set_ime_allowed(false);
        // Immediately move to the captured character.
        self.inline_search_next();
    }
    /// Message bar message to display, if any.
    fn message(&self) -> Option<&Message> {
        self.message_buffer.message()
    }
    /// UI configuration.
    fn config(&self) -> &UiConfig {
        self.config
    }
    /// Handle to the active winit event loop.
    #[cfg(target_os = "macos")]
    fn event_loop(&self) -> &ActiveEventLoop {
        self.event_loop
    }
    /// Mutable access to the clipboard.
    fn clipboard_mut(&mut self) -> &mut Clipboard {
        self.clipboard
    }
    /// Mutable access to the timer scheduler.
    fn scheduler_mut(&mut self) -> &mut Scheduler {
        self.scheduler
    }
}
impl<'a, N: Notify + 'a, T: EventListener> ActionContext<'a, N, T> {
    /// Re-run the search for the current regex, updating match highlighting.
    fn update_search(&mut self) {
        let regex = match self.search_state.regex() {
            Some(regex) => regex,
            None => return,
        };
        // Hide cursor while typing into the search bar.
        if self.config.mouse.hide_when_typing {
            self.display.window.set_mouse_visible(false);
        }
        if regex.is_empty() {
            // Stop search if there's nothing to search for.
            self.search_reset_state();
            self.search_state.dfas = None;
        } else {
            // Create search dfas for the new regex string.
            self.search_state.dfas = RegexSearch::new(regex).ok();
            // Update search highlighting.
            self.goto_match(MAX_SEARCH_WHILE_TYPING);
        }
        *self.dirty = true;
    }
    /// Reset terminal to the state before search was started.
    fn search_reset_state(&mut self) {
        // Unschedule pending timers.
        let timer_id = TimerId::new(Topic::DelayedSearch, self.display.window.id());
        self.scheduler.unschedule(timer_id);
        // Clear focused match.
        self.search_state.focused_match = None;
        // The viewport reset logic is only needed for vi mode, since without it our origin is
        // always at the current display offset instead of at the vi cursor position which we need
        // to recover to.
        if !self.terminal.mode().contains(TermMode::VI) {
            return;
        }
        // Reset display offset and cursor position.
        self.terminal.vi_mode_cursor.point = self.search_state.origin;
        self.terminal.scroll_display(Scroll::Delta(self.search_state.display_offset_delta));
        self.search_state.display_offset_delta = 0;
        *self.dirty = true;
    }
    /// Jump to the first regex match from the search origin.
    fn goto_match(&mut self, mut limit: Option<usize>) {
        let dfas = match &mut self.search_state.dfas {
            Some(dfas) => dfas,
            None => return,
        };
        // Limit search only when enough lines are available to run into the limit.
        limit = limit.filter(|&limit| limit <= self.terminal.total_lines());
        // Jump to the next match.
        let direction = self.search_state.direction;
        let clamped_origin = self.search_state.origin.grid_clamp(self.terminal, Boundary::Grid);
        match self.terminal.search_next(dfas, clamped_origin, direction, Side::Left, limit) {
            Some(regex_match) => {
                let old_offset = self.terminal.grid().display_offset() as i32;
                if self.terminal.mode().contains(TermMode::VI) {
                    // Move vi cursor to the start of the match.
                    self.terminal.vi_goto_point(*regex_match.start());
                } else {
                    // Select the match when vi mode is not active.
                    self.terminal.scroll_to_point(*regex_match.start());
                }
                // Update the focused match.
                self.search_state.focused_match = Some(regex_match);
                // Store number of lines the viewport had to be moved.
                let display_offset = self.terminal.grid().display_offset();
                self.search_state.display_offset_delta += old_offset - display_offset as i32;
                // Since we found a result, we require no delayed re-search.
                let timer_id = TimerId::new(Topic::DelayedSearch, self.display.window.id());
                self.scheduler.unschedule(timer_id);
            },
            // Reset viewport only when we know there is no match, to prevent unnecessary jumping.
            None if limit.is_none() => self.search_reset_state(),
            None => {
                // Schedule delayed search if we ran into our search limit.
                let timer_id = TimerId::new(Topic::DelayedSearch, self.display.window.id());
                if !self.scheduler.scheduled(timer_id) {
                    let event = Event::new(EventType::SearchNext, self.display.window.id());
                    self.scheduler.schedule(event, TYPING_SEARCH_DELAY, false, timer_id);
                }
                // Clear focused match.
                self.search_state.focused_match = None;
            },
        }
        *self.dirty = true;
    }
    /// Cleanup the search state.
    fn exit_search(&mut self) {
        // Re-enable IME unless we remain in vi mode.
        let vi_mode = self.terminal.mode().contains(TermMode::VI);
        self.window().set_ime_allowed(!vi_mode);
        self.display.damage_tracker.frame().mark_fully_damaged();
        self.display.pending_update.dirty = true;
        self.search_state.history_index = None;
        // Clear focused match.
        self.search_state.focused_match = None;
    }
    /// Update the cursor blinking state.
    fn update_cursor_blinking(&mut self) {
        // Get config cursor style.
        let mut cursor_style = self.config.cursor.style;
        let vi_mode = self.terminal.mode().contains(TermMode::VI);
        if vi_mode {
            cursor_style = self.config.cursor.vi_mode_style.unwrap_or(cursor_style);
        }
        // Check terminal cursor style.
        let terminal_blinking = self.terminal.cursor_style().blinking;
        let mut blinking = cursor_style.blinking_override().unwrap_or(terminal_blinking);
        // Only blink when the cursor is shown and no IME preedit is active.
        blinking &= (vi_mode || self.terminal().mode().contains(TermMode::SHOW_CURSOR))
            && self.display().ime.preedit().is_none();
        // Update cursor blinking state.
        let window_id = self.display.window.id();
        self.scheduler.unschedule(TimerId::new(Topic::BlinkCursor, window_id));
        self.scheduler.unschedule(TimerId::new(Topic::BlinkTimeout, window_id));
        // Reset blinking timeout.
        *self.cursor_blink_timed_out = false;
        if blinking && self.terminal.is_focused {
            self.schedule_blinking();
            self.schedule_blinking_timeout();
        } else {
            self.display.cursor_hidden = false;
            *self.dirty = true;
        }
    }
fn schedule_blinking(&mut self) {
let window_id = self.display.window.id();
let timer_id = TimerId::new(Topic::BlinkCursor, window_id);
let event = Event::new(EventType::BlinkCursor, window_id);
let blinking_interval = Duration::from_millis(self.config.cursor.blink_interval());
self.scheduler.schedule(event, blinking_interval, true, timer_id);
}
fn schedule_blinking_timeout(&mut self) {
let blinking_timeout = self.config.cursor.blink_timeout();
if blinking_timeout == Duration::ZERO {
return;
}
let window_id = self.display.window.id();
let event = Event::new(EventType::BlinkCursorTimeout, window_id);
let timer_id = TimerId::new(Topic::BlinkTimeout, window_id);
self.scheduler.schedule(event, blinking_timeout, false, timer_id);
}
    /// Perform vi mode inline search in the specified direction.
    fn inline_search(&mut self, direction: Direction) {
        // Without a captured character there's nothing to search for.
        let c = match self.inline_search_state.character {
            Some(c) => c,
            None => return,
        };
        let mut buf = [0; 4];
        let search_character = c.encode_utf8(&mut buf);
        // Find next match in this line.
        let vi_point = self.terminal.vi_mode_cursor.point;
        let point = match direction {
            Direction::Right => self.terminal.inline_search_right(vi_point, search_character),
            Direction::Left => self.terminal.inline_search_left(vi_point, search_character),
        };
        // Jump to point if there's a match.
        if let Ok(mut point) = point {
            if self.inline_search_state.stop_short {
                // Stop one cell before the matching character.
                let grid = self.terminal.grid();
                point = match direction {
                    Direction::Right => {
                        grid.iter_from(point).prev().map_or(point, |cell| cell.point)
                    },
                    Direction::Left => {
                        grid.iter_from(point).next().map_or(point, |cell| cell.point)
                    },
                };
            }
            self.terminal.vi_goto_point(point);
            self.mark_dirty();
        }
    }
}
/// Identified purpose of the touch input.
#[derive(Debug)]
pub enum TouchPurpose {
    /// No active touch input.
    None,
    /// Touch input handled as selection.
    Select(TouchEvent),
    /// Touch input handled as scrolling.
    Scroll(TouchEvent),
    /// Two-finger font size zooming (see [`TouchZoom`]).
    Zoom(TouchZoom),
    /// Touch which has not been classified yet; may become a tap.
    Tap(TouchEvent),
    /// Touch interaction to be ignored; tracks the offending slot ids.
    Invalid(HashSet<u64, RandomState>),
}
impl Default for TouchPurpose {
fn default() -> Self {
Self::None
}
}
/// Touch zooming state.
#[derive(Debug)]
pub struct TouchZoom {
    /// The two touch slots tracked for pinch zooming.
    slots: (TouchEvent, TouchEvent),
    /// Accumulated font size change below the `FONT_SIZE_STEP` threshold.
    fractions: f32,
}
impl TouchZoom {
    /// Create a new zoom state from the two active touch slots.
    pub fn new(slots: (TouchEvent, TouchEvent)) -> Self {
        Self { slots, fractions: Default::default() }
    }
    /// Get slot distance change since last update.
    pub fn font_delta(&mut self, slot: TouchEvent) -> f32 {
        let old_distance = self.distance();
        // Update touch slots.
        if slot.id == self.slots.0.id {
            self.slots.0 = slot;
        } else {
            self.slots.1 = slot;
        }
        // Calculate font change in `FONT_SIZE_STEP` increments.
        let delta = (self.distance() - old_distance) * TOUCH_ZOOM_FACTOR + self.fractions;
        let font_delta = (delta.abs() / FONT_SIZE_STEP).floor() * FONT_SIZE_STEP * delta.signum();
        // Carry the sub-step remainder over to the next update.
        self.fractions = delta - font_delta;
        font_delta
    }
    /// Get active touch slots.
    pub fn slots(&self) -> HashSet<u64, RandomState> {
        let mut set = HashSet::default();
        set.insert(self.slots.0.id);
        set.insert(self.slots.1.id);
        set
    }
    /// Calculate distance between slots.
    fn distance(&self) -> f32 {
        let delta_x = self.slots.0.location.x - self.slots.1.location.x;
        let delta_y = self.slots.0.location.y - self.slots.1.location.y;
        delta_x.hypot(delta_y) as f32
    }
}
/// State of the mouse.
#[derive(Debug)]
pub struct Mouse {
    /// State of the left mouse button.
    pub left_button_state: ElementState,
    /// State of the middle mouse button.
    pub middle_button_state: ElementState,
    /// State of the right mouse button.
    pub right_button_state: ElementState,
    /// Time of the last mouse click.
    pub last_click_timestamp: Instant,
    /// Button pressed by the last click.
    pub last_click_button: MouseButton,
    /// Multi-click state of the last click.
    pub click_state: ClickState,
    /// Scroll amount accumulated from pointer events.
    pub accumulated_scroll: AccumulatedScroll,
    /// Side of the cell the mouse cursor is on.
    pub cell_side: Side,
    /// Whether launching the hovered hint is suppressed.
    pub block_hint_launcher: bool,
    /// Whether the hint highlight needs to be recomputed.
    pub hint_highlight_dirty: bool,
    /// Whether the cursor is inside the text area.
    pub inside_text_area: bool,
    /// Horizontal cursor position in pixels (see `Mouse::point`).
    pub x: usize,
    /// Vertical cursor position in pixels (see `Mouse::point`).
    pub y: usize,
}
impl Default for Mouse {
    fn default() -> Mouse {
        // All buttons start out released with no click on record.
        Mouse {
            left_button_state: ElementState::Released,
            middle_button_state: ElementState::Released,
            right_button_state: ElementState::Released,
            last_click_button: MouseButton::Left,
            last_click_timestamp: Instant::now(),
            click_state: ClickState::None,
            cell_side: Side::Left,
            accumulated_scroll: Default::default(),
            block_hint_launcher: Default::default(),
            hint_highlight_dirty: Default::default(),
            inside_text_area: Default::default(),
            x: Default::default(),
            y: Default::default(),
        }
    }
}
impl Mouse {
    /// Convert mouse pixel coordinates to viewport point.
    ///
    /// If the coordinates are outside of the terminal grid, like positions inside the padding, the
    /// coordinates will be clamped to the closest grid coordinates.
    #[inline]
    pub fn point(&self, size: &SizeInfo, display_offset: usize) -> Point {
        // Clamp the column to the rightmost grid column.
        let col = self.x.saturating_sub(size.padding_x() as usize) / (size.cell_width() as usize);
        let col = min(Column(col), size.last_column());
        // Clamp the line to the bottommost grid line.
        let line = self.y.saturating_sub(size.padding_y() as usize) / (size.cell_height() as usize);
        let line = min(line, size.bottommost_line().0 as usize);
        term::viewport_to_point(display_offset, Point::new(line, col))
    }
}
/// Multi-click state of the mouse.
#[derive(Debug, Eq, PartialEq)]
pub enum ClickState {
    /// No click registered.
    None,
    /// Single click.
    Click,
    /// Second consecutive click.
    DoubleClick,
    /// Third consecutive click.
    TripleClick,
}
/// The amount of scroll accumulated from the pointer events.
// NOTE(review): fields are f64, so sub-line deltas appear to be accumulated fractionally
// until consumed by the scroll handling — confirm against the wheel/touch input code.
#[derive(Default, Debug)]
pub struct AccumulatedScroll {
    /// Scroll we should perform along `x` axis.
    pub x: f64,
    /// Scroll we should perform along `y` axis.
    pub y: f64,
}
impl input::Processor<EventProxy, ActionContext<'_, Notifier, EventProxy>> {
    /// Handle events from winit.
    ///
    /// Top-level dispatcher: user events arriving through the event loop proxy (including
    /// terminal events) and window events from winit are routed to the matching
    /// `ActionContext`/processor handlers from here.
    pub fn handle_event(&mut self, event: WinitEvent<Event>) {
        match event {
            // Events injected by Alacritty itself through the event loop proxy.
            WinitEvent::UserEvent(Event { payload, .. }) => match payload {
                EventType::SearchNext => self.ctx.goto_match(None),
                EventType::Scroll(scroll) => self.ctx.scroll(scroll),
                EventType::BlinkCursor => {
                    // Only change state when timeout isn't reached, since we could get
                    // BlinkCursor and BlinkCursorTimeout events at the same time.
                    if !*self.ctx.cursor_blink_timed_out {
                        self.ctx.display.cursor_hidden ^= true;
                        *self.ctx.dirty = true;
                    }
                },
                EventType::BlinkCursorTimeout => {
                    // Disable blinking after timeout reached.
                    let timer_id = TimerId::new(Topic::BlinkCursor, self.ctx.display.window.id());
                    self.ctx.scheduler.unschedule(timer_id);
                    *self.ctx.cursor_blink_timed_out = true;
                    self.ctx.display.cursor_hidden = false;
                    *self.ctx.dirty = true;
                },
                // Add message only if it's not already queued.
                // NOTE: the arm order matters — already-queued messages fall through to the
                // ignored `EventType::Message(_)` arm below.
                EventType::Message(message) if !self.ctx.message_buffer.is_queued(&message) => {
                    self.ctx.message_buffer.push(message);
                    self.ctx.display.pending_update.dirty = true;
                },
                // Events emitted by the terminal emulation.
                EventType::Terminal(event) => match event {
                    TerminalEvent::Title(title) => {
                        if !self.ctx.preserve_title && self.ctx.config.window.dynamic_title {
                            self.ctx.window().set_title(title);
                        }
                    },
                    TerminalEvent::ResetTitle => {
                        let window_config = &self.ctx.config.window;
                        if !self.ctx.preserve_title && window_config.dynamic_title {
                            self.ctx.display.window.set_title(window_config.identity.title.clone());
                        }
                    },
                    TerminalEvent::Bell => {
                        // Set window urgency hint when window is not focused.
                        let focused = self.ctx.terminal.is_focused;
                        if !focused && self.ctx.terminal.mode().contains(TermMode::URGENCY_HINTS) {
                            self.ctx.window().set_urgent(true);
                        }
                        // Ring visual bell.
                        self.ctx.display.visual_bell.ring();
                        // Execute bell command.
                        if let Some(bell_command) = &self.ctx.config.bell.command {
                            self.ctx.spawn_daemon(bell_command.program(), bell_command.args());
                        }
                    },
                    // Clipboard access is only honored while the terminal is focused.
                    TerminalEvent::ClipboardStore(clipboard_type, content) => {
                        if self.ctx.terminal.is_focused {
                            self.ctx.clipboard.store(clipboard_type, content);
                        }
                    },
                    TerminalEvent::ClipboardLoad(clipboard_type, format) => {
                        if self.ctx.terminal.is_focused {
                            let text = format(self.ctx.clipboard.load(clipboard_type).as_str());
                            self.ctx.write_to_pty(text.into_bytes());
                        }
                    },
                    TerminalEvent::ColorRequest(index, format) => {
                        let color = match self.ctx.terminal().colors()[index] {
                            Some(color) => Rgb(color),
                            // Ignore cursor color requests unless it was changed.
                            None if index == NamedColor::Cursor as usize => return,
                            None => self.ctx.display.colors[index],
                        };
                        self.ctx.write_to_pty(format(color.0).into_bytes());
                    },
                    TerminalEvent::TextAreaSizeRequest(format) => {
                        let text = format(self.ctx.size_info().into());
                        self.ctx.write_to_pty(text.into_bytes());
                    },
                    TerminalEvent::PtyWrite(text) => self.ctx.write_to_pty(text.into_bytes()),
                    TerminalEvent::MouseCursorDirty => self.reset_mouse_cursor(),
                    TerminalEvent::CursorBlinkingChange => self.ctx.update_cursor_blinking(),
                    TerminalEvent::Exit | TerminalEvent::ChildExit(_) | TerminalEvent::Wakeup => (),
                },
                #[cfg(unix)]
                EventType::IpcConfig(_) => (),
                // Remaining user events (including `Message` that failed the dedup guard above)
                // require no action here.
                EventType::Message(_)
                | EventType::ConfigReload(_)
                | EventType::CreateWindow(_)
                | EventType::Frame => (),
            },
            WinitEvent::WindowEvent { event, .. } => {
                match event {
                    WindowEvent::CloseRequested => {
                        // User asked to close the window, so no need to hold it.
                        self.ctx.window().hold = false;
                        self.ctx.terminal.exit();
                    },
                    WindowEvent::ScaleFactorChanged { scale_factor, .. } => {
                        let old_scale_factor =
                            mem::replace(&mut self.ctx.window().scale_factor, scale_factor);
                        let display_update_pending = &mut self.ctx.display.pending_update;
                        // Rescale font size for the new factor.
                        let font_scale = scale_factor as f32 / old_scale_factor as f32;
                        self.ctx.display.font_size = self.ctx.display.font_size.scale(font_scale);
                        let font = self.ctx.config.font.clone();
                        display_update_pending.set_font(font.with_size(self.ctx.display.font_size));
                    },
                    WindowEvent::Resized(size) => {
                        // Ignore resize events to zero in any dimension, to avoid issues with Winit
                        // and the ConPTY. A 0x0 resize will also occur when the window is minimized
                        // on Windows.
                        if size.width == 0 || size.height == 0 {
                            return;
                        }
                        self.ctx.display.pending_update.set_dimensions(size);
                    },
                    WindowEvent::KeyboardInput { event, is_synthetic: false, .. } => {
                        self.key_input(event);
                    },
                    WindowEvent::ModifiersChanged(modifiers) => self.modifiers_input(modifiers),
                    // Pointer interaction makes the mouse cursor visible again.
                    WindowEvent::MouseInput { state, button, .. } => {
                        self.ctx.window().set_mouse_visible(true);
                        self.mouse_input(state, button);
                    },
                    WindowEvent::CursorMoved { position, .. } => {
                        self.ctx.window().set_mouse_visible(true);
                        self.mouse_moved(position);
                    },
                    WindowEvent::MouseWheel { delta, phase, .. } => {
                        self.ctx.window().set_mouse_visible(true);
                        self.mouse_wheel_input(delta, phase);
                    },
                    WindowEvent::Touch(touch) => self.touch(touch),
                    WindowEvent::Focused(is_focused) => {
                        self.ctx.terminal.is_focused = is_focused;
                        // When the unfocused hollow is used we must redraw on focus change.
                        if self.ctx.config.cursor.unfocused_hollow {
                            *self.ctx.dirty = true;
                        }
                        // Reset the urgency hint when gaining focus.
                        if is_focused {
                            self.ctx.window().set_urgent(false);
                        }
                        self.ctx.update_cursor_blinking();
                        self.on_focus_change(is_focused);
                    },
                    WindowEvent::Occluded(occluded) => {
                        *self.ctx.occluded = occluded;
                    },
                    // Paste the dropped path, followed by a space, into the terminal.
                    WindowEvent::DroppedFile(path) => {
                        let path: String = path.to_string_lossy().into();
                        self.ctx.paste(&(path + " "), true);
                    },
                    WindowEvent::CursorLeft { .. } => {
                        self.ctx.mouse.inside_text_area = false;
                        if self.ctx.display().highlighted_hint.is_some() {
                            *self.ctx.dirty = true;
                        }
                    },
                    WindowEvent::Ime(ime) => match ime {
                        Ime::Commit(text) => {
                            *self.ctx.dirty = true;
                            // Don't use bracketed paste for single char input.
                            self.ctx.paste(&text, text.chars().count() > 1);
                            self.ctx.update_cursor_blinking();
                        },
                        Ime::Preedit(text, cursor_offset) => {
                            let preedit =
                                (!text.is_empty()).then(|| Preedit::new(text, cursor_offset));
                            // Only redraw when the preedit actually changed.
                            if self.ctx.display.ime.preedit() != preedit.as_ref() {
                                self.ctx.display.ime.set_preedit(preedit);
                                self.ctx.update_cursor_blinking();
                                *self.ctx.dirty = true;
                            }
                        },
                        Ime::Enabled => {
                            self.ctx.display.ime.set_enabled(true);
                            *self.ctx.dirty = true;
                        },
                        Ime::Disabled => {
                            self.ctx.display.ime.set_enabled(false);
                            *self.ctx.dirty = true;
                        },
                    },
                    // Window events requiring no action.
                    WindowEvent::KeyboardInput { is_synthetic: true, .. }
                    | WindowEvent::ActivationTokenDone { .. }
                    | WindowEvent::DoubleTapGesture { .. }
                    | WindowEvent::TouchpadPressure { .. }
                    | WindowEvent::RotationGesture { .. }
                    | WindowEvent::CursorEntered { .. }
                    | WindowEvent::PinchGesture { .. }
                    | WindowEvent::AxisMotion { .. }
                    | WindowEvent::PanGesture { .. }
                    | WindowEvent::HoveredFileCancelled
                    | WindowEvent::Destroyed
                    | WindowEvent::ThemeChanged(_)
                    | WindowEvent::HoveredFile(_)
                    | WindowEvent::RedrawRequested
                    | WindowEvent::Moved(_) => (),
                }
            },
            // Event-loop lifecycle events requiring no action.
            WinitEvent::Suspended
            | WinitEvent::NewEvents { .. }
            | WinitEvent::DeviceEvent { .. }
            | WinitEvent::LoopExiting
            | WinitEvent::Resumed
            | WinitEvent::MemoryWarning
            | WinitEvent::AboutToWait => (),
        }
    }
}
/// Event-loop proxy which tags every sent event with its originating window.
#[derive(Debug, Clone)]
pub struct EventProxy {
    // Handle used to inject events into the winit event loop.
    proxy: EventLoopProxy<Event>,
    // Window the events sent through this proxy are attributed to.
    window_id: WindowId,
}
impl EventProxy {
pub fn new(proxy: EventLoopProxy<Event>, window_id: WindowId) -> Self {
Self { proxy, window_id }
}
/// Send an event to the event loop.
pub fn send_event(&self, event: EventType) {
let _ = self.proxy.send_event(Event::new(event, self.window_id));
}
}
impl EventListener for EventProxy {
    /// Forward a terminal event into the winit event loop, tagged with this proxy's window.
    fn send_event(&self, event: TerminalEvent) {
        // Delivery errors are ignored; they only happen after the event loop terminated.
        let wrapped = Event::new(event.into(), self.window_id);
        let _ = self.proxy.send_event(wrapped);
    }
}
| rust | {
"argument_definitions": [
{
"definitions": [
"pub struct Point<L = Line, C = Column> {\n pub line: L,\n pub column: C,\n}"
],
"name": "point",
"type": "Point"
}
],
"end_line": 1314,
"name": "semantic_word",
"signature": "fn semantic_word(&self, point: Point) -> String",
"start_line": 1268
} | {
"class_name": "impl<'a, N: Notify + 'a, T: EventListener> input::ActionContext<T> for ActionContext<'a, N, T> {\n #[inline]\n fn write_to_pty<B: Into<Cow<'static, [u8]>>>(&self, val: B) {\n self.notifier.notify(val);\n }\n\n /// Request a redraw.\n #[inline]\n fn mark_dirty(&mut self) {\n *self.dirty = true;\n }\n\n #[inline]\n fn size_info(&self) -> SizeInfo {\n self.display.size_info\n }\n\n fn scroll(&mut self, scroll: Scroll) {\n let old_offset = self.terminal.grid().display_offset() as i32;\n\n let old_vi_cursor = self.terminal.vi_mode_cursor;\n self.terminal.scroll_display(scroll);\n\n let lines_changed = old_offset - self.terminal.grid().display_offset() as i32;\n\n // Keep track of manual display offset changes during search.\n if self.search_active() {\n self.search_state.display_offset_delta += lines_changed;\n }\n\n let vi_mode = self.terminal.mode().contains(TermMode::VI);\n\n // Update selection.\n if vi_mode && self.terminal.selection.as_ref().is_some_and(|s| !s.is_empty()) {\n self.update_selection(self.terminal.vi_mode_cursor.point, Side::Right);\n } else if self.mouse.left_button_state == ElementState::Pressed\n || self.mouse.right_button_state == ElementState::Pressed\n {\n let display_offset = self.terminal.grid().display_offset();\n let point = self.mouse.point(&self.size_info(), display_offset);\n self.update_selection(point, self.mouse.cell_side);\n }\n\n // Scrolling inside Vi mode moves the cursor, so start typing.\n if vi_mode {\n self.on_typing_start();\n }\n\n // Update dirty if actually scrolled or moved Vi cursor in Vi mode.\n *self.dirty |=\n lines_changed != 0 || (vi_mode && old_vi_cursor != self.terminal.vi_mode_cursor);\n }\n\n // Copy text selection.\n fn copy_selection(&mut self, ty: ClipboardType) {\n let text = match self.terminal.selection_to_string().filter(|s| !s.is_empty()) {\n Some(text) => text,\n None => return,\n };\n\n if ty == ClipboardType::Selection && self.config.selection.save_to_clipboard {\n 
self.clipboard.store(ClipboardType::Clipboard, text.clone());\n }\n self.clipboard.store(ty, text);\n }\n\n fn selection_is_empty(&self) -> bool {\n self.terminal.selection.as_ref().map_or(true, Selection::is_empty)\n }\n\n fn clear_selection(&mut self) {\n // Clear the selection on the terminal.\n let selection = self.terminal.selection.take();\n // Mark the terminal as dirty when selection wasn't empty.\n *self.dirty |= selection.is_some_and(|s| !s.is_empty());\n }\n\n fn update_selection(&mut self, mut point: Point, side: Side) {\n let mut selection = match self.terminal.selection.take() {\n Some(selection) => selection,\n None => return,\n };\n\n // Treat motion over message bar like motion over the last line.\n point.line = min(point.line, self.terminal.bottommost_line());\n\n // Update selection.\n selection.update(point, side);\n\n // Move vi cursor and expand selection.\n if self.terminal.mode().contains(TermMode::VI) && !self.search_active() {\n self.terminal.vi_mode_cursor.point = point;\n selection.include_all();\n }\n\n self.terminal.selection = Some(selection);\n *self.dirty = true;\n }\n\n fn start_selection(&mut self, ty: SelectionType, point: Point, side: Side) {\n self.terminal.selection = Some(Selection::new(ty, point, side));\n *self.dirty = true;\n\n self.copy_selection(ClipboardType::Selection);\n }\n\n fn toggle_selection(&mut self, ty: SelectionType, point: Point, side: Side) {\n match &mut self.terminal.selection {\n Some(selection) if selection.ty == ty && !selection.is_empty() => {\n self.clear_selection();\n },\n Some(selection) if !selection.is_empty() => {\n selection.ty = ty;\n *self.dirty = true;\n\n self.copy_selection(ClipboardType::Selection);\n },\n _ => self.start_selection(ty, point, side),\n }\n }\n\n #[inline]\n fn mouse_mode(&self) -> bool {\n self.terminal.mode().intersects(TermMode::MOUSE_MODE)\n && !self.terminal.mode().contains(TermMode::VI)\n }\n\n #[inline]\n fn mouse_mut(&mut self) -> &mut Mouse {\n self.mouse\n }\n\n 
#[inline]\n fn mouse(&self) -> &Mouse {\n self.mouse\n }\n\n #[inline]\n fn touch_purpose(&mut self) -> &mut TouchPurpose {\n self.touch\n }\n\n #[inline]\n fn modifiers(&mut self) -> &mut Modifiers {\n self.modifiers\n }\n\n #[inline]\n fn window(&mut self) -> &mut Window {\n &mut self.display.window\n }\n\n #[inline]\n fn display(&mut self) -> &mut Display {\n self.display\n }\n\n #[inline]\n fn terminal(&self) -> &Term<T> {\n self.terminal\n }\n\n #[inline]\n fn terminal_mut(&mut self) -> &mut Term<T> {\n self.terminal\n }\n\n fn spawn_new_instance(&mut self) {\n let mut env_args = env::args();\n let alacritty = env_args.next().unwrap();\n\n let mut args: Vec<String> = Vec::new();\n\n // Reuse the arguments passed to Alacritty for the new instance.\n #[allow(clippy::while_let_on_iterator)]\n while let Some(arg) = env_args.next() {\n // New instances shouldn't inherit command.\n if arg == \"-e\" || arg == \"--command\" {\n break;\n }\n\n // On unix, the working directory of the foreground shell is used by `start_daemon`.\n #[cfg(not(windows))]\n if arg == \"--working-directory\" {\n let _ = env_args.next();\n continue;\n }\n\n args.push(arg);\n }\n\n self.spawn_daemon(&alacritty, &args);\n }\n\n #[cfg(not(windows))]\n fn create_new_window(&mut self, #[cfg(target_os = \"macos\")] tabbing_id: Option<String>) {\n let mut options = WindowOptions::default();\n options.terminal_options.working_directory =\n foreground_process_path(self.master_fd, self.shell_pid).ok();\n\n #[cfg(target_os = \"macos\")]\n {\n options.window_tabbing_id = tabbing_id;\n }\n\n let _ = self.event_proxy.send_event(Event::new(EventType::CreateWindow(options), None));\n }\n\n #[cfg(windows)]\n fn create_new_window(&mut self) {\n let _ = self\n .event_proxy\n .send_event(Event::new(EventType::CreateWindow(WindowOptions::default()), None));\n }\n\n fn spawn_daemon<I, S>(&self, program: &str, args: I)\n where\n I: IntoIterator<Item = S> + Debug + Copy,\n S: AsRef<OsStr>,\n {\n 
#[cfg(not(windows))]\n let result = spawn_daemon(program, args, self.master_fd, self.shell_pid);\n #[cfg(windows)]\n let result = spawn_daemon(program, args);\n\n match result {\n Ok(_) => debug!(\"Launched {} with args {:?}\", program, args),\n Err(err) => warn!(\"Unable to launch {program} with args {args:?}: {err}\"),\n }\n }\n\n fn change_font_size(&mut self, delta: f32) {\n // Round to pick integral px steps, since fonts look better on them.\n let new_size = self.display.font_size.as_px().round() + delta;\n self.display.font_size = FontSize::from_px(new_size);\n let font = self.config.font.clone().with_size(self.display.font_size);\n self.display.pending_update.set_font(font);\n }\n\n fn reset_font_size(&mut self) {\n let scale_factor = self.display.window.scale_factor as f32;\n self.display.font_size = self.config.font.size().scale(scale_factor);\n self.display\n .pending_update\n .set_font(self.config.font.clone().with_size(self.display.font_size));\n }\n\n #[inline]\n fn pop_message(&mut self) {\n if !self.message_buffer.is_empty() {\n self.display.pending_update.dirty = true;\n self.message_buffer.pop();\n }\n }\n\n #[inline]\n fn start_search(&mut self, direction: Direction) {\n // Only create new history entry if the previous regex wasn't empty.\n if self.search_state.history.front().map_or(true, |regex| !regex.is_empty()) {\n self.search_state.history.push_front(String::new());\n self.search_state.history.truncate(MAX_SEARCH_HISTORY_SIZE);\n }\n\n self.search_state.history_index = Some(0);\n self.search_state.direction = direction;\n self.search_state.focused_match = None;\n\n // Store original search position as origin and reset location.\n if self.terminal.mode().contains(TermMode::VI) {\n self.search_state.origin = self.terminal.vi_mode_cursor.point;\n self.search_state.display_offset_delta = 0;\n\n // Adjust origin for content moving upward on search start.\n if self.terminal.grid().cursor.point.line + 1 == self.terminal.screen_lines() {\n 
self.search_state.origin.line -= 1;\n }\n } else {\n let viewport_top = Line(-(self.terminal.grid().display_offset() as i32)) - 1;\n let viewport_bottom = viewport_top + self.terminal.bottommost_line();\n let last_column = self.terminal.last_column();\n self.search_state.origin = match direction {\n Direction::Right => Point::new(viewport_top, Column(0)),\n Direction::Left => Point::new(viewport_bottom, last_column),\n };\n }\n\n // Enable IME so we can input into the search bar with it if we were in Vi mode.\n self.window().set_ime_allowed(true);\n\n self.display.damage_tracker.frame().mark_fully_damaged();\n self.display.pending_update.dirty = true;\n }\n\n #[inline]\n fn start_seeded_search(&mut self, direction: Direction, text: String) {\n let origin = self.terminal.vi_mode_cursor.point;\n\n // Start new search.\n self.clear_selection();\n self.start_search(direction);\n\n // Enter initial selection text.\n for c in text.chars() {\n if let '$' | '('..='+' | '?' | '['..='^' | '{'..='}' = c {\n self.search_input('\\\\');\n }\n self.search_input(c);\n }\n\n // Leave search mode.\n self.confirm_search();\n\n if !self.terminal.mode().contains(TermMode::VI) {\n return;\n }\n\n // Find the target vi cursor point by going to the next match to the right of the origin,\n // then jump to the next search match in the target direction.\n let target = self.search_next(origin, Direction::Right, Side::Right).and_then(|rm| {\n let regex_match = match direction {\n Direction::Right => {\n let origin = rm.end().add(self.terminal, Boundary::None, 1);\n self.search_next(origin, Direction::Right, Side::Left)?\n },\n Direction::Left => {\n let origin = rm.start().sub(self.terminal, Boundary::None, 1);\n self.search_next(origin, Direction::Left, Side::Left)?\n },\n };\n Some(*regex_match.start())\n });\n\n // Move the vi cursor to the target position.\n if let Some(target) = target {\n self.terminal_mut().vi_goto_point(target);\n self.mark_dirty();\n }\n }\n\n #[inline]\n fn 
confirm_search(&mut self) {\n // Just cancel search when not in vi mode.\n if !self.terminal.mode().contains(TermMode::VI) {\n self.cancel_search();\n return;\n }\n\n // Force unlimited search if the previous one was interrupted.\n let timer_id = TimerId::new(Topic::DelayedSearch, self.display.window.id());\n if self.scheduler.scheduled(timer_id) {\n self.goto_match(None);\n }\n\n self.exit_search();\n }\n\n #[inline]\n fn cancel_search(&mut self) {\n if self.terminal.mode().contains(TermMode::VI) {\n // Recover pre-search state in vi mode.\n self.search_reset_state();\n } else if let Some(focused_match) = &self.search_state.focused_match {\n // Create a selection for the focused match.\n let start = *focused_match.start();\n let end = *focused_match.end();\n self.start_selection(SelectionType::Simple, start, Side::Left);\n self.update_selection(end, Side::Right);\n self.copy_selection(ClipboardType::Selection);\n }\n\n self.search_state.dfas = None;\n\n self.exit_search();\n }\n\n #[inline]\n fn search_input(&mut self, c: char) {\n match self.search_state.history_index {\n Some(0) => (),\n // When currently in history, replace active regex with history on change.\n Some(index) => {\n self.search_state.history[0] = self.search_state.history[index].clone();\n self.search_state.history_index = Some(0);\n },\n None => return,\n }\n let regex = &mut self.search_state.history[0];\n\n match c {\n // Handle backspace/ctrl+h.\n '\\x08' | '\\x7f' => {\n let _ = regex.pop();\n },\n // Add ascii and unicode text.\n ' '..='~' | '\\u{a0}'..='\\u{10ffff}' => regex.push(c),\n // Ignore non-printable characters.\n _ => return,\n }\n\n if !self.terminal.mode().contains(TermMode::VI) {\n // Clear selection so we do not obstruct any matches.\n self.terminal.selection = None;\n }\n\n self.update_search();\n }\n\n #[inline]\n fn search_pop_word(&mut self) {\n if let Some(regex) = self.search_state.regex_mut() {\n *regex = regex.trim_end().to_owned();\n regex.truncate(regex.rfind(' 
').map_or(0, |i| i + 1));\n self.update_search();\n }\n }\n\n /// Go to the previous regex in the search history.\n #[inline]\n fn search_history_previous(&mut self) {\n let index = match &mut self.search_state.history_index {\n None => return,\n Some(index) if *index + 1 >= self.search_state.history.len() => return,\n Some(index) => index,\n };\n\n *index += 1;\n self.update_search();\n }\n\n /// Go to the previous regex in the search history.\n #[inline]\n fn search_history_next(&mut self) {\n let index = match &mut self.search_state.history_index {\n Some(0) | None => return,\n Some(index) => index,\n };\n\n *index -= 1;\n self.update_search();\n }\n\n #[inline]\n fn advance_search_origin(&mut self, direction: Direction) {\n // Use focused match as new search origin if available.\n if let Some(focused_match) = &self.search_state.focused_match {\n let new_origin = match direction {\n Direction::Right => focused_match.end().add(self.terminal, Boundary::None, 1),\n Direction::Left => focused_match.start().sub(self.terminal, Boundary::None, 1),\n };\n\n self.terminal.scroll_to_point(new_origin);\n\n self.search_state.display_offset_delta = 0;\n self.search_state.origin = new_origin;\n }\n\n // Search for the next match using the supplied direction.\n let search_direction = mem::replace(&mut self.search_state.direction, direction);\n self.goto_match(None);\n self.search_state.direction = search_direction;\n\n // If we found a match, we set the search origin right in front of it to make sure that\n // after modifications to the regex the search is started without moving the focused match\n // around.\n let focused_match = match &self.search_state.focused_match {\n Some(focused_match) => focused_match,\n None => return,\n };\n\n // Set new origin to the left/right of the match, depending on search direction.\n let new_origin = match self.search_state.direction {\n Direction::Right => *focused_match.start(),\n Direction::Left => *focused_match.end(),\n };\n\n // Store 
the search origin with display offset by checking how far we need to scroll to it.\n let old_display_offset = self.terminal.grid().display_offset() as i32;\n self.terminal.scroll_to_point(new_origin);\n let new_display_offset = self.terminal.grid().display_offset() as i32;\n self.search_state.display_offset_delta = new_display_offset - old_display_offset;\n\n // Store origin and scroll back to the match.\n self.terminal.scroll_display(Scroll::Delta(-self.search_state.display_offset_delta));\n self.search_state.origin = new_origin;\n }\n\n /// Find the next search match.\n fn search_next(&mut self, origin: Point, direction: Direction, side: Side) -> Option<Match> {\n self.search_state\n .dfas\n .as_mut()\n .and_then(|dfas| self.terminal.search_next(dfas, origin, direction, side, None))\n }\n\n #[inline]\n fn search_direction(&self) -> Direction {\n self.search_state.direction\n }\n\n #[inline]\n fn search_active(&self) -> bool {\n self.search_state.history_index.is_some()\n }\n\n /// Handle keyboard typing start.\n ///\n /// This will temporarily disable some features like terminal cursor blinking or the mouse\n /// cursor.\n ///\n /// All features are re-enabled again automatically.\n #[inline]\n fn on_typing_start(&mut self) {\n // Disable cursor blinking.\n let timer_id = TimerId::new(Topic::BlinkCursor, self.display.window.id());\n if self.scheduler.unschedule(timer_id).is_some() {\n self.schedule_blinking();\n\n // Mark the cursor as visible and queue redraw if the cursor was hidden.\n if mem::take(&mut self.display.cursor_hidden) {\n *self.dirty = true;\n }\n } else if *self.cursor_blink_timed_out {\n self.update_cursor_blinking();\n }\n\n // Hide mouse cursor.\n if self.config.mouse.hide_when_typing {\n self.display.window.set_mouse_visible(false);\n }\n }\n\n /// Process a new character for keyboard hints.\n fn hint_input(&mut self, c: char) {\n if let Some(hint) = self.display.hint_state.keyboard_input(self.terminal, c) {\n self.mouse.block_hint_launcher = 
false;\n self.trigger_hint(&hint);\n }\n *self.dirty = true;\n }\n\n /// Trigger a hint action.\n fn trigger_hint(&mut self, hint: &HintMatch) {\n if self.mouse.block_hint_launcher {\n return;\n }\n\n let hint_bounds = hint.bounds();\n let text = match hint.text(self.terminal) {\n Some(text) => text,\n None => return,\n };\n\n match &hint.action() {\n // Launch an external program.\n HintAction::Command(command) => {\n let mut args = command.args().to_vec();\n args.push(text.into());\n self.spawn_daemon(command.program(), &args);\n },\n // Copy the text to the clipboard.\n HintAction::Action(HintInternalAction::Copy) => {\n self.clipboard.store(ClipboardType::Clipboard, text);\n },\n // Write the text to the PTY/search.\n HintAction::Action(HintInternalAction::Paste) => self.paste(&text, true),\n // Select the text.\n HintAction::Action(HintInternalAction::Select) => {\n self.start_selection(SelectionType::Simple, *hint_bounds.start(), Side::Left);\n self.update_selection(*hint_bounds.end(), Side::Right);\n self.copy_selection(ClipboardType::Selection);\n },\n // Move the vi mode cursor.\n HintAction::Action(HintInternalAction::MoveViModeCursor) => {\n // Enter vi mode if we're not in it already.\n if !self.terminal.mode().contains(TermMode::VI) {\n self.terminal.toggle_vi_mode();\n }\n\n self.terminal.vi_goto_point(*hint_bounds.start());\n self.mark_dirty();\n },\n }\n }\n\n /// Expand the selection to the current mouse cursor position.\n #[inline]\n fn expand_selection(&mut self) {\n let control = self.modifiers().state().control_key();\n let selection_type = match self.mouse().click_state {\n ClickState::None => return,\n _ if control => SelectionType::Block,\n ClickState::Click => SelectionType::Simple,\n ClickState::DoubleClick => SelectionType::Semantic,\n ClickState::TripleClick => SelectionType::Lines,\n };\n\n // Load mouse point, treating message bar and padding as the closest cell.\n let display_offset = self.terminal().grid().display_offset();\n let 
point = self.mouse().point(&self.size_info(), display_offset);\n\n let cell_side = self.mouse().cell_side;\n\n let selection = match &mut self.terminal_mut().selection {\n Some(selection) => selection,\n None => return,\n };\n\n selection.ty = selection_type;\n self.update_selection(point, cell_side);\n\n // Move vi mode cursor to mouse click position.\n if self.terminal().mode().contains(TermMode::VI) && !self.search_active() {\n self.terminal_mut().vi_mode_cursor.point = point;\n }\n }\n\n /// Get the semantic word at the specified point.\n fn semantic_word(&self, point: Point) -> String {\n let terminal = self.terminal();\n let grid = terminal.grid();\n\n // Find the next semantic word boundary to the right.\n let mut end = terminal.semantic_search_right(point);\n\n // Get point at which skipping over semantic characters has led us back to the\n // original character.\n let start_cell = &grid[point];\n let search_end = if start_cell.flags.intersects(Flags::LEADING_WIDE_CHAR_SPACER) {\n point.add(terminal, Boundary::None, 2)\n } else if start_cell.flags.intersects(Flags::WIDE_CHAR) {\n point.add(terminal, Boundary::None, 1)\n } else {\n point\n };\n\n // Keep moving until we're not on top of a semantic escape character.\n let semantic_chars = terminal.semantic_escape_chars();\n loop {\n let cell = &grid[end];\n\n // Get cell's character, taking wide characters into account.\n let c = if cell.flags.contains(Flags::WIDE_CHAR_SPACER) {\n grid[end.sub(terminal, Boundary::None, 1)].c\n } else {\n cell.c\n };\n\n if !semantic_chars.contains(c) {\n break;\n }\n\n end = terminal.semantic_search_right(end.add(terminal, Boundary::None, 1));\n\n // Stop if the entire grid is only semantic escape characters.\n if end == search_end {\n return String::new();\n }\n }\n\n // Find the beginning of the semantic word.\n let start = terminal.semantic_search_left(end);\n\n terminal.bounds_to_string(start, end)\n }\n\n /// Handle beginning of terminal text input.\n fn 
on_terminal_input_start(&mut self) {\n self.on_typing_start();\n self.clear_selection();\n\n if self.terminal().grid().display_offset() != 0 {\n self.scroll(Scroll::Bottom);\n }\n }\n\n /// Paste a text into the terminal.\n fn paste(&mut self, text: &str, bracketed: bool) {\n if self.search_active() {\n for c in text.chars() {\n self.search_input(c);\n }\n } else if self.inline_search_state.char_pending {\n self.inline_search_input(text);\n } else if bracketed && self.terminal().mode().contains(TermMode::BRACKETED_PASTE) {\n self.on_terminal_input_start();\n\n self.write_to_pty(&b\"\\x1b[200~\"[..]);\n\n // Write filtered escape sequences.\n //\n // We remove `\\x1b` to ensure it's impossible for the pasted text to write the bracketed\n // paste end escape `\\x1b[201~` and `\\x03` since some shells incorrectly terminate\n // bracketed paste when they receive it.\n let filtered = text.replace(['\\x1b', '\\x03'], \"\");\n self.write_to_pty(filtered.into_bytes());\n\n self.write_to_pty(&b\"\\x1b[201~\"[..]);\n } else {\n self.on_terminal_input_start();\n\n let payload = if bracketed {\n // In non-bracketed (ie: normal) mode, terminal applications cannot distinguish\n // pasted data from keystrokes.\n //\n // In theory, we should construct the keystrokes needed to produce the data we are\n // pasting... 
since that's neither practical nor sensible (and probably an\n // impossible task to solve in a general way), we'll just replace line breaks\n // (windows and unix style) with a single carriage return (\\r, which is what the\n // Enter key produces).\n text.replace(\"\\r\\n\", \"\\r\").replace('\\n', \"\\r\").into_bytes()\n } else {\n // When we explicitly disable bracketed paste don't manipulate with the input,\n // so we pass user input as is.\n text.to_owned().into_bytes()\n };\n\n self.write_to_pty(payload);\n }\n }\n\n /// Toggle the vi mode status.\n #[inline]\n fn toggle_vi_mode(&mut self) {\n let was_in_vi_mode = self.terminal.mode().contains(TermMode::VI);\n if was_in_vi_mode {\n // If we had search running when leaving Vi mode we should mark terminal fully damaged\n // to cleanup highlighted results.\n if self.search_state.dfas.take().is_some() {\n self.display.damage_tracker.frame().mark_fully_damaged();\n }\n } else {\n self.clear_selection();\n }\n\n if self.search_active() {\n self.cancel_search();\n }\n\n // We don't want IME in Vi mode.\n self.window().set_ime_allowed(was_in_vi_mode);\n\n self.terminal.toggle_vi_mode();\n\n *self.dirty = true;\n }\n\n /// Get vi inline search state.\n fn inline_search_state(&mut self) -> &mut InlineSearchState {\n self.inline_search_state\n }\n\n /// Start vi mode inline search.\n fn start_inline_search(&mut self, direction: Direction, stop_short: bool) {\n self.inline_search_state.stop_short = stop_short;\n self.inline_search_state.direction = direction;\n self.inline_search_state.char_pending = true;\n self.inline_search_state.character = None;\n }\n\n /// Jump to the next matching character in the line.\n fn inline_search_next(&mut self) {\n let direction = self.inline_search_state.direction;\n self.inline_search(direction);\n }\n\n /// Jump to the next matching character in the line.\n fn inline_search_previous(&mut self) {\n let direction = self.inline_search_state.direction.opposite();\n 
self.inline_search(direction);\n }\n\n /// Process input during inline search.\n fn inline_search_input(&mut self, text: &str) {\n // Ignore input with empty text, like modifier keys.\n let c = match text.chars().next() {\n Some(c) => c,\n None => return,\n };\n\n self.inline_search_state.char_pending = false;\n self.inline_search_state.character = Some(c);\n self.window().set_ime_allowed(false);\n\n // Immediately move to the captured character.\n self.inline_search_next();\n }\n\n fn message(&self) -> Option<&Message> {\n self.message_buffer.message()\n }\n\n fn config(&self) -> &UiConfig {\n self.config\n }\n\n #[cfg(target_os = \"macos\")]\n fn event_loop(&self) -> &ActiveEventLoop {\n self.event_loop\n }\n\n fn clipboard_mut(&mut self) -> &mut Clipboard {\n self.clipboard\n }\n\n fn scheduler_mut(&mut self) -> &mut Scheduler {\n self.scheduler\n }\n}",
"class_signature": "impl<'a, N: Notify + 'a, T: EventListener> input::ActionContext<T> for ActionContext<'a, N, T>"
} |
create_log_message | alacritty-master/alacritty/src/logging.rs | fn create_log_message(record: &log::Record<'_>, target: &str, start: Instant) -> String {
let runtime = start.elapsed();
let secs = runtime.as_secs();
let nanos = runtime.subsec_nanos();
let mut message = format!("[{}.{:0>9}s] [{:<5}] [{}] ", secs, nanos, record.level(), target);
// Alignment for the lines after the first new line character in the payload. We don't deal
// with fullwidth/unicode chars here, so just `message.len()` is sufficient.
let alignment = message.len();
// Push lines with added extra padding on the next line, which is trimmed later.
let lines = record.args().to_string();
for line in lines.split('\n') {
let line = format!("{}\n{:width$}", line, "", width = alignment);
message.push_str(&line);
}
// Drop extra trailing alignment.
message.truncate(message.len() - alignment);
message
} | //! Logging for Alacritty.
//!
//! The main executable is supposed to call `initialize()` exactly once during
//! startup. All logging messages are written to stdout, given that their
//! log-level is sufficient for the level configured in `cli::Options`.
use std::fs::{File, OpenOptions};
use std::io::{self, LineWriter, Stdout, Write};
use std::path::PathBuf;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, Mutex, OnceLock};
use std::time::Instant;
use std::{env, process};
use log::{Level, LevelFilter};
use winit::event_loop::EventLoopProxy;
use crate::cli::Options;
use crate::event::{Event, EventType};
use crate::message_bar::{Message, MessageType};
/// Logging target for IPC config error messages.
pub const LOG_TARGET_IPC_CONFIG: &str = "alacritty_log_window_config";
/// Name for the environment variable containing the log file's path.
const ALACRITTY_LOG_ENV: &str = "ALACRITTY_LOG";
/// Logging target for config error messages.
pub const LOG_TARGET_CONFIG: &str = "alacritty_config_derive";
/// Logging target for winit events.
pub const LOG_TARGET_WINIT: &str = "alacritty_winit_event";
/// Name for the environment variable containing extra logging targets.
///
/// The targets are semicolon separated.
const ALACRITTY_EXTRA_LOG_TARGETS_ENV: &str = "ALACRITTY_EXTRA_LOG_TARGETS";
/// User configurable extra log targets to include.
/// Extra log targets requested by the user via the environment.
///
/// Parsed once from the semicolon-separated `ALACRITTY_EXTRA_LOG_TARGETS_ENV`
/// variable and cached for the lifetime of the process.
fn extra_log_targets() -> &'static [String] {
    static EXTRA_LOG_TARGETS: OnceLock<Vec<String>> = OnceLock::new();
    EXTRA_LOG_TARGETS.get_or_init(|| match env::var(ALACRITTY_EXTRA_LOG_TARGETS_ENV) {
        Ok(targets) => targets.split(';').map(String::from).collect(),
        Err(_) => Vec::new(),
    })
}
/// List of targets which will be logged by Alacritty.
const ALLOWED_TARGETS: &[&str] = &[
LOG_TARGET_IPC_CONFIG,
LOG_TARGET_CONFIG,
LOG_TARGET_WINIT,
"alacritty_config_derive",
"alacritty_terminal",
"alacritty",
"crossfont",
];
/// Initialize the logger to its defaults.
///
/// Sets the global maximum log level from the CLI options and installs
/// [`Logger`] as the boxed global logger. Returns the path of the on-demand
/// log file when its mutex could be locked, or the `log` crate's error if a
/// logger was already installed.
pub fn initialize(
    options: &Options,
    event_proxy: EventLoopProxy<Event>,
) -> Result<Option<PathBuf>, log::SetLoggerError> {
    log::set_max_level(options.log_level());
    // Capture the log file path before the logger is moved into the global slot.
    let logger = Logger::new(event_proxy);
    let path = logger.file_path();
    log::set_boxed_logger(Box::new(logger))?;
    Ok(path)
}
/// Global logger sink.
///
/// Every accepted record is written to an on-demand log file and to stdout;
/// warnings and errors are additionally forwarded to the message bar through
/// the event loop proxy.
pub struct Logger {
    // Lazily created log file in the temp directory.
    logfile: Mutex<OnDemandLogFile>,
    // Line-buffered stdout writer.
    stdout: Mutex<LineWriter<Stdout>>,
    // Proxy used to surface warnings/errors in the message bar.
    event_proxy: Mutex<EventLoopProxy<Event>>,
    // Startup instant; log timestamps are relative to this.
    start: Instant,
}
impl Logger {
    /// Create a logger that stamps records relative to "now".
    fn new(event_proxy: EventLoopProxy<Event>) -> Self {
        let logfile = Mutex::new(OnDemandLogFile::new());
        let stdout = Mutex::new(LineWriter::new(io::stdout()));
        Logger { logfile, stdout, event_proxy: Mutex::new(event_proxy), start: Instant::now() }
    }

    /// Path of the log file, or `None` if the logfile mutex is poisoned.
    fn file_path(&self) -> Option<PathBuf> {
        if let Ok(logfile) = self.logfile.lock() {
            Some(logfile.path().clone())
        } else {
            None
        }
    }

    /// Log a record to the message bar.
    ///
    /// Only `Error` and `Warn` records are forwarded; everything else returns
    /// early. Failure to lock the proxy or to send the event is ignored.
    fn message_bar_log(&self, record: &log::Record<'_>, logfile_path: &str) {
        let message_type = match record.level() {
            Level::Error => MessageType::Error,
            Level::Warn => MessageType::Warning,
            _ => return,
        };
        let event_proxy = match self.event_proxy.lock() {
            Ok(event_proxy) => event_proxy,
            Err(_) => return,
        };
        // Platform-specific spelling of the env var that holds the log path.
        #[cfg(not(windows))]
        let env_var = format!("${ALACRITTY_LOG_ENV}");
        #[cfg(windows)]
        let env_var = format!("%{}%", ALACRITTY_LOG_ENV);
        let message = format!(
            "[{}] {}\nSee log at {} ({})",
            record.level(),
            record.args(),
            logfile_path,
            env_var,
        );
        let mut message = Message::new(message, message_type);
        message.set_target(record.target().to_owned());
        let _ = event_proxy.send_event(Event::new(EventType::Message(message), None));
    }
}
impl log::Log for Logger {
    /// A record is enabled when its level is within the global maximum.
    fn enabled(&self, metadata: &log::Metadata<'_>) -> bool {
        metadata.level() <= log::max_level()
    }

    fn log(&self, record: &log::Record<'_>) {
        // Get target crate: everything before the first ':' of a `crate::module` path.
        let index = record.target().find(':').unwrap_or_else(|| record.target().len());
        let target = &record.target()[..index];
        // Only log our own crates, except when logging at Level::Trace.
        if !self.enabled(record.metadata()) || !is_allowed_target(record.level(), target) {
            return;
        }
        // Create log message for the given `record` and `target`.
        let message = create_log_message(record, target, self.start);
        if let Ok(mut logfile) = self.logfile.lock() {
            // Write to logfile; write errors are deliberately ignored (best effort).
            let _ = logfile.write_all(message.as_ref());
            // Log relevant entries to message bar.
            self.message_bar_log(record, &logfile.path.to_string_lossy());
        }
        // Write to stdout.
        if let Ok(mut stdout) = self.stdout.lock() {
            let _ = stdout.write_all(message.as_ref());
        }
    }

    // Both sinks are line-buffered, so there is nothing extra to flush here.
    fn flush(&self) {}
}
/// Build the formatted message for `record`.
///
/// The header carries the runtime since `start` (seconds.nanoseconds), the
/// padded level, and the target crate. Every payload line after the first is
/// indented to line up with the header; the result ends with a single newline.
fn create_log_message(record: &log::Record<'_>, target: &str, start: Instant) -> String {
    let elapsed = start.elapsed();
    let mut message = format!(
        "[{}.{:0>9}s] [{:<5}] [{}] ",
        elapsed.as_secs(),
        elapsed.subsec_nanos(),
        record.level(),
        target
    );
    // The header is plain ASCII, so its byte length equals its display width.
    let alignment = message.len();
    let payload = record.args().to_string();
    for line in payload.split('\n') {
        message.push_str(line);
        message.push('\n');
        // Pad the (potential) next line; the last padding run is removed below.
        message.push_str(&" ".repeat(alignment));
    }
    // Drop the padding appended after the final newline.
    message.truncate(message.len() - alignment);
    message
}
/// Check if log messages from a crate should be logged.
fn is_allowed_target(level: Level, target: &str) -> bool {
match (level, log::max_level()) {
(Level::Error, LevelFilter::Trace) | (Level::Warn, LevelFilter::Trace) => true,
_ => ALLOWED_TARGETS.contains(&target) || extra_log_targets().iter().any(|t| t == target),
}
}
/// Log file that is only created on first write and recreated when deleted.
struct OnDemandLogFile {
    // Open writer, or `None` until the first successful write.
    file: Option<LineWriter<File>>,
    // Set once the file has been created. NOTE(review): this flag is only
    // written here, never read in this chunk — presumably consumed elsewhere.
    created: Arc<AtomicBool>,
    // Fixed path in the temp dir, derived from the process id.
    path: PathBuf,
}
impl OnDemandLogFile {
    fn new() -> Self {
        let mut path = env::temp_dir();
        path.push(format!("Alacritty-{}.log", process::id()));
        // Set log path as an environment variable.
        env::set_var(ALACRITTY_LOG_ENV, path.as_os_str());
        OnDemandLogFile { path, file: None, created: Arc::new(AtomicBool::new(false)) }
    }

    /// Return the writer, (re)creating the file when necessary.
    fn file(&mut self) -> Result<&mut LineWriter<File>, io::Error> {
        // Allow to recreate the file if it has been deleted at runtime.
        if self.file.is_some() && !self.path.as_path().exists() {
            self.file = None;
        }
        // Create the file if it doesn't exist yet. `create_new` fails if a file
        // with this path already exists, so the error branch also covers that.
        if self.file.is_none() {
            let file = OpenOptions::new().append(true).create_new(true).open(&self.path);
            match file {
                Ok(file) => {
                    self.file = Some(io::LineWriter::new(file));
                    self.created.store(true, Ordering::Relaxed);
                    let _ =
                        writeln!(io::stdout(), "Created log file at \"{}\"", self.path.display());
                },
                Err(e) => {
                    let _ = writeln!(io::stdout(), "Unable to create log file: {e}");
                    return Err(e);
                },
            }
        }
        Ok(self.file.as_mut().unwrap())
    }

    /// Path of the log file, whether or not it has been created yet.
    fn path(&self) -> &PathBuf {
        &self.path
    }
}
/// Forward writes to the lazily-created log file.
impl Write for OnDemandLogFile {
    fn write(&mut self, buf: &[u8]) -> Result<usize, io::Error> {
        // A failure to (re)create the file surfaces as the write error.
        let file = self.file()?;
        file.write(buf)
    }

    fn flush(&mut self) -> Result<(), io::Error> {
        let file = self.file()?;
        file.flush()
    }
}
| rust | {
"argument_definitions": [
{
"definitions": [
"pub struct Record<'a> {\n metadata: Metadata<'a>,\n args: fmt::Arguments<'a>,\n module_path: Option<MaybeStaticStr<'a>>,\n file: Option<MaybeStaticStr<'a>>,\n line: Option<u32>,\n #[cfg(feature = \"kv\")]\n key_values: KeyValues<'a>,\n}"
],
"name": "record",
"type": "&log::Record<'_>"
},
{
"definitions": [
"/// use std::time::{Duration, SystemTime};"
],
"name": "start",
"type": "Instant"
}
],
"end_line": 185,
"name": "create_log_message",
"signature": "fn create_log_message(record: &log::Record<'_>, target: &str, start: Instant) -> String",
"start_line": 165
} | {
"class_name": "",
"class_signature": ""
} |
get_program_info_log | alacritty-master/alacritty/src/renderer/shader.rs | fn get_program_info_log(program: GLuint) -> String {
// Get expected log length.
let mut max_length: GLint = 0;
unsafe {
gl::GetProgramiv(program, gl::INFO_LOG_LENGTH, &mut max_length);
}
// Read the info log.
let mut actual_length: GLint = 0;
let mut buf: Vec<u8> = Vec::with_capacity(max_length as usize);
unsafe {
gl::GetProgramInfoLog(program, max_length, &mut actual_length, buf.as_mut_ptr() as *mut _);
}
// Build a string.
unsafe {
buf.set_len(actual_length as usize);
}
String::from_utf8_lossy(&buf).to_string()
} | use std::ffi::CStr;
use std::fmt;
use crate::gl;
use crate::gl::types::*;
/// A wrapper for a shader program id, with automatic lifetime management.
///
/// The wrapped id is deleted via `gl::DeleteProgram` in `Drop`.
#[derive(Debug)]
pub struct ShaderProgram(GLuint);
/// GLSL dialect the renderer compiles its shaders against.
#[derive(Copy, Clone, Debug)]
pub enum ShaderVersion {
    /// OpenGL 3.3 core shaders.
    Glsl3,
    /// OpenGL ES 2.0 shaders.
    Gles2,
}

impl ShaderVersion {
    /// Header prepended to every shader source. The trailing newlines are
    /// required so the concatenated source starts on a fresh line.
    fn shader_header(&self) -> &'static str {
        match self {
            Self::Gles2 => "#version 100\n#define GLES2_RENDERER\n",
            Self::Glsl3 => "#version 330 core\n",
        }
    }
}
impl ShaderProgram {
    /// Compile both stages, link them into a program, and verify the link.
    ///
    /// `shader_header` is an optional extra header spliced between the version
    /// header and the stage source. Returns `ShaderError::Link` with the
    /// driver's info log when linking fails.
    pub fn new(
        shader_version: ShaderVersion,
        shader_header: Option<&str>,
        vertex_shader: &'static str,
        fragment_shader: &'static str,
    ) -> Result<Self, ShaderError> {
        let vertex_shader =
            Shader::new(shader_version, shader_header, gl::VERTEX_SHADER, vertex_shader)?;
        let fragment_shader =
            Shader::new(shader_version, shader_header, gl::FRAGMENT_SHADER, fragment_shader)?;
        // Wrap immediately so the program id is deleted on any early return.
        let program = unsafe { Self(gl::CreateProgram()) };
        let mut success: GLint = 0;
        unsafe {
            gl::AttachShader(program.id(), vertex_shader.id());
            gl::AttachShader(program.id(), fragment_shader.id());
            gl::LinkProgram(program.id());
            gl::GetProgramiv(program.id(), gl::LINK_STATUS, &mut success);
        }
        if success != i32::from(gl::TRUE) {
            return Err(ShaderError::Link(get_program_info_log(program.id())));
        }
        Ok(program)
    }

    /// Get uniform location by name; `ShaderError::Uniform` if it is absent.
    pub fn get_uniform_location(&self, name: &'static CStr) -> Result<GLint, ShaderError> {
        // This call doesn't require `UseProgram`.
        let ret = unsafe { gl::GetUniformLocation(self.id(), name.as_ptr()) };
        if ret == -1 {
            return Err(ShaderError::Uniform(name));
        }
        Ok(ret)
    }

    /// Get the shader program id.
    pub fn id(&self) -> GLuint {
        self.0
    }
}
impl Drop for ShaderProgram {
    fn drop(&mut self) {
        // SAFETY: `self.0` is a program id obtained from `gl::CreateProgram`.
        unsafe { gl::DeleteProgram(self.0) }
    }
}
/// A wrapper for a shader id, with automatic lifetime management.
///
/// The wrapped id is deleted via `gl::DeleteShader` in `Drop`.
#[derive(Debug)]
struct Shader(GLuint);
impl Shader {
    /// Compile a single shader stage of the given `kind`.
    ///
    /// The final source is the concatenation of the version header, the
    /// optional extra header, and `source`, passed to the driver as separate
    /// (pointer, length) pairs. Returns `ShaderError::Compile` with the
    /// driver's info log when compilation fails.
    fn new(
        shader_version: ShaderVersion,
        shader_header: Option<&str>,
        kind: GLenum,
        source: &'static str,
    ) -> Result<Self, ShaderError> {
        let version_header = shader_version.shader_header();
        // Parallel arrays of source fragments and their byte lengths.
        let mut sources = Vec::<*const GLchar>::with_capacity(3);
        let mut lengths = Vec::<GLint>::with_capacity(3);
        sources.push(version_header.as_ptr().cast());
        lengths.push(version_header.len() as GLint);
        if let Some(shader_header) = shader_header {
            sources.push(shader_header.as_ptr().cast());
            lengths.push(shader_header.len() as GLint);
        }
        sources.push(source.as_ptr().cast());
        lengths.push(source.len() as GLint);
        // Wrap immediately so the shader id is deleted on any early return.
        let shader = unsafe { Self(gl::CreateShader(kind)) };
        let mut success: GLint = 0;
        // SAFETY: `sources`/`lengths` outlive these calls and hold
        // `lengths.len()` valid entries each.
        unsafe {
            gl::ShaderSource(
                shader.id(),
                lengths.len() as GLint,
                sources.as_ptr().cast(),
                lengths.as_ptr(),
            );
            gl::CompileShader(shader.id());
            gl::GetShaderiv(shader.id(), gl::COMPILE_STATUS, &mut success);
        }
        if success == GLint::from(gl::TRUE) {
            Ok(shader)
        } else {
            Err(ShaderError::Compile(get_shader_info_log(shader.id())))
        }
    }

    /// Get the shader id.
    fn id(&self) -> GLuint {
        self.0
    }
}
impl Drop for Shader {
    fn drop(&mut self) {
        // SAFETY: `self.0` is a shader id obtained from `gl::CreateShader`.
        unsafe { gl::DeleteShader(self.0) }
    }
}
/// Fetch the info log of a linked (or failed-to-link) program object.
fn get_program_info_log(program: GLuint) -> String {
    // Get expected log length (including the NUL terminator).
    let mut max_length: GLint = 0;
    unsafe {
        gl::GetProgramiv(program, gl::INFO_LOG_LENGTH, &mut max_length);
    }
    // Read the info log into the vector's spare capacity.
    let mut actual_length: GLint = 0;
    let mut buf: Vec<u8> = Vec::with_capacity(max_length as usize);
    unsafe {
        gl::GetProgramInfoLog(program, max_length, &mut actual_length, buf.as_mut_ptr() as *mut _);
    }
    // Build a string.
    // SAFETY: the driver reported writing `actual_length` bytes (<= max_length,
    // the buffer's capacity), so the first `actual_length` bytes are initialized.
    unsafe {
        buf.set_len(actual_length as usize);
    }
    String::from_utf8_lossy(&buf).to_string()
}
/// Fetch the info log of a compiled (or failed-to-compile) shader object.
fn get_shader_info_log(shader: GLuint) -> String {
    // Get expected log length (including the NUL terminator).
    let mut max_length: GLint = 0;
    unsafe {
        gl::GetShaderiv(shader, gl::INFO_LOG_LENGTH, &mut max_length);
    }
    // Read the info log into the vector's spare capacity.
    let mut actual_length: GLint = 0;
    let mut buf: Vec<u8> = Vec::with_capacity(max_length as usize);
    unsafe {
        gl::GetShaderInfoLog(shader, max_length, &mut actual_length, buf.as_mut_ptr() as *mut _);
    }
    // Build a string.
    // SAFETY: the driver reported writing `actual_length` bytes (<= max_length,
    // the buffer's capacity), so the first `actual_length` bytes are initialized.
    unsafe {
        buf.set_len(actual_length as usize);
    }
    String::from_utf8_lossy(&buf).to_string()
}
/// Errors that can occur while building or querying a shader program.
#[derive(Debug)]
pub enum ShaderError {
    /// Error compiling shader; carries the driver's info log.
    Compile(String),
    /// Error linking shader; carries the driver's info log.
    Link(String),
    /// Error getting uniform location for the named uniform.
    Uniform(&'static CStr),
}
impl std::error::Error for ShaderError {}
impl fmt::Display for ShaderError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Self::Compile(reason) => write!(f, "Failed compiling shader: {reason}"),
Self::Link(reason) => write!(f, "Failed linking shader: {reason}"),
Self::Uniform(name) => write!(f, "Failed to get uniform location of {name:?}"),
}
}
}
| rust | {
"argument_definitions": [
{
"definitions": [
"pub enum __GLsync {}"
],
"name": "program",
"type": "GLuint"
}
],
"end_line": 158,
"name": "get_program_info_log",
"signature": "fn get_program_info_log(program: GLuint) -> String",
"start_line": 138
} | {
"class_name": "",
"class_signature": ""
} |
get_shader_info_log | alacritty-master/alacritty/src/renderer/shader.rs | fn get_shader_info_log(shader: GLuint) -> String {
// Get expected log length.
let mut max_length: GLint = 0;
unsafe {
gl::GetShaderiv(shader, gl::INFO_LOG_LENGTH, &mut max_length);
}
// Read the info log.
let mut actual_length: GLint = 0;
let mut buf: Vec<u8> = Vec::with_capacity(max_length as usize);
unsafe {
gl::GetShaderInfoLog(shader, max_length, &mut actual_length, buf.as_mut_ptr() as *mut _);
}
// Build a string.
unsafe {
buf.set_len(actual_length as usize);
}
String::from_utf8_lossy(&buf).to_string()
} | use std::ffi::CStr;
use std::fmt;
use crate::gl;
use crate::gl::types::*;
/// A wrapper for a shader program id, with automatic lifetime management.
#[derive(Debug)]
pub struct ShaderProgram(GLuint);
#[derive(Copy, Clone, Debug)]
pub enum ShaderVersion {
/// OpenGL 3.3 core shaders.
Glsl3,
/// OpenGL ES 2.0 shaders.
Gles2,
}
impl ShaderVersion {
// Header to which we concatenate the entire shader. The newlines are required.
fn shader_header(&self) -> &'static str {
match self {
Self::Glsl3 => "#version 330 core\n",
Self::Gles2 => "#version 100\n#define GLES2_RENDERER\n",
}
}
}
impl ShaderProgram {
pub fn new(
shader_version: ShaderVersion,
shader_header: Option<&str>,
vertex_shader: &'static str,
fragment_shader: &'static str,
) -> Result<Self, ShaderError> {
let vertex_shader =
Shader::new(shader_version, shader_header, gl::VERTEX_SHADER, vertex_shader)?;
let fragment_shader =
Shader::new(shader_version, shader_header, gl::FRAGMENT_SHADER, fragment_shader)?;
let program = unsafe { Self(gl::CreateProgram()) };
let mut success: GLint = 0;
unsafe {
gl::AttachShader(program.id(), vertex_shader.id());
gl::AttachShader(program.id(), fragment_shader.id());
gl::LinkProgram(program.id());
gl::GetProgramiv(program.id(), gl::LINK_STATUS, &mut success);
}
if success != i32::from(gl::TRUE) {
return Err(ShaderError::Link(get_program_info_log(program.id())));
}
Ok(program)
}
/// Get uniform location by name. Panic if failed.
pub fn get_uniform_location(&self, name: &'static CStr) -> Result<GLint, ShaderError> {
// This call doesn't require `UseProgram`.
let ret = unsafe { gl::GetUniformLocation(self.id(), name.as_ptr()) };
if ret == -1 {
return Err(ShaderError::Uniform(name));
}
Ok(ret)
}
/// Get the shader program id.
pub fn id(&self) -> GLuint {
self.0
}
}
impl Drop for ShaderProgram {
fn drop(&mut self) {
unsafe { gl::DeleteProgram(self.0) }
}
}
/// A wrapper for a shader id, with automatic lifetime management.
#[derive(Debug)]
struct Shader(GLuint);
impl Shader {
fn new(
shader_version: ShaderVersion,
shader_header: Option<&str>,
kind: GLenum,
source: &'static str,
) -> Result<Self, ShaderError> {
let version_header = shader_version.shader_header();
let mut sources = Vec::<*const GLchar>::with_capacity(3);
let mut lengths = Vec::<GLint>::with_capacity(3);
sources.push(version_header.as_ptr().cast());
lengths.push(version_header.len() as GLint);
if let Some(shader_header) = shader_header {
sources.push(shader_header.as_ptr().cast());
lengths.push(shader_header.len() as GLint);
}
sources.push(source.as_ptr().cast());
lengths.push(source.len() as GLint);
let shader = unsafe { Self(gl::CreateShader(kind)) };
let mut success: GLint = 0;
unsafe {
gl::ShaderSource(
shader.id(),
lengths.len() as GLint,
sources.as_ptr().cast(),
lengths.as_ptr(),
);
gl::CompileShader(shader.id());
gl::GetShaderiv(shader.id(), gl::COMPILE_STATUS, &mut success);
}
if success == GLint::from(gl::TRUE) {
Ok(shader)
} else {
Err(ShaderError::Compile(get_shader_info_log(shader.id())))
}
}
fn id(&self) -> GLuint {
self.0
}
}
impl Drop for Shader {
fn drop(&mut self) {
unsafe { gl::DeleteShader(self.0) }
}
}
/// Fetch the info log of a linked (or failed-to-link) program object.
fn get_program_info_log(program: GLuint) -> String {
    // Get expected log length (including the NUL terminator).
    let mut max_length: GLint = 0;
    unsafe {
        gl::GetProgramiv(program, gl::INFO_LOG_LENGTH, &mut max_length);
    }
    // Read the info log into the vector's spare capacity.
    let mut actual_length: GLint = 0;
    let mut buf: Vec<u8> = Vec::with_capacity(max_length as usize);
    unsafe {
        gl::GetProgramInfoLog(program, max_length, &mut actual_length, buf.as_mut_ptr() as *mut _);
    }
    // Build a string.
    // SAFETY: the driver reported writing `actual_length` bytes (<= max_length,
    // the buffer's capacity), so the first `actual_length` bytes are initialized.
    unsafe {
        buf.set_len(actual_length as usize);
    }
    String::from_utf8_lossy(&buf).to_string()
}
/// Fetch the info log of a compiled (or failed-to-compile) shader object.
fn get_shader_info_log(shader: GLuint) -> String {
    // Get expected log length (including the NUL terminator).
    let mut max_length: GLint = 0;
    unsafe {
        gl::GetShaderiv(shader, gl::INFO_LOG_LENGTH, &mut max_length);
    }
    // Read the info log into the vector's spare capacity.
    let mut actual_length: GLint = 0;
    let mut buf: Vec<u8> = Vec::with_capacity(max_length as usize);
    unsafe {
        gl::GetShaderInfoLog(shader, max_length, &mut actual_length, buf.as_mut_ptr() as *mut _);
    }
    // Build a string.
    // SAFETY: the driver reported writing `actual_length` bytes (<= max_length,
    // the buffer's capacity), so the first `actual_length` bytes are initialized.
    unsafe {
        buf.set_len(actual_length as usize);
    }
    String::from_utf8_lossy(&buf).to_string()
}
#[derive(Debug)]
pub enum ShaderError {
/// Error compiling shader.
Compile(String),
/// Error linking shader.
Link(String),
/// Error getting uniform location.
Uniform(&'static CStr),
}
impl std::error::Error for ShaderError {}
impl fmt::Display for ShaderError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Self::Compile(reason) => write!(f, "Failed compiling shader: {reason}"),
Self::Link(reason) => write!(f, "Failed linking shader: {reason}"),
Self::Uniform(name) => write!(f, "Failed to get uniform location of {name:?}"),
}
}
}
| rust | {
"argument_definitions": [
{
"definitions": [
"pub enum __GLsync {}"
],
"name": "shader",
"type": "GLuint"
}
],
"end_line": 180,
"name": "get_shader_info_log",
"signature": "fn get_shader_info_log(shader: GLuint) -> String",
"start_line": 160
} | {
"class_name": "",
"class_signature": ""
} |
create_rect | alacritty-master/alacritty/src/renderer/rects.rs | fn create_rect(
size: &SizeInfo,
descent: f32,
start: Point<usize>,
end: Point<usize>,
position: f32,
mut thickness: f32,
color: Rgb,
) -> RenderRect {
let start_x = start.column.0 as f32 * size.cell_width();
let end_x = (end.column.0 + 1) as f32 * size.cell_width();
let width = end_x - start_x;
// Make sure lines are always visible.
thickness = thickness.max(1.);
let line_bottom = (start.line as f32 + 1.) * size.cell_height();
let baseline = line_bottom + descent;
let mut y = (baseline - position - thickness / 2.).round();
let max_y = line_bottom - thickness;
if y > max_y {
y = max_y;
}
RenderRect::new(
start_x + size.padding_x(),
y + size.padding_y(),
width,
thickness,
color,
1.,
)
} | use std::collections::HashMap;
use std::mem;
use ahash::RandomState;
use crossfont::Metrics;
use log::info;
use alacritty_terminal::grid::Dimensions;
use alacritty_terminal::index::{Column, Point};
use alacritty_terminal::term::cell::Flags;
use crate::display::color::Rgb;
use crate::display::content::RenderableCell;
use crate::display::SizeInfo;
use crate::gl;
use crate::gl::types::*;
use crate::renderer::shader::{ShaderError, ShaderProgram, ShaderVersion};
use crate::renderer::{self, cstr};
/// A rectangle to be drawn by the rect renderer, in pixel coordinates.
#[derive(Debug, Copy, Clone)]
pub struct RenderRect {
    pub x: f32,
    pub y: f32,
    pub width: f32,
    pub height: f32,
    pub color: Rgb,
    pub alpha: f32,
    pub kind: RectKind,
}

impl RenderRect {
    /// Build a plain (`RectKind::Normal`) rectangle.
    pub fn new(x: f32, y: f32, width: f32, height: f32, color: Rgb, alpha: f32) -> Self {
        Self { x, y, width, height, color, alpha, kind: RectKind::Normal }
    }
}
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub struct RenderLine {
pub start: Point<usize>,
pub end: Point<usize>,
pub color: Rgb,
}
// NOTE: These flags must be in sync with their usage in the rect.*.glsl shaders.
#[repr(u8)]
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum RectKind {
Normal = 0,
Undercurl = 1,
DottedUnderline = 2,
DashedUnderline = 3,
NumKinds = 4,
}
impl RenderLine {
pub fn rects(&self, flag: Flags, metrics: &Metrics, size: &SizeInfo) -> Vec<RenderRect> {
let mut rects = Vec::new();
let mut start = self.start;
while start.line < self.end.line {
let end = Point::new(start.line, size.last_column());
Self::push_rects(&mut rects, metrics, size, flag, start, end, self.color);
start = Point::new(start.line + 1, Column(0));
}
Self::push_rects(&mut rects, metrics, size, flag, start, self.end, self.color);
rects
}
/// Push all rects required to draw the cell's line.
fn push_rects(
rects: &mut Vec<RenderRect>,
metrics: &Metrics,
size: &SizeInfo,
flag: Flags,
start: Point<usize>,
end: Point<usize>,
color: Rgb,
) {
let (position, thickness, ty) = match flag {
Flags::DOUBLE_UNDERLINE => {
// Position underlines so each one has 50% of descent available.
let top_pos = 0.25 * metrics.descent;
let bottom_pos = 0.75 * metrics.descent;
rects.push(Self::create_rect(
size,
metrics.descent,
start,
end,
top_pos,
metrics.underline_thickness,
color,
));
(bottom_pos, metrics.underline_thickness, RectKind::Normal)
},
// Make undercurl occupy the entire descent area.
Flags::UNDERCURL => (metrics.descent, metrics.descent.abs(), RectKind::Undercurl),
Flags::UNDERLINE => {
(metrics.underline_position, metrics.underline_thickness, RectKind::Normal)
},
// Make dotted occupy the entire descent area.
Flags::DOTTED_UNDERLINE => {
(metrics.descent, metrics.descent.abs(), RectKind::DottedUnderline)
},
Flags::DASHED_UNDERLINE => {
(metrics.underline_position, metrics.underline_thickness, RectKind::DashedUnderline)
},
Flags::STRIKEOUT => {
(metrics.strikeout_position, metrics.strikeout_thickness, RectKind::Normal)
},
_ => unimplemented!("Invalid flag for cell line drawing specified"),
};
let mut rect =
Self::create_rect(size, metrics.descent, start, end, position, thickness, color);
rect.kind = ty;
rects.push(rect);
}
/// Create a line's rect at a position relative to the baseline.
///
/// `position` is the offset above the baseline; the rect is clamped so it
/// never extends below the bottom of the cell.
fn create_rect(
    size: &SizeInfo,
    descent: f32,
    start: Point<usize>,
    end: Point<usize>,
    position: f32,
    mut thickness: f32,
    color: Rgb,
) -> RenderRect {
    // Horizontal extent: left edge of `start` through right edge of `end`.
    let x = start.column.0 as f32 * size.cell_width();
    let width = (end.column.0 + 1) as f32 * size.cell_width() - x;

    // Make sure lines are always visible.
    thickness = thickness.max(1.);

    // Center the rect on the requested offset above the baseline.
    let line_bottom = (start.line as f32 + 1.) * size.cell_height();
    let baseline = line_bottom + descent;
    let mut y = (baseline - position - thickness / 2.).round();

    // Keep the rect inside the cell.
    let max_y = line_bottom - thickness;
    if y > max_y {
        y = max_y;
    }

    RenderRect::new(x + size.padding_x(), y + size.padding_y(), width, thickness, color, 1.)
}
}
/// Lines for underline and strikeout.
#[derive(Default)]
pub struct RenderLines {
inner: HashMap<Flags, Vec<RenderLine>, RandomState>,
}
impl RenderLines {
#[inline]
pub fn new() -> Self {
Self::default()
}
#[inline]
pub fn rects(&self, metrics: &Metrics, size: &SizeInfo) -> Vec<RenderRect> {
self.inner
.iter()
.flat_map(|(flag, lines)| {
lines.iter().flat_map(move |line| line.rects(*flag, metrics, size))
})
.collect()
}
/// Update the stored lines with the next cell info.
#[inline]
pub fn update(&mut self, cell: &RenderableCell) {
self.update_flag(cell, Flags::UNDERLINE);
self.update_flag(cell, Flags::DOUBLE_UNDERLINE);
self.update_flag(cell, Flags::STRIKEOUT);
self.update_flag(cell, Flags::UNDERCURL);
self.update_flag(cell, Flags::DOTTED_UNDERLINE);
self.update_flag(cell, Flags::DASHED_UNDERLINE);
}
/// Update the lines for a specific flag.
fn update_flag(&mut self, cell: &RenderableCell, flag: Flags) {
if !cell.flags.contains(flag) {
return;
}
// The underline color escape does not apply to strikeout.
let color = if flag.contains(Flags::STRIKEOUT) { cell.fg } else { cell.underline };
// Include wide char spacer if the current cell is a wide char.
let mut end = cell.point;
if cell.flags.contains(Flags::WIDE_CHAR) {
end.column += 1;
}
// Check if there's an active line.
if let Some(line) = self.inner.get_mut(&flag).and_then(|lines| lines.last_mut()) {
if color == line.color
&& cell.point.column == line.end.column + 1
&& cell.point.line == line.end.line
{
// Update the length of the line.
line.end = end;
return;
}
}
// Start new line if there currently is none.
let line = RenderLine { start: cell.point, end, color };
match self.inner.get_mut(&flag) {
Some(lines) => lines.push(line),
None => {
self.inner.insert(flag, vec![line]);
},
}
}
}
/// Shader sources for rect rendering program.
const RECT_SHADER_F: &str = include_str!("../../res/rect.f.glsl");
const RECT_SHADER_V: &str = include_str!("../../res/rect.v.glsl");
#[repr(C)]
#[derive(Debug, Clone, Copy)]
struct Vertex {
// Normalized screen coordinates.
x: f32,
y: f32,
// Color.
r: u8,
g: u8,
b: u8,
a: u8,
}
#[derive(Debug)]
pub struct RectRenderer {
// GL buffer objects.
vao: GLuint,
vbo: GLuint,
programs: [RectShaderProgram; 4],
vertices: [Vec<Vertex>; 4],
}
impl RectRenderer {
pub fn new(shader_version: ShaderVersion) -> Result<Self, renderer::Error> {
let mut vao: GLuint = 0;
let mut vbo: GLuint = 0;
let rect_program = RectShaderProgram::new(shader_version, RectKind::Normal)?;
let undercurl_program = RectShaderProgram::new(shader_version, RectKind::Undercurl)?;
// This shader has way more ALU operations than other rect shaders, so use a fallback
// to underline just for it when we can't compile it.
let dotted_program = match RectShaderProgram::new(shader_version, RectKind::DottedUnderline)
{
Ok(dotted_program) => dotted_program,
Err(err) => {
info!("Error compiling dotted shader: {err}\n falling back to underline");
RectShaderProgram::new(shader_version, RectKind::Normal)?
},
};
let dashed_program = RectShaderProgram::new(shader_version, RectKind::DashedUnderline)?;
unsafe {
// Allocate buffers.
gl::GenVertexArrays(1, &mut vao);
gl::GenBuffers(1, &mut vbo);
gl::BindVertexArray(vao);
// VBO binding is not part of VAO itself, but VBO binding is stored in attributes.
gl::BindBuffer(gl::ARRAY_BUFFER, vbo);
let mut attribute_offset = 0;
// Position.
gl::VertexAttribPointer(
0,
2,
gl::FLOAT,
gl::FALSE,
mem::size_of::<Vertex>() as i32,
attribute_offset as *const _,
);
gl::EnableVertexAttribArray(0);
attribute_offset += mem::size_of::<f32>() * 2;
// Color.
gl::VertexAttribPointer(
1,
4,
gl::UNSIGNED_BYTE,
gl::TRUE,
mem::size_of::<Vertex>() as i32,
attribute_offset as *const _,
);
gl::EnableVertexAttribArray(1);
// Reset buffer bindings.
gl::BindVertexArray(0);
gl::BindBuffer(gl::ARRAY_BUFFER, 0);
}
let programs = [rect_program, undercurl_program, dotted_program, dashed_program];
Ok(Self { vao, vbo, programs, vertices: Default::default() })
}
pub fn draw(&mut self, size_info: &SizeInfo, metrics: &Metrics, rects: Vec<RenderRect>) {
unsafe {
// Bind VAO to enable vertex attribute slots.
gl::BindVertexArray(self.vao);
// Bind VBO only once for buffer data upload only.
gl::BindBuffer(gl::ARRAY_BUFFER, self.vbo);
}
let half_width = size_info.width() / 2.;
let half_height = size_info.height() / 2.;
// Build rect vertices vector.
self.vertices.iter_mut().for_each(|vertices| vertices.clear());
for rect in &rects {
Self::add_rect(&mut self.vertices[rect.kind as usize], half_width, half_height, rect);
}
unsafe {
// We iterate in reverse order to draw plain rects at the end, since we want visual
// bell or damage rects be above the lines.
for rect_kind in (RectKind::Normal as u8..RectKind::NumKinds as u8).rev() {
let vertices = &mut self.vertices[rect_kind as usize];
if vertices.is_empty() {
continue;
}
let program = &self.programs[rect_kind as usize];
gl::UseProgram(program.id());
program.update_uniforms(size_info, metrics);
// Upload accumulated undercurl vertices.
gl::BufferData(
gl::ARRAY_BUFFER,
(vertices.len() * mem::size_of::<Vertex>()) as isize,
vertices.as_ptr() as *const _,
gl::STREAM_DRAW,
);
// Draw all vertices as list of triangles.
gl::DrawArrays(gl::TRIANGLES, 0, vertices.len() as i32);
}
// Disable program.
gl::UseProgram(0);
// Reset buffer bindings to nothing.
gl::BindBuffer(gl::ARRAY_BUFFER, 0);
gl::BindVertexArray(0);
}
}
fn add_rect(vertices: &mut Vec<Vertex>, half_width: f32, half_height: f32, rect: &RenderRect) {
// Calculate rectangle vertices positions in normalized device coordinates.
// NDC range from -1 to +1, with Y pointing up.
let x = rect.x / half_width - 1.0;
let y = -rect.y / half_height + 1.0;
let width = rect.width / half_width;
let height = rect.height / half_height;
let (r, g, b) = rect.color.as_tuple();
let a = (rect.alpha * 255.) as u8;
// Make quad vertices.
let quad = [
Vertex { x, y, r, g, b, a },
Vertex { x, y: y - height, r, g, b, a },
Vertex { x: x + width, y, r, g, b, a },
Vertex { x: x + width, y: y - height, r, g, b, a },
];
// Append the vertices to form two triangles.
vertices.push(quad[0]);
vertices.push(quad[1]);
vertices.push(quad[2]);
vertices.push(quad[2]);
vertices.push(quad[3]);
vertices.push(quad[1]);
}
}
impl Drop for RectRenderer {
fn drop(&mut self) {
unsafe {
gl::DeleteBuffers(1, &self.vbo);
gl::DeleteVertexArrays(1, &self.vao);
}
}
}
/// Rectangle drawing program.
#[derive(Debug)]
pub struct RectShaderProgram {
/// Shader program.
program: ShaderProgram,
/// Cell width.
u_cell_width: Option<GLint>,
/// Cell height.
u_cell_height: Option<GLint>,
/// Terminal padding.
u_padding_x: Option<GLint>,
/// A padding from the bottom of the screen to viewport.
u_padding_y: Option<GLint>,
/// Underline position.
u_underline_position: Option<GLint>,
/// Underline thickness.
u_underline_thickness: Option<GLint>,
/// Undercurl position.
u_undercurl_position: Option<GLint>,
}
impl RectShaderProgram {
    /// Compile and link the rect shader program specialized for `kind`.
    pub fn new(shader_version: ShaderVersion, kind: RectKind) -> Result<Self, ShaderError> {
        // XXX: This must be in-sync with fragment shader defines.
        let header = match kind {
            RectKind::Undercurl => Some("#define DRAW_UNDERCURL\n"),
            RectKind::DottedUnderline => Some("#define DRAW_DOTTED\n"),
            RectKind::DashedUnderline => Some("#define DRAW_DASHED\n"),
            _ => None,
        };
        let program = ShaderProgram::new(shader_version, header, RECT_SHADER_V, RECT_SHADER_F)?;

        // Resolve all uniform locations up front; a missing uniform stays `None`.
        let u_cell_width = program.get_uniform_location(cstr!("cellWidth")).ok();
        let u_cell_height = program.get_uniform_location(cstr!("cellHeight")).ok();
        let u_padding_x = program.get_uniform_location(cstr!("paddingX")).ok();
        let u_padding_y = program.get_uniform_location(cstr!("paddingY")).ok();
        let u_underline_position = program.get_uniform_location(cstr!("underlinePosition")).ok();
        let u_underline_thickness = program.get_uniform_location(cstr!("underlineThickness")).ok();
        let u_undercurl_position = program.get_uniform_location(cstr!("undercurlPosition")).ok();

        Ok(Self {
            program,
            u_cell_width,
            u_cell_height,
            u_padding_x,
            u_padding_y,
            u_underline_position,
            u_underline_thickness,
            u_undercurl_position,
        })
    }

    fn id(&self) -> GLuint {
        self.program.id()
    }

    /// Upload the current cell metrics and padding into the shader's uniforms.
    ///
    /// Uniforms that were not resolved at link time (`None`) are skipped.
    pub fn update_uniforms(&self, size_info: &SizeInfo, metrics: &Metrics) {
        // Undercurl sits halfway into the descent area.
        let undercurl_position = (0.5 * metrics.descent).abs();
        let underline_position = metrics.descent.abs() - metrics.underline_position.abs();

        // Space left below the last fully visible row of cells in the viewport.
        let viewport_height = size_info.height() - size_info.padding_y();
        let padding_y = viewport_height
            - (viewport_height / size_info.cell_height()).floor() * size_info.cell_height();

        // Pair every known location with the value it should receive.
        let uniforms = [
            (self.u_cell_width, size_info.cell_width()),
            (self.u_cell_height, size_info.cell_height()),
            (self.u_padding_y, padding_y),
            (self.u_padding_x, size_info.padding_x()),
            (self.u_underline_position, underline_position),
            (self.u_underline_thickness, metrics.underline_thickness),
            (self.u_undercurl_position, undercurl_position),
        ];

        unsafe {
            for (location, value) in uniforms {
                if let Some(location) = location {
                    gl::Uniform1f(location, value);
                }
            }
        }
    }
}
| rust | {
"argument_definitions": [
{
"definitions": [
"pub struct SizeInfo<T = f32> {\n /// Terminal window width.\n width: T,\n\n /// Terminal window height.\n height: T,\n\n /// Width of individual cell.\n cell_width: T,\n\n /// Height of individual cell.\n cell_height: T,\n\n /// Horizontal window padding.\n padding_x: T,\n\n /// Vertical window padding.\n padding_y: T,\n\n /// Number of lines in the viewport.\n screen_lines: usize,\n\n /// Number of columns in the viewport.\n columns: usize,\n}"
],
"name": "size",
"type": "&SizeInfo"
},
{
"definitions": [
"pub struct Point<L = Line, C = Column> {\n pub line: L,\n pub column: C,\n}"
],
"name": "start",
"type": "Point<usize>"
},
{
"definitions": [
"pub struct Point<L = Line, C = Column> {\n pub line: L,\n pub column: C,\n}"
],
"name": "end",
"type": "Point<usize>"
},
{
"definitions": [
"impl Rgb {\n #[inline]\n pub const fn new(r: u8, g: u8, b: u8) -> Self {\n Self(VteRgb { r, g, b })\n }\n\n #[inline]\n pub fn as_tuple(self) -> (u8, u8, u8) {\n (self.0.r, self.0.g, self.0.b)\n }\n}"
],
"name": "color",
"type": "Rgb"
}
],
"end_line": 156,
"name": "create_rect",
"signature": "fn create_rect(\n size: &SizeInfo,\n descent: f32,\n start: Point<usize>,\n end: Point<usize>,\n position: f32,\n mut thickness: f32,\n color: Rgb,\n ) -> RenderRect",
"start_line": 123
} | {
"class_name": "impl RenderLine {\n pub fn rects(&self, flag: Flags, metrics: &Metrics, size: &SizeInfo) -> Vec<RenderRect> {\n let mut rects = Vec::new();\n\n let mut start = self.start;\n while start.line < self.end.line {\n let end = Point::new(start.line, size.last_column());\n Self::push_rects(&mut rects, metrics, size, flag, start, end, self.color);\n start = Point::new(start.line + 1, Column(0));\n }\n Self::push_rects(&mut rects, metrics, size, flag, start, self.end, self.color);\n\n rects\n }\n\n /// Push all rects required to draw the cell's line.\n fn push_rects(\n rects: &mut Vec<RenderRect>,\n metrics: &Metrics,\n size: &SizeInfo,\n flag: Flags,\n start: Point<usize>,\n end: Point<usize>,\n color: Rgb,\n ) {\n let (position, thickness, ty) = match flag {\n Flags::DOUBLE_UNDERLINE => {\n // Position underlines so each one has 50% of descent available.\n let top_pos = 0.25 * metrics.descent;\n let bottom_pos = 0.75 * metrics.descent;\n\n rects.push(Self::create_rect(\n size,\n metrics.descent,\n start,\n end,\n top_pos,\n metrics.underline_thickness,\n color,\n ));\n\n (bottom_pos, metrics.underline_thickness, RectKind::Normal)\n },\n // Make undercurl occupy the entire descent area.\n Flags::UNDERCURL => (metrics.descent, metrics.descent.abs(), RectKind::Undercurl),\n Flags::UNDERLINE => {\n (metrics.underline_position, metrics.underline_thickness, RectKind::Normal)\n },\n // Make dotted occupy the entire descent area.\n Flags::DOTTED_UNDERLINE => {\n (metrics.descent, metrics.descent.abs(), RectKind::DottedUnderline)\n },\n Flags::DASHED_UNDERLINE => {\n (metrics.underline_position, metrics.underline_thickness, RectKind::DashedUnderline)\n },\n Flags::STRIKEOUT => {\n (metrics.strikeout_position, metrics.strikeout_thickness, RectKind::Normal)\n },\n _ => unimplemented!(\"Invalid flag for cell line drawing specified\"),\n };\n\n let mut rect =\n Self::create_rect(size, metrics.descent, start, end, position, thickness, color);\n rect.kind = ty;\n 
rects.push(rect);\n }\n\n /// Create a line's rect at a position relative to the baseline.\n fn create_rect(\n size: &SizeInfo,\n descent: f32,\n start: Point<usize>,\n end: Point<usize>,\n position: f32,\n mut thickness: f32,\n color: Rgb,\n ) -> RenderRect {\n let start_x = start.column.0 as f32 * size.cell_width();\n let end_x = (end.column.0 + 1) as f32 * size.cell_width();\n let width = end_x - start_x;\n\n // Make sure lines are always visible.\n thickness = thickness.max(1.);\n\n let line_bottom = (start.line as f32 + 1.) * size.cell_height();\n let baseline = line_bottom + descent;\n\n let mut y = (baseline - position - thickness / 2.).round();\n let max_y = line_bottom - thickness;\n if y > max_y {\n y = max_y;\n }\n\n RenderRect::new(\n start_x + size.padding_x(),\n y + size.padding_y(),\n width,\n thickness,\n color,\n 1.,\n )\n }\n}",
"class_signature": "impl RenderLine"
} |
new | alacritty-master/alacritty/src/renderer/text/atlas.rs | pub fn new(size: i32, is_gles_context: bool) -> Self {
let mut id: GLuint = 0;
unsafe {
gl::PixelStorei(gl::UNPACK_ALIGNMENT, 1);
gl::GenTextures(1, &mut id);
gl::BindTexture(gl::TEXTURE_2D, id);
// Use RGBA texture for both normal and emoji glyphs, since it has no performance
// impact.
gl::TexImage2D(
gl::TEXTURE_2D,
0,
gl::RGBA as i32,
size,
size,
0,
gl::RGBA,
gl::UNSIGNED_BYTE,
ptr::null(),
);
gl::TexParameteri(gl::TEXTURE_2D, gl::TEXTURE_WRAP_S, gl::CLAMP_TO_EDGE as i32);
gl::TexParameteri(gl::TEXTURE_2D, gl::TEXTURE_WRAP_T, gl::CLAMP_TO_EDGE as i32);
gl::TexParameteri(gl::TEXTURE_2D, gl::TEXTURE_MIN_FILTER, gl::LINEAR as i32);
gl::TexParameteri(gl::TEXTURE_2D, gl::TEXTURE_MAG_FILTER, gl::LINEAR as i32);
gl::BindTexture(gl::TEXTURE_2D, 0);
}
Self {
id,
width: size,
height: size,
row_extent: 0,
row_baseline: 0,
row_tallest: 0,
is_gles_context,
}
} | use std::borrow::Cow;
use std::ptr;
use crossfont::{BitmapBuffer, RasterizedGlyph};
use crate::gl;
use crate::gl::types::*;
use super::Glyph;
/// Size of the Atlas.
pub const ATLAS_SIZE: i32 = 1024;
/// Manages a single texture atlas.
///
/// The strategy for filling an atlas looks roughly like this:
///
/// ```text
/// (width, height)
/// ┌─────┬─────┬─────┬─────┬─────┐
/// │ 10 │ │ │ │ │ <- Empty spaces; can be filled while
/// │ │ │ │ │ │ glyph_height < height - row_baseline
/// ├─────┼─────┼─────┼─────┼─────┤
/// │ 5 │ 6 │ 7 │ 8 │ 9 │
/// │ │ │ │ │ │
/// ├─────┼─────┼─────┼─────┴─────┤ <- Row height is tallest glyph in row; this is
/// │ 1 │ 2 │ 3 │ 4 │ used as the baseline for the following row.
/// │ │ │ │ │ <- Row considered full when next glyph doesn't
/// └─────┴─────┴─────┴───────────┘ fit in the row.
/// (0, 0) x->
/// ```
#[derive(Debug)]
pub struct Atlas {
    /// Texture id for this atlas.
    id: GLuint,
    /// Width of atlas.
    width: i32,
    /// Height of atlas.
    height: i32,
    /// Left-most free pixel in a row.
    ///
    /// This is called the extent because it is the upper bound of used pixels
    /// in a row.
    row_extent: i32,
    /// Baseline for glyphs in the current row.
    row_baseline: i32,
    /// Tallest glyph in current row.
    ///
    /// This is used as the advance when end of row is reached.
    row_tallest: i32,
    /// Whether the OpenGL context is GLES.
    ///
    /// This affects the texture loading: GLES cannot upload RGB data into an
    /// RGBA texture, so an explicit conversion is done on insert.
    is_gles_context: bool,
}
/// Error that can happen when inserting a texture to the Atlas.
///
/// Derives `Debug` so the error can be logged and used with
/// `Result::unwrap`/`expect` like other public error types.
#[derive(Debug)]
pub enum AtlasInsertError {
    /// Texture atlas is full.
    Full,
    /// The glyph cannot fit within a single texture.
    GlyphTooLarge,
}
impl Atlas {
    /// Create an empty `size` x `size` atlas texture.
    ///
    /// Allocates an RGBA GL texture with linear filtering and edge clamping;
    /// no pixel data is uploaded yet.
    pub fn new(size: i32, is_gles_context: bool) -> Self {
        let mut id: GLuint = 0;
        unsafe {
            // Glyph rows are tightly packed; the default 4-byte unpack
            // alignment would skew narrow uploads.
            gl::PixelStorei(gl::UNPACK_ALIGNMENT, 1);
            gl::GenTextures(1, &mut id);
            gl::BindTexture(gl::TEXTURE_2D, id);
            // Use RGBA texture for both normal and emoji glyphs, since it has no performance
            // impact.
            gl::TexImage2D(
                gl::TEXTURE_2D,
                0,
                gl::RGBA as i32,
                size,
                size,
                0,
                gl::RGBA,
                gl::UNSIGNED_BYTE,
                // No initial data; glyphs are uploaded later via TexSubImage2D.
                ptr::null(),
            );
            gl::TexParameteri(gl::TEXTURE_2D, gl::TEXTURE_WRAP_S, gl::CLAMP_TO_EDGE as i32);
            gl::TexParameteri(gl::TEXTURE_2D, gl::TEXTURE_WRAP_T, gl::CLAMP_TO_EDGE as i32);
            gl::TexParameteri(gl::TEXTURE_2D, gl::TEXTURE_MIN_FILTER, gl::LINEAR as i32);
            gl::TexParameteri(gl::TEXTURE_2D, gl::TEXTURE_MAG_FILTER, gl::LINEAR as i32);
            gl::BindTexture(gl::TEXTURE_2D, 0);
        }
        Self {
            id,
            width: size,
            height: size,
            row_extent: 0,
            row_baseline: 0,
            row_tallest: 0,
            is_gles_context,
        }
    }
    /// Reset the row-packing state; the texture contents are left as-is.
    pub fn clear(&mut self) {
        self.row_extent = 0;
        self.row_baseline = 0;
        self.row_tallest = 0;
    }
    /// Insert a RasterizedGlyph into the texture atlas.
    ///
    /// Returns [`AtlasInsertError::GlyphTooLarge`] if the glyph can never fit
    /// in an atlas of this size, and [`AtlasInsertError::Full`] when this
    /// particular atlas has no remaining space.
    pub fn insert(
        &mut self,
        glyph: &RasterizedGlyph,
        active_tex: &mut u32,
    ) -> Result<Glyph, AtlasInsertError> {
        if glyph.width > self.width || glyph.height > self.height {
            return Err(AtlasInsertError::GlyphTooLarge);
        }
        // If there's not enough room in current row, go onto next one.
        if !self.room_in_row(glyph) {
            self.advance_row()?;
        }
        // If there's still no room, there's nothing that can be done here.
        if !self.room_in_row(glyph) {
            return Err(AtlasInsertError::Full);
        }
        // There appears to be room; load the glyph.
        Ok(self.insert_inner(glyph, active_tex))
    }
    /// Insert the glyph without checking for room.
    ///
    /// Internal function for use once atlas has been checked for space. GL
    /// errors could still occur at this point if we were checking for them;
    /// hence, the Result.
    fn insert_inner(&mut self, glyph: &RasterizedGlyph, active_tex: &mut u32) -> Glyph {
        // Place the glyph at the current packing cursor.
        let offset_y = self.row_baseline;
        let offset_x = self.row_extent;
        let height = glyph.height;
        let width = glyph.width;
        let multicolor;
        unsafe {
            gl::BindTexture(gl::TEXTURE_2D, self.id);
            // Load data into OpenGL.
            let (format, buffer) = match &glyph.buffer {
                BitmapBuffer::Rgb(buffer) => {
                    multicolor = false;
                    // Gles context doesn't allow uploading RGB data into RGBA texture, so need
                    // explicit copy.
                    if self.is_gles_context {
                        // Expand RGB to RGBA with an opaque alpha channel.
                        let mut new_buffer = Vec::with_capacity(buffer.len() / 3 * 4);
                        for rgb in buffer.chunks_exact(3) {
                            new_buffer.push(rgb[0]);
                            new_buffer.push(rgb[1]);
                            new_buffer.push(rgb[2]);
                            new_buffer.push(u8::MAX);
                        }
                        (gl::RGBA, Cow::Owned(new_buffer))
                    } else {
                        (gl::RGB, Cow::Borrowed(buffer))
                    }
                },
                BitmapBuffer::Rgba(buffer) => {
                    multicolor = true;
                    (gl::RGBA, Cow::Borrowed(buffer))
                },
            };
            gl::TexSubImage2D(
                gl::TEXTURE_2D,
                0,
                offset_x,
                offset_y,
                width,
                height,
                format,
                gl::UNSIGNED_BYTE,
                buffer.as_ptr() as *const _,
            );
            gl::BindTexture(gl::TEXTURE_2D, 0);
            // We unbound the texture above, so reset the caller's cached binding.
            *active_tex = 0;
        }
        // Update Atlas state.
        self.row_extent = offset_x + width;
        if height > self.row_tallest {
            self.row_tallest = height;
        }
        // Generate UV coordinates.
        let uv_bot = offset_y as f32 / self.height as f32;
        let uv_left = offset_x as f32 / self.width as f32;
        let uv_height = height as f32 / self.height as f32;
        let uv_width = width as f32 / self.width as f32;
        Glyph {
            tex_id: self.id,
            multicolor,
            top: glyph.top as i16,
            left: glyph.left as i16,
            width: width as i16,
            height: height as i16,
            uv_bot,
            uv_left,
            uv_width,
            uv_height,
        }
    }
    /// Check if there's room in the current row for given glyph.
    pub fn room_in_row(&self, raw: &RasterizedGlyph) -> bool {
        let next_extent = self.row_extent + raw.width;
        let enough_width = next_extent <= self.width;
        let enough_height = raw.height < (self.height - self.row_baseline);
        enough_width && enough_height
    }
    /// Mark current row as finished and prepare to insert into the next row.
    ///
    /// Errors with [`AtlasInsertError::Full`] when no vertical space remains.
    pub fn advance_row(&mut self) -> Result<(), AtlasInsertError> {
        // The next row starts above the tallest glyph of the current one.
        let advance_to = self.row_baseline + self.row_tallest;
        if self.height - advance_to <= 0 {
            return Err(AtlasInsertError::Full);
        }
        self.row_baseline = advance_to;
        self.row_extent = 0;
        self.row_tallest = 0;
        Ok(())
    }
    /// Load a glyph into a texture atlas.
    ///
    /// If the current atlas is full, a new one will be created.
    #[inline]
    pub fn load_glyph(
        active_tex: &mut GLuint,
        atlas: &mut Vec<Atlas>,
        current_atlas: &mut usize,
        rasterized: &RasterizedGlyph,
    ) -> Glyph {
        // At least one atlas is guaranteed to be in the `self.atlas` list; thus
        // the unwrap.
        match atlas[*current_atlas].insert(rasterized, active_tex) {
            Ok(glyph) => glyph,
            Err(AtlasInsertError::Full) => {
                // Get the context type before adding a new Atlas.
                let is_gles_context = atlas[*current_atlas].is_gles_context;
                // Advance the current Atlas index.
                *current_atlas += 1;
                if *current_atlas == atlas.len() {
                    let new = Atlas::new(ATLAS_SIZE, is_gles_context);
                    *active_tex = 0; // Atlas::new binds a texture. Ugh this is sloppy.
                    atlas.push(new);
                }
                // Retry the insert against the (possibly fresh) next atlas.
                Atlas::load_glyph(active_tex, atlas, current_atlas, rasterized)
            },
            // Glyph can never fit: return a zero-sized placeholder so rendering
            // can continue without this glyph.
            Err(AtlasInsertError::GlyphTooLarge) => Glyph {
                tex_id: atlas[*current_atlas].id,
                multicolor: false,
                top: 0,
                left: 0,
                width: 0,
                height: 0,
                uv_bot: 0.,
                uv_left: 0.,
                uv_width: 0.,
                uv_height: 0.,
            },
        }
    }
    /// Reset every atlas' packing state and restart filling from the first one.
    #[inline]
    pub fn clear_atlas(atlas: &mut [Atlas], current_atlas: &mut usize) {
        for atlas in atlas.iter_mut() {
            atlas.clear();
        }
        *current_atlas = 0;
    }
}
impl Drop for Atlas {
    /// Free the GL texture backing this atlas.
    fn drop(&mut self) {
        unsafe {
            gl::DeleteTextures(1, &self.id);
        }
    }
}
| rust | {
"argument_definitions": [],
"end_line": 110,
"name": "new",
"signature": "pub fn new(size: i32, is_gles_context: bool) -> Self",
"start_line": 73
} | {
"class_name": "impl Atlas {\n pub fn new(size: i32, is_gles_context: bool) -> Self {\n let mut id: GLuint = 0;\n unsafe {\n gl::PixelStorei(gl::UNPACK_ALIGNMENT, 1);\n gl::GenTextures(1, &mut id);\n gl::BindTexture(gl::TEXTURE_2D, id);\n // Use RGBA texture for both normal and emoji glyphs, since it has no performance\n // impact.\n gl::TexImage2D(\n gl::TEXTURE_2D,\n 0,\n gl::RGBA as i32,\n size,\n size,\n 0,\n gl::RGBA,\n gl::UNSIGNED_BYTE,\n ptr::null(),\n );\n\n gl::TexParameteri(gl::TEXTURE_2D, gl::TEXTURE_WRAP_S, gl::CLAMP_TO_EDGE as i32);\n gl::TexParameteri(gl::TEXTURE_2D, gl::TEXTURE_WRAP_T, gl::CLAMP_TO_EDGE as i32);\n gl::TexParameteri(gl::TEXTURE_2D, gl::TEXTURE_MIN_FILTER, gl::LINEAR as i32);\n gl::TexParameteri(gl::TEXTURE_2D, gl::TEXTURE_MAG_FILTER, gl::LINEAR as i32);\n\n gl::BindTexture(gl::TEXTURE_2D, 0);\n }\n\n Self {\n id,\n width: size,\n height: size,\n row_extent: 0,\n row_baseline: 0,\n row_tallest: 0,\n is_gles_context,\n }\n }\n\n pub fn clear(&mut self) {\n self.row_extent = 0;\n self.row_baseline = 0;\n self.row_tallest = 0;\n }\n\n /// Insert a RasterizedGlyph into the texture atlas.\n pub fn insert(\n &mut self,\n glyph: &RasterizedGlyph,\n active_tex: &mut u32,\n ) -> Result<Glyph, AtlasInsertError> {\n if glyph.width > self.width || glyph.height > self.height {\n return Err(AtlasInsertError::GlyphTooLarge);\n }\n\n // If there's not enough room in current row, go onto next one.\n if !self.room_in_row(glyph) {\n self.advance_row()?;\n }\n\n // If there's still not room, there's nothing that can be done here..\n if !self.room_in_row(glyph) {\n return Err(AtlasInsertError::Full);\n }\n\n // There appears to be room; load the glyph.\n Ok(self.insert_inner(glyph, active_tex))\n }\n\n /// Insert the glyph without checking for room.\n ///\n /// Internal function for use once atlas has been checked for space. 
GL\n /// errors could still occur at this point if we were checking for them;\n /// hence, the Result.\n fn insert_inner(&mut self, glyph: &RasterizedGlyph, active_tex: &mut u32) -> Glyph {\n let offset_y = self.row_baseline;\n let offset_x = self.row_extent;\n let height = glyph.height;\n let width = glyph.width;\n let multicolor;\n\n unsafe {\n gl::BindTexture(gl::TEXTURE_2D, self.id);\n\n // Load data into OpenGL.\n let (format, buffer) = match &glyph.buffer {\n BitmapBuffer::Rgb(buffer) => {\n multicolor = false;\n // Gles context doesn't allow uploading RGB data into RGBA texture, so need\n // explicit copy.\n if self.is_gles_context {\n let mut new_buffer = Vec::with_capacity(buffer.len() / 3 * 4);\n for rgb in buffer.chunks_exact(3) {\n new_buffer.push(rgb[0]);\n new_buffer.push(rgb[1]);\n new_buffer.push(rgb[2]);\n new_buffer.push(u8::MAX);\n }\n (gl::RGBA, Cow::Owned(new_buffer))\n } else {\n (gl::RGB, Cow::Borrowed(buffer))\n }\n },\n BitmapBuffer::Rgba(buffer) => {\n multicolor = true;\n (gl::RGBA, Cow::Borrowed(buffer))\n },\n };\n\n gl::TexSubImage2D(\n gl::TEXTURE_2D,\n 0,\n offset_x,\n offset_y,\n width,\n height,\n format,\n gl::UNSIGNED_BYTE,\n buffer.as_ptr() as *const _,\n );\n\n gl::BindTexture(gl::TEXTURE_2D, 0);\n *active_tex = 0;\n }\n\n // Update Atlas state.\n self.row_extent = offset_x + width;\n if height > self.row_tallest {\n self.row_tallest = height;\n }\n\n // Generate UV coordinates.\n let uv_bot = offset_y as f32 / self.height as f32;\n let uv_left = offset_x as f32 / self.width as f32;\n let uv_height = height as f32 / self.height as f32;\n let uv_width = width as f32 / self.width as f32;\n\n Glyph {\n tex_id: self.id,\n multicolor,\n top: glyph.top as i16,\n left: glyph.left as i16,\n width: width as i16,\n height: height as i16,\n uv_bot,\n uv_left,\n uv_width,\n uv_height,\n }\n }\n\n /// Check if there's room in the current row for given glyph.\n pub fn room_in_row(&self, raw: &RasterizedGlyph) -> bool {\n let next_extent = 
self.row_extent + raw.width;\n let enough_width = next_extent <= self.width;\n let enough_height = raw.height < (self.height - self.row_baseline);\n\n enough_width && enough_height\n }\n\n /// Mark current row as finished and prepare to insert into the next row.\n pub fn advance_row(&mut self) -> Result<(), AtlasInsertError> {\n let advance_to = self.row_baseline + self.row_tallest;\n if self.height - advance_to <= 0 {\n return Err(AtlasInsertError::Full);\n }\n\n self.row_baseline = advance_to;\n self.row_extent = 0;\n self.row_tallest = 0;\n\n Ok(())\n }\n\n /// Load a glyph into a texture atlas.\n ///\n /// If the current atlas is full, a new one will be created.\n #[inline]\n pub fn load_glyph(\n active_tex: &mut GLuint,\n atlas: &mut Vec<Atlas>,\n current_atlas: &mut usize,\n rasterized: &RasterizedGlyph,\n ) -> Glyph {\n // At least one atlas is guaranteed to be in the `self.atlas` list; thus\n // the unwrap.\n match atlas[*current_atlas].insert(rasterized, active_tex) {\n Ok(glyph) => glyph,\n Err(AtlasInsertError::Full) => {\n // Get the context type before adding a new Atlas.\n let is_gles_context = atlas[*current_atlas].is_gles_context;\n\n // Advance the current Atlas index.\n *current_atlas += 1;\n if *current_atlas == atlas.len() {\n let new = Atlas::new(ATLAS_SIZE, is_gles_context);\n *active_tex = 0; // Atlas::new binds a texture. Ugh this is sloppy.\n atlas.push(new);\n }\n Atlas::load_glyph(active_tex, atlas, current_atlas, rasterized)\n },\n Err(AtlasInsertError::GlyphTooLarge) => Glyph {\n tex_id: atlas[*current_atlas].id,\n multicolor: false,\n top: 0,\n left: 0,\n width: 0,\n height: 0,\n uv_bot: 0.,\n uv_left: 0.,\n uv_width: 0.,\n uv_height: 0.,\n },\n }\n }\n\n #[inline]\n pub fn clear_atlas(atlas: &mut [Atlas], current_atlas: &mut usize) {\n for atlas in atlas.iter_mut() {\n atlas.clear();\n }\n *current_atlas = 0;\n }\n}",
"class_signature": "impl Atlas"
} |
builtin_glyph | alacritty-master/alacritty/src/renderer/text/builtin_font.rs | pub fn builtin_glyph(
character: char,
metrics: &Metrics,
offset: &Delta<i8>,
glyph_offset: &Delta<i8>,
) -> Option<RasterizedGlyph> {
let mut glyph = match character {
// Box drawing characters and block elements.
'\u{2500}'..='\u{259f}' | '\u{1fb00}'..='\u{1fb3b}' => {
box_drawing(character, metrics, offset)
},
// Powerline symbols: '','','',''
POWERLINE_TRIANGLE_LTR..=POWERLINE_ARROW_RTL => {
powerline_drawing(character, metrics, offset)?
},
_ => return None,
};
// Since we want to ignore `glyph_offset` for the built-in font, subtract it to compensate its
// addition when loading glyphs in the renderer.
glyph.left -= glyph_offset.x as i32;
glyph.top -= glyph_offset.y as i32;
Some(glyph)
} | //! Hand-rolled drawing of unicode characters that need to fully cover their character area.
use std::{cmp, mem, ops};
use crossfont::{BitmapBuffer, Metrics, RasterizedGlyph};
use crate::config::ui_config::Delta;
// Colors which are used for filling shade variants.
const COLOR_FILL_ALPHA_STEP_1: Pixel = Pixel { _r: 192, _g: 192, _b: 192 };
const COLOR_FILL_ALPHA_STEP_2: Pixel = Pixel { _r: 128, _g: 128, _b: 128 };
const COLOR_FILL_ALPHA_STEP_3: Pixel = Pixel { _r: 64, _g: 64, _b: 64 };
/// Default color used for filling.
const COLOR_FILL: Pixel = Pixel { _r: 255, _g: 255, _b: 255 };
const POWERLINE_TRIANGLE_LTR: char = '\u{e0b0}';
const POWERLINE_ARROW_LTR: char = '\u{e0b1}';
const POWERLINE_TRIANGLE_RTL: char = '\u{e0b2}';
const POWERLINE_ARROW_RTL: char = '\u{e0b3}';
/// Returns the rasterized glyph if the character is part of the built-in font.
pub fn builtin_glyph(
    character: char,
    metrics: &Metrics,
    offset: &Delta<i8>,
    glyph_offset: &Delta<i8>,
) -> Option<RasterizedGlyph> {
    // Dispatch to the drawing routine covering this character, bailing out
    // early for anything outside the built-in ranges.
    let mut glyph = if matches!(character, '\u{2500}'..='\u{259f}' | '\u{1fb00}'..='\u{1fb3b}') {
        // Box drawing characters and block elements.
        box_drawing(character, metrics, offset)
    } else if matches!(character, POWERLINE_TRIANGLE_LTR..=POWERLINE_ARROW_RTL) {
        // Powerline symbols.
        powerline_drawing(character, metrics, offset)?
    } else {
        return None;
    };
    // Since we want to ignore `glyph_offset` for the built-in font, subtract it to compensate its
    // addition when loading glyphs in the renderer.
    glyph.left -= glyph_offset.x as i32;
    glyph.top -= glyph_offset.y as i32;
    Some(glyph)
}
fn box_drawing(character: char, metrics: &Metrics, offset: &Delta<i8>) -> RasterizedGlyph {
// Ensure that width and height is at least one.
let height = (metrics.line_height as i32 + offset.y as i32).max(1) as usize;
let width = (metrics.average_advance as i32 + offset.x as i32).max(1) as usize;
let stroke_size = calculate_stroke_size(width);
let heavy_stroke_size = stroke_size * 2;
// Certain symbols require larger canvas than the cell itself, since for proper contiguous
// lines they require drawing on neighbour cells. So treat them specially early on and handle
// 'normal' characters later.
let mut canvas = match character {
// Diagonals: '╱', '╲', '╳'.
'\u{2571}'..='\u{2573}' => {
// Last coordinates.
let x_end = width as f32;
let mut y_end = height as f32;
let top = height as i32 + metrics.descent as i32 + stroke_size as i32;
let height = height + 2 * stroke_size;
let mut canvas = Canvas::new(width, height + 2 * stroke_size);
// The offset that we should take into account when drawing, since we've enlarged
// buffer vertically by twice of that amount.
let y_offset = stroke_size as f32;
y_end += y_offset;
let k = y_end / x_end;
let f_x = |x: f32, h: f32| -> f32 { -1. * k * x + h + y_offset };
let g_x = |x: f32, h: f32| -> f32 { k * x + h + y_offset };
let from_x = 0.;
let to_x = x_end + 1.;
for stroke_size in 0..2 * stroke_size {
let stroke_size = stroke_size as f32 / 2.;
if character == '\u{2571}' || character == '\u{2573}' {
let h = y_end - stroke_size;
let from_y = f_x(from_x, h);
let to_y = f_x(to_x, h);
canvas.draw_line(from_x, from_y, to_x, to_y);
}
if character == '\u{2572}' || character == '\u{2573}' {
let from_y = g_x(from_x, stroke_size);
let to_y = g_x(to_x, stroke_size);
canvas.draw_line(from_x, from_y, to_x, to_y);
}
}
let buffer = BitmapBuffer::Rgb(canvas.into_raw());
return RasterizedGlyph {
character,
top,
left: 0,
height: height as i32,
width: width as i32,
buffer,
advance: (width as i32, height as i32),
};
},
_ => Canvas::new(width, height),
};
match character {
// Horizontal dashes: '┄', '┅', '┈', '┉', '╌', '╍'.
'\u{2504}' | '\u{2505}' | '\u{2508}' | '\u{2509}' | '\u{254c}' | '\u{254d}' => {
let (num_gaps, stroke_size) = match character {
'\u{2504}' => (2, stroke_size),
'\u{2505}' => (2, heavy_stroke_size),
'\u{2508}' => (3, stroke_size),
'\u{2509}' => (3, heavy_stroke_size),
'\u{254c}' => (1, stroke_size),
'\u{254d}' => (1, heavy_stroke_size),
_ => unreachable!(),
};
let dash_gap_len = cmp::max(width / 8, 1);
let dash_len =
cmp::max(width.saturating_sub(dash_gap_len * num_gaps) / (num_gaps + 1), 1);
let y = canvas.y_center();
for gap in 0..=num_gaps {
let x = cmp::min(gap * (dash_len + dash_gap_len), width);
canvas.draw_h_line(x as f32, y, dash_len as f32, stroke_size);
}
},
// Vertical dashes: '┆', '┇', '┊', '┋', '╎', '╏'.
'\u{2506}' | '\u{2507}' | '\u{250a}' | '\u{250b}' | '\u{254e}' | '\u{254f}' => {
let (num_gaps, stroke_size) = match character {
'\u{2506}' => (2, stroke_size),
'\u{2507}' => (2, heavy_stroke_size),
'\u{250a}' => (3, stroke_size),
'\u{250b}' => (3, heavy_stroke_size),
'\u{254e}' => (1, stroke_size),
'\u{254f}' => (1, heavy_stroke_size),
_ => unreachable!(),
};
let dash_gap_len = cmp::max(height / 8, 1);
let dash_len =
cmp::max(height.saturating_sub(dash_gap_len * num_gaps) / (num_gaps + 1), 1);
let x = canvas.x_center();
for gap in 0..=num_gaps {
let y = cmp::min(gap * (dash_len + dash_gap_len), height);
canvas.draw_v_line(x, y as f32, dash_len as f32, stroke_size);
}
},
// Horizontal lines: '─', '━', '╴', '╶', '╸', '╺'.
// Vertical lines: '│', '┃', '╵', '╷', '╹', '╻'.
// Light and heavy line box components:
// '┌','┍','┎','┏','┐','┑','┒','┓','└','┕','┖','┗','┘','┙','┚','┛',├','┝','┞','┟','┠','┡',
// '┢','┣','┤','┥','┦','┧','┨','┩','┪','┫','┬','┭','┮','┯','┰','┱','┲','┳','┴','┵','┶','┷',
// '┸','┹','┺','┻','┼','┽','┾','┿','╀','╁','╂','╃','╄','╅','╆','╇','╈','╉','╊','╋'.
// Mixed light and heavy lines: '╼', '╽', '╾', '╿'.
'\u{2500}'..='\u{2503}' | '\u{250c}'..='\u{254b}' | '\u{2574}'..='\u{257f}' => {
// Left horizontal line.
let stroke_size_h1 = match character {
'\u{2500}' | '\u{2510}' | '\u{2512}' | '\u{2518}' | '\u{251a}' | '\u{2524}'
| '\u{2526}' | '\u{2527}' | '\u{2528}' | '\u{252c}' | '\u{252e}' | '\u{2530}'
| '\u{2532}' | '\u{2534}' | '\u{2536}' | '\u{2538}' | '\u{253a}' | '\u{253c}'
| '\u{253e}' | '\u{2540}' | '\u{2541}' | '\u{2542}' | '\u{2544}' | '\u{2546}'
| '\u{254a}' | '\u{2574}' | '\u{257c}' => stroke_size,
'\u{2501}' | '\u{2511}' | '\u{2513}' | '\u{2519}' | '\u{251b}' | '\u{2525}'
| '\u{2529}' | '\u{252a}' | '\u{252b}' | '\u{252d}' | '\u{252f}' | '\u{2531}'
| '\u{2533}' | '\u{2535}' | '\u{2537}' | '\u{2539}' | '\u{253b}' | '\u{253d}'
| '\u{253f}' | '\u{2543}' | '\u{2545}' | '\u{2547}' | '\u{2548}' | '\u{2549}'
| '\u{254b}' | '\u{2578}' | '\u{257e}' => heavy_stroke_size,
_ => 0,
};
// Right horizontal line.
let stroke_size_h2 = match character {
'\u{2500}' | '\u{250c}' | '\u{250e}' | '\u{2514}' | '\u{2516}' | '\u{251c}'
| '\u{251e}' | '\u{251f}' | '\u{2520}' | '\u{252c}' | '\u{252d}' | '\u{2530}'
| '\u{2531}' | '\u{2534}' | '\u{2535}' | '\u{2538}' | '\u{2539}' | '\u{253c}'
| '\u{253d}' | '\u{2540}' | '\u{2541}' | '\u{2542}' | '\u{2543}' | '\u{2545}'
| '\u{2549}' | '\u{2576}' | '\u{257e}' => stroke_size,
'\u{2501}' | '\u{250d}' | '\u{250f}' | '\u{2515}' | '\u{2517}' | '\u{251d}'
| '\u{2521}' | '\u{2522}' | '\u{2523}' | '\u{252e}' | '\u{252f}' | '\u{2532}'
| '\u{2533}' | '\u{2536}' | '\u{2537}' | '\u{253a}' | '\u{253b}' | '\u{253e}'
| '\u{253f}' | '\u{2544}' | '\u{2546}' | '\u{2547}' | '\u{2548}' | '\u{254a}'
| '\u{254b}' | '\u{257a}' | '\u{257c}' => heavy_stroke_size,
_ => 0,
};
// Top vertical line.
let stroke_size_v1 = match character {
'\u{2502}' | '\u{2514}' | '\u{2515}' | '\u{2518}' | '\u{2519}' | '\u{251c}'
| '\u{251d}' | '\u{251f}' | '\u{2522}' | '\u{2524}' | '\u{2525}' | '\u{2527}'
| '\u{252a}' | '\u{2534}' | '\u{2535}' | '\u{2536}' | '\u{2537}' | '\u{253c}'
| '\u{253d}' | '\u{253e}' | '\u{253f}' | '\u{2541}' | '\u{2545}' | '\u{2546}'
| '\u{2548}' | '\u{2575}' | '\u{257d}' => stroke_size,
'\u{2503}' | '\u{2516}' | '\u{2517}' | '\u{251a}' | '\u{251b}' | '\u{251e}'
| '\u{2520}' | '\u{2521}' | '\u{2523}' | '\u{2526}' | '\u{2528}' | '\u{2529}'
| '\u{252b}' | '\u{2538}' | '\u{2539}' | '\u{253a}' | '\u{253b}' | '\u{2540}'
| '\u{2542}' | '\u{2543}' | '\u{2544}' | '\u{2547}' | '\u{2549}' | '\u{254a}'
| '\u{254b}' | '\u{2579}' | '\u{257f}' => heavy_stroke_size,
_ => 0,
};
// Bottom vertical line.
let stroke_size_v2 = match character {
'\u{2502}' | '\u{250c}' | '\u{250d}' | '\u{2510}' | '\u{2511}' | '\u{251c}'
| '\u{251d}' | '\u{251e}' | '\u{2521}' | '\u{2524}' | '\u{2525}' | '\u{2526}'
| '\u{2529}' | '\u{252c}' | '\u{252d}' | '\u{252e}' | '\u{252f}' | '\u{253c}'
| '\u{253d}' | '\u{253e}' | '\u{253f}' | '\u{2540}' | '\u{2543}' | '\u{2544}'
| '\u{2547}' | '\u{2577}' | '\u{257f}' => stroke_size,
'\u{2503}' | '\u{250e}' | '\u{250f}' | '\u{2512}' | '\u{2513}' | '\u{251f}'
| '\u{2520}' | '\u{2522}' | '\u{2523}' | '\u{2527}' | '\u{2528}' | '\u{252a}'
| '\u{252b}' | '\u{2530}' | '\u{2531}' | '\u{2532}' | '\u{2533}' | '\u{2541}'
| '\u{2542}' | '\u{2545}' | '\u{2546}' | '\u{2548}' | '\u{2549}' | '\u{254a}'
| '\u{254b}' | '\u{257b}' | '\u{257d}' => heavy_stroke_size,
_ => 0,
};
let x_v = canvas.x_center();
let y_h = canvas.y_center();
let v_line_bounds_top = canvas.v_line_bounds(x_v, stroke_size_v1);
let v_line_bounds_bot = canvas.v_line_bounds(x_v, stroke_size_v2);
let h_line_bounds_left = canvas.h_line_bounds(y_h, stroke_size_h1);
let h_line_bounds_right = canvas.h_line_bounds(y_h, stroke_size_h2);
let size_h1 = cmp::max(v_line_bounds_top.1 as i32, v_line_bounds_bot.1 as i32) as f32;
let x_h = cmp::min(v_line_bounds_top.0 as i32, v_line_bounds_bot.0 as i32) as f32;
let size_h2 = width as f32 - x_h;
let size_v1 =
cmp::max(h_line_bounds_left.1 as i32, h_line_bounds_right.1 as i32) as f32;
let y_v = cmp::min(h_line_bounds_left.0 as i32, h_line_bounds_right.0 as i32) as f32;
let size_v2 = height as f32 - y_v;
// Left horizontal line.
canvas.draw_h_line(0., y_h, size_h1, stroke_size_h1);
// Right horizontal line.
canvas.draw_h_line(x_h, y_h, size_h2, stroke_size_h2);
// Top vertical line.
canvas.draw_v_line(x_v, 0., size_v1, stroke_size_v1);
// Bottom vertical line.
canvas.draw_v_line(x_v, y_v, size_v2, stroke_size_v2);
},
// Light and double line box components:
// '═','║','╒','╓','╔','╕','╖','╗','╘','╙','╚','╛','╜','╝','╞','╟','╠','╡','╢','╣','╤','╥',
// '╦','╧','╨','╩','╪','╫','╬'.
'\u{2550}'..='\u{256c}' => {
let v_lines = match character {
'\u{2552}' | '\u{2555}' | '\u{2558}' | '\u{255b}' | '\u{255e}' | '\u{2561}'
| '\u{2564}' | '\u{2567}' | '\u{256a}' => (canvas.x_center(), canvas.x_center()),
_ => {
let v_line_bounds = canvas.v_line_bounds(canvas.x_center(), stroke_size);
let left_line = cmp::max(v_line_bounds.0 as i32 - 1, 0) as f32;
let right_line = cmp::min(v_line_bounds.1 as i32 + 1, width as i32) as f32;
(left_line, right_line)
},
};
let h_lines = match character {
'\u{2553}' | '\u{2556}' | '\u{2559}' | '\u{255c}' | '\u{255f}' | '\u{2562}'
| '\u{2565}' | '\u{2568}' | '\u{256b}' => (canvas.y_center(), canvas.y_center()),
_ => {
let h_line_bounds = canvas.h_line_bounds(canvas.y_center(), stroke_size);
let top_line = cmp::max(h_line_bounds.0 as i32 - 1, 0) as f32;
let bottom_line = cmp::min(h_line_bounds.1 as i32 + 1, height as i32) as f32;
(top_line, bottom_line)
},
};
// Get bounds for each double line we could have.
let v_left_bounds = canvas.v_line_bounds(v_lines.0, stroke_size);
let v_right_bounds = canvas.v_line_bounds(v_lines.1, stroke_size);
let h_top_bounds = canvas.h_line_bounds(h_lines.0, stroke_size);
let h_bot_bounds = canvas.h_line_bounds(h_lines.1, stroke_size);
let height = height as f32;
let width = width as f32;
// Left horizontal part.
let (top_left_size, bot_left_size) = match character {
'\u{2550}' | '\u{256b}' => (canvas.x_center(), canvas.x_center()),
'\u{2555}'..='\u{2557}' => (v_right_bounds.1, v_left_bounds.1),
'\u{255b}'..='\u{255d}' => (v_left_bounds.1, v_right_bounds.1),
'\u{2561}'..='\u{2563}' | '\u{256a}' | '\u{256c}' => {
(v_left_bounds.1, v_left_bounds.1)
},
'\u{2564}'..='\u{2568}' => (canvas.x_center(), v_left_bounds.1),
'\u{2569}'..='\u{2569}' => (v_left_bounds.1, canvas.x_center()),
_ => (0., 0.),
};
// Right horizontal part.
let (top_right_x, bot_right_x, right_size) = match character {
'\u{2550}' | '\u{2565}' | '\u{256b}' => {
(canvas.x_center(), canvas.x_center(), width)
},
'\u{2552}'..='\u{2554}' | '\u{2568}' => (v_left_bounds.0, v_right_bounds.0, width),
'\u{2558}'..='\u{255a}' => (v_right_bounds.0, v_left_bounds.0, width),
'\u{255e}'..='\u{2560}' | '\u{256a}' | '\u{256c}' => {
(v_right_bounds.0, v_right_bounds.0, width)
},
'\u{2564}' | '\u{2566}' => (canvas.x_center(), v_right_bounds.0, width),
'\u{2567}' | '\u{2569}' => (v_right_bounds.0, canvas.x_center(), width),
_ => (0., 0., 0.),
};
// Top vertical part.
let (left_top_size, right_top_size) = match character {
'\u{2551}' | '\u{256a}' => (canvas.y_center(), canvas.y_center()),
'\u{2558}'..='\u{255c}' | '\u{2568}' => (h_bot_bounds.1, h_top_bounds.1),
'\u{255d}' => (h_top_bounds.1, h_bot_bounds.1),
'\u{255e}'..='\u{2560}' => (canvas.y_center(), h_top_bounds.1),
'\u{2561}'..='\u{2563}' => (h_top_bounds.1, canvas.y_center()),
'\u{2567}' | '\u{2569}' | '\u{256b}' | '\u{256c}' => {
(h_top_bounds.1, h_top_bounds.1)
},
_ => (0., 0.),
};
// Bottom vertical part.
let (left_bot_y, right_bot_y, bottom_size) = match character {
'\u{2551}' | '\u{256a}' => (canvas.y_center(), canvas.y_center(), height),
'\u{2552}'..='\u{2554}' => (h_top_bounds.0, h_bot_bounds.0, height),
'\u{2555}'..='\u{2557}' => (h_bot_bounds.0, h_top_bounds.0, height),
'\u{255e}'..='\u{2560}' => (canvas.y_center(), h_bot_bounds.0, height),
'\u{2561}'..='\u{2563}' => (h_bot_bounds.0, canvas.y_center(), height),
'\u{2564}'..='\u{2566}' | '\u{256b}' | '\u{256c}' => {
(h_bot_bounds.0, h_bot_bounds.0, height)
},
_ => (0., 0., 0.),
};
// Left horizontal line.
canvas.draw_h_line(0., h_lines.0, top_left_size, stroke_size);
canvas.draw_h_line(0., h_lines.1, bot_left_size, stroke_size);
// Right horizontal line.
canvas.draw_h_line(top_right_x, h_lines.0, right_size, stroke_size);
canvas.draw_h_line(bot_right_x, h_lines.1, right_size, stroke_size);
// Top vertical line.
canvas.draw_v_line(v_lines.0, 0., left_top_size, stroke_size);
canvas.draw_v_line(v_lines.1, 0., right_top_size, stroke_size);
// Bottom vertical line.
canvas.draw_v_line(v_lines.0, left_bot_y, bottom_size, stroke_size);
canvas.draw_v_line(v_lines.1, right_bot_y, bottom_size, stroke_size);
},
// Arcs: '╭', '╮', '╯', '╰'.
'\u{256d}' | '\u{256e}' | '\u{256f}' | '\u{2570}' => {
canvas.draw_ellipse_arc(stroke_size);
// Mirror `X` axis.
if character == '\u{256d}' || character == '\u{2570}' {
let center = canvas.x_center() as usize;
let extra_offset = usize::from(stroke_size % 2 != width % 2);
let buffer = canvas.buffer_mut();
for y in 1..height {
let left = (y - 1) * width;
let right = y * width - 1;
if extra_offset != 0 {
buffer[right] = buffer[left];
}
for offset in 0..center {
buffer.swap(left + offset, right - offset - extra_offset);
}
}
}
// Mirror `Y` axis.
if character == '\u{256d}' || character == '\u{256e}' {
let center = canvas.y_center() as usize;
let extra_offset = usize::from(stroke_size % 2 != height % 2);
let buffer = canvas.buffer_mut();
if extra_offset != 0 {
let bottom_row = (height - 1) * width;
for index in 0..width {
buffer[bottom_row + index] = buffer[index];
}
}
for offset in 1..=center {
let top_row = (offset - 1) * width;
let bottom_row = (height - offset - extra_offset) * width;
for index in 0..width {
buffer.swap(top_row + index, bottom_row + index);
}
}
}
},
// Parts of full block: '▀', '▁', '▂', '▃', '▄', '▅', '▆', '▇', '▔', '▉', '▊', '▋', '▌',
// '▍', '▎', '▏', '▐', '▕'.
'\u{2580}'..='\u{2587}' | '\u{2589}'..='\u{2590}' | '\u{2594}' | '\u{2595}' => {
let width = width as f32;
let height = height as f32;
let mut rect_width = match character {
'\u{2589}' => width * 7. / 8.,
'\u{258a}' => width * 6. / 8.,
'\u{258b}' => width * 5. / 8.,
'\u{258c}' => width * 4. / 8.,
'\u{258d}' => width * 3. / 8.,
'\u{258e}' => width * 2. / 8.,
'\u{258f}' => width * 1. / 8.,
'\u{2590}' => width * 4. / 8.,
'\u{2595}' => width * 1. / 8.,
_ => width,
};
let (mut rect_height, mut y) = match character {
'\u{2580}' => (height * 4. / 8., height * 8. / 8.),
'\u{2581}' => (height * 1. / 8., height * 1. / 8.),
'\u{2582}' => (height * 2. / 8., height * 2. / 8.),
'\u{2583}' => (height * 3. / 8., height * 3. / 8.),
'\u{2584}' => (height * 4. / 8., height * 4. / 8.),
'\u{2585}' => (height * 5. / 8., height * 5. / 8.),
'\u{2586}' => (height * 6. / 8., height * 6. / 8.),
'\u{2587}' => (height * 7. / 8., height * 7. / 8.),
'\u{2594}' => (height * 1. / 8., height * 8. / 8.),
_ => (height, height),
};
// Fix `y` coordinates.
y = (height - y).round();
// Ensure that resulted glyph will be visible and also round sizes instead of straight
// flooring them.
rect_width = rect_width.round().max(1.);
rect_height = rect_height.round().max(1.);
let x = match character {
'\u{2590}' => canvas.x_center(),
'\u{2595}' => width - rect_width,
_ => 0.,
};
canvas.draw_rect(x, y, rect_width, rect_height, COLOR_FILL);
},
// Shades: '░', '▒', '▓', '█'.
'\u{2588}' | '\u{2591}' | '\u{2592}' | '\u{2593}' => {
let color = match character {
'\u{2588}' => COLOR_FILL,
'\u{2591}' => COLOR_FILL_ALPHA_STEP_3,
'\u{2592}' => COLOR_FILL_ALPHA_STEP_2,
'\u{2593}' => COLOR_FILL_ALPHA_STEP_1,
_ => unreachable!(),
};
canvas.fill(color);
},
// Quadrants: '▖', '▗', '▘', '▙', '▚', '▛', '▜', '▝', '▞', '▟'.
'\u{2596}'..='\u{259F}' => {
let x_center = canvas.x_center().round().max(1.);
let y_center = canvas.y_center().round().max(1.);
let (w_second, h_second) = match character {
'\u{2598}' | '\u{2599}' | '\u{259a}' | '\u{259b}' | '\u{259c}' => {
(x_center, y_center)
},
_ => (0., 0.),
};
let (w_first, h_first) = match character {
'\u{259b}' | '\u{259c}' | '\u{259d}' | '\u{259e}' | '\u{259f}' => {
(x_center, y_center)
},
_ => (0., 0.),
};
let (w_third, h_third) = match character {
'\u{2596}' | '\u{2599}' | '\u{259b}' | '\u{259e}' | '\u{259f}' => {
(x_center, y_center)
},
_ => (0., 0.),
};
let (w_fourth, h_fourth) = match character {
'\u{2597}' | '\u{2599}' | '\u{259a}' | '\u{259c}' | '\u{259f}' => {
(x_center, y_center)
},
_ => (0., 0.),
};
// Second quadrant.
canvas.draw_rect(0., 0., w_second, h_second, COLOR_FILL);
// First quadrant.
canvas.draw_rect(x_center, 0., w_first, h_first, COLOR_FILL);
// Third quadrant.
canvas.draw_rect(0., y_center, w_third, h_third, COLOR_FILL);
// Fourth quadrant.
canvas.draw_rect(x_center, y_center, w_fourth, h_fourth, COLOR_FILL);
},
// Sextants: '🬀', '🬁', '🬂', '🬃', '🬄', '🬅', '🬆', '🬇', '🬈', '🬉', '🬊', '🬋', '🬌', '🬍', '🬎',
// '🬏', '🬐', '🬑', '🬒', '🬓', '🬔', '🬕', '🬖', '🬗', '🬘', '🬙', '🬚', '🬛', '🬜', '🬝', '🬞', '🬟',
// '🬠', '🬡', '🬢', '🬣', '🬤', '🬥', '🬦', '🬧', '🬨', '🬩', '🬪', '🬫', '🬬', '🬭', '🬮', '🬯', '🬰',
// '🬱', '🬲', '🬳', '🬴', '🬵', '🬶', '🬷', '🬸', '🬹', '🬺', '🬻'.
'\u{1fb00}'..='\u{1fb3b}' => {
let x_center = canvas.x_center().round().max(1.);
let y_third = (height as f32 / 3.).round().max(1.);
let y_last_third = height as f32 - 2. * y_third;
let (w_top_left, h_top_left) = match character {
'\u{1fb00}' | '\u{1fb02}' | '\u{1fb04}' | '\u{1fb06}' | '\u{1fb08}'
| '\u{1fb0a}' | '\u{1fb0c}' | '\u{1fb0e}' | '\u{1fb10}' | '\u{1fb12}'
| '\u{1fb15}' | '\u{1fb17}' | '\u{1fb19}' | '\u{1fb1b}' | '\u{1fb1d}'
| '\u{1fb1f}' | '\u{1fb21}' | '\u{1fb23}' | '\u{1fb25}' | '\u{1fb27}'
| '\u{1fb28}' | '\u{1fb2a}' | '\u{1fb2c}' | '\u{1fb2e}' | '\u{1fb30}'
| '\u{1fb32}' | '\u{1fb34}' | '\u{1fb36}' | '\u{1fb38}' | '\u{1fb3a}' => {
(x_center, y_third)
},
_ => (0., 0.),
};
let (w_top_right, h_top_right) = match character {
'\u{1fb01}' | '\u{1fb02}' | '\u{1fb05}' | '\u{1fb06}' | '\u{1fb09}'
| '\u{1fb0a}' | '\u{1fb0d}' | '\u{1fb0e}' | '\u{1fb11}' | '\u{1fb12}'
| '\u{1fb14}' | '\u{1fb15}' | '\u{1fb18}' | '\u{1fb19}' | '\u{1fb1c}'
| '\u{1fb1d}' | '\u{1fb20}' | '\u{1fb21}' | '\u{1fb24}' | '\u{1fb25}'
| '\u{1fb28}' | '\u{1fb2b}' | '\u{1fb2c}' | '\u{1fb2f}' | '\u{1fb30}'
| '\u{1fb33}' | '\u{1fb34}' | '\u{1fb37}' | '\u{1fb38}' | '\u{1fb3b}' => {
(x_center, y_third)
},
_ => (0., 0.),
};
let (w_mid_left, h_mid_left) = match character {
'\u{1fb03}' | '\u{1fb04}' | '\u{1fb05}' | '\u{1fb06}' | '\u{1fb0b}'
| '\u{1fb0c}' | '\u{1fb0d}' | '\u{1fb0e}' | '\u{1fb13}' | '\u{1fb14}'
| '\u{1fb15}' | '\u{1fb1a}' | '\u{1fb1b}' | '\u{1fb1c}' | '\u{1fb1d}'
| '\u{1fb22}' | '\u{1fb23}' | '\u{1fb24}' | '\u{1fb25}' | '\u{1fb29}'
| '\u{1fb2a}' | '\u{1fb2b}' | '\u{1fb2c}' | '\u{1fb31}' | '\u{1fb32}'
| '\u{1fb33}' | '\u{1fb34}' | '\u{1fb39}' | '\u{1fb3a}' | '\u{1fb3b}' => {
(x_center, y_third)
},
_ => (0., 0.),
};
let (w_mid_right, h_mid_right) = match character {
'\u{1fb07}' | '\u{1fb08}' | '\u{1fb09}' | '\u{1fb0a}' | '\u{1fb0b}'
| '\u{1fb0c}' | '\u{1fb0d}' | '\u{1fb0e}' | '\u{1fb16}' | '\u{1fb17}'
| '\u{1fb18}' | '\u{1fb19}' | '\u{1fb1a}' | '\u{1fb1b}' | '\u{1fb1c}'
| '\u{1fb1d}' | '\u{1fb26}' | '\u{1fb27}' | '\u{1fb28}' | '\u{1fb29}'
| '\u{1fb2a}' | '\u{1fb2b}' | '\u{1fb2c}' | '\u{1fb35}' | '\u{1fb36}'
| '\u{1fb37}' | '\u{1fb38}' | '\u{1fb39}' | '\u{1fb3a}' | '\u{1fb3b}' => {
(x_center, y_third)
},
_ => (0., 0.),
};
let (w_bottom_left, h_bottom_left) = match character {
'\u{1fb0f}' | '\u{1fb10}' | '\u{1fb11}' | '\u{1fb12}' | '\u{1fb13}'
| '\u{1fb14}' | '\u{1fb15}' | '\u{1fb16}' | '\u{1fb17}' | '\u{1fb18}'
| '\u{1fb19}' | '\u{1fb1a}' | '\u{1fb1b}' | '\u{1fb1c}' | '\u{1fb1d}'
| '\u{1fb2d}' | '\u{1fb2e}' | '\u{1fb2f}' | '\u{1fb30}' | '\u{1fb31}'
| '\u{1fb32}' | '\u{1fb33}' | '\u{1fb34}' | '\u{1fb35}' | '\u{1fb36}'
| '\u{1fb37}' | '\u{1fb38}' | '\u{1fb39}' | '\u{1fb3a}' | '\u{1fb3b}' => {
(x_center, y_last_third)
},
_ => (0., 0.),
};
let (w_bottom_right, h_bottom_right) = match character {
'\u{1fb1e}' | '\u{1fb1f}' | '\u{1fb20}' | '\u{1fb21}' | '\u{1fb22}'
| '\u{1fb23}' | '\u{1fb24}' | '\u{1fb25}' | '\u{1fb26}' | '\u{1fb27}'
| '\u{1fb28}' | '\u{1fb29}' | '\u{1fb2a}' | '\u{1fb2b}' | '\u{1fb2c}'
| '\u{1fb2d}' | '\u{1fb2e}' | '\u{1fb2f}' | '\u{1fb30}' | '\u{1fb31}'
| '\u{1fb32}' | '\u{1fb33}' | '\u{1fb34}' | '\u{1fb35}' | '\u{1fb36}'
| '\u{1fb37}' | '\u{1fb38}' | '\u{1fb39}' | '\u{1fb3a}' | '\u{1fb3b}' => {
(x_center, y_last_third)
},
_ => (0., 0.),
};
canvas.draw_rect(0., 0., w_top_left, h_top_left, COLOR_FILL);
canvas.draw_rect(x_center, 0., w_top_right, h_top_right, COLOR_FILL);
canvas.draw_rect(0., y_third, w_mid_left, h_mid_left, COLOR_FILL);
canvas.draw_rect(x_center, y_third, w_mid_right, h_mid_right, COLOR_FILL);
canvas.draw_rect(0., y_third * 2., w_bottom_left, h_bottom_left, COLOR_FILL);
canvas.draw_rect(x_center, y_third * 2., w_bottom_right, h_bottom_right, COLOR_FILL);
},
_ => unreachable!(),
}
let top = height as i32 + metrics.descent as i32;
let buffer = BitmapBuffer::Rgb(canvas.into_raw());
RasterizedGlyph {
character,
top,
left: 0,
height: height as i32,
width: width as i32,
buffer,
advance: (width as i32, height as i32),
}
}
/// Rasterizes the powerline triangle/arrow glyphs into a fresh canvas.
///
/// Returns `None` when the cell is too narrow to fit the arrow without cutting the tip
/// off too much, so the caller can fall back to the font's own glyph.
fn powerline_drawing(
    character: char,
    metrics: &Metrics,
    offset: &Delta<i8>,
) -> Option<RasterizedGlyph> {
    // Cell dimensions in pixels. NOTE(review): unlike `box_drawing` these are not clamped
    // to a minimum of one — presumably the offsets never make them non-positive; confirm.
    let height = (metrics.line_height as i32 + offset.y as i32) as usize;
    let width = (metrics.average_advance as i32 + offset.x as i32) as usize;
    // Extra pixels of thickness for the arrow variants beyond the 1px outline.
    let extra_thickness = calculate_stroke_size(width) as i32 - 1;
    let mut canvas = Canvas::new(width, height);
    let slope = 1;
    let top_y = 1;
    let bottom_y = height as i32 - top_y - 1;
    // Start with offset `1` and draw until the intersection of the f(x) = slope * x + 1 and
    // g(x) = H - slope * x - 1 lines. The intersection happens when f(x) = g(x), which is at
    // x = (H - 2) / (2 * slope).
    // NOTE(review): for odd heights `(height + 1) / 2 - 1` rounds up relative to the
    // (H - 2) / 2 formula above — confirm the rounding is intended.
    let x_intersection = (height as i32 + 1) / 2 - 1;
    // Don't use built-in font if we'd cut the tip too much, for example when the font is really
    // narrow.
    if x_intersection - width as i32 > 1 {
        return None;
    }
    // Outer outline of the glyph: one point per column on the top and bottom edges.
    let top_line = (0..x_intersection).map(|x| line_equation(slope, x, top_y));
    let bottom_line = (0..x_intersection).map(|x| line_equation(-slope, x, bottom_y));
    // Inner lines to make arrows thicker.
    let mut top_inner_line = (0..x_intersection - extra_thickness)
        .map(|x| line_equation(slope, x, top_y + extra_thickness));
    let mut bottom_inner_line = (0..x_intersection - extra_thickness)
        .map(|x| line_equation(-slope, x, bottom_y - extra_thickness));
    // NOTE: top_line and bottom_line have the same amount of iterations.
    for (p1, p2) in top_line.zip(bottom_line) {
        if character == POWERLINE_TRIANGLE_LTR || character == POWERLINE_TRIANGLE_RTL {
            // Solid triangle: fill each row from the left edge up to the outline.
            canvas.draw_rect(0., p1.1, p1.0 + 1., 1., COLOR_FILL);
            canvas.draw_rect(0., p2.1, p2.0 + 1., 1., COLOR_FILL);
        } else if character == POWERLINE_ARROW_LTR || character == POWERLINE_ARROW_RTL {
            // Hollow arrow: draw only a thick outline. Once the inner lines are exhausted
            // the strokes meet at the tip, hence the fallbacks to `p2`/`p1`.
            let p3 = top_inner_line.next().unwrap_or(p2);
            let p4 = bottom_inner_line.next().unwrap_or(p1);
            // If we can't fit the entire arrow in the cell, we cut off the tip of the arrow by
            // drawing a rectangle between the two lines.
            if p1.0 as usize + 1 == width {
                canvas.draw_rect(p1.0, p1.1, 1., p2.1 - p1.1 + 1., COLOR_FILL);
                break;
            } else {
                canvas.draw_rect(p1.0, p1.1, 1., p3.1 - p1.1 + 1., COLOR_FILL);
                canvas.draw_rect(p4.0, p4.1, 1., p2.1 - p4.1 + 1., COLOR_FILL);
            }
        }
    }
    // The right-to-left variants are just mirrored left-to-right glyphs.
    if character == POWERLINE_TRIANGLE_RTL || character == POWERLINE_ARROW_RTL {
        canvas.flip_horizontal();
    }
    let top = height as i32 + metrics.descent as i32;
    let buffer = BitmapBuffer::Rgb(canvas.into_raw());
    Some(RasterizedGlyph {
        character,
        top,
        left: 0,
        height: height as i32,
        width: width as i32,
        buffer,
        advance: (width as i32, height as i32),
    })
}
/// Single RGB pixel of the `Canvas` buffer.
///
/// `repr(C, packed)` guarantees a size of exactly three bytes with no padding, so a
/// `Vec<Pixel>` can be reinterpreted as a raw byte vector of consecutive R, G, B values.
#[repr(C, packed)]
#[derive(Clone, Copy, Debug, Default)]
struct Pixel {
    // Red channel.
    _r: u8,
    // Green channel.
    _g: u8,
    // Blue channel.
    _b: u8,
}
impl Pixel {
    /// Constructs a gray pixel, i.e. one whose three channels all carry `color`.
    fn gray(color: u8) -> Self {
        let (_r, _g, _b) = (color, color, color);
        Self { _r, _g, _b }
    }
}
impl ops::Add for Pixel {
    type Output = Pixel;

    /// Channel-wise saturating addition of two pixels (each channel caps at 255).
    fn add(self, rhs: Pixel) -> Self::Output {
        Pixel {
            _r: self._r.saturating_add(rhs._r),
            _g: self._g.saturating_add(rhs._g),
            _b: self._b.saturating_add(rhs._b),
        }
    }
}
impl ops::Div<u8> for Pixel {
    type Output = Pixel;

    /// Channel-wise integer division by a scalar; panics if `rhs` is zero.
    fn div(self, rhs: u8) -> Self::Output {
        Pixel { _r: self._r / rhs, _g: self._g / rhs, _b: self._b / rhs }
    }
}
/// Canvas which is used for simple line drawing operations.
///
/// The coordinate system is the following:
///
///  0      x
///  --------------→
///  |
///  |
///  |
///  |
///  |
///  |
/// y↓
struct Canvas {
    /// Canvas width.
    width: usize,
    /// Canvas height.
    height: usize,
    /// Canvas buffer we draw on.
    ///
    /// Stored row-major, `width * height` pixels long.
    buffer: Vec<Pixel>,
}
impl Canvas {
    /// Builds new `Canvas` for line drawing with the given `width` and `height` with default color.
    fn new(width: usize, height: usize) -> Self {
        let buffer = vec![Pixel::default(); width * height];
        Self { width, height, buffer }
    }

    /// Vertical center of the `Canvas`.
    fn y_center(&self) -> f32 {
        self.height as f32 / 2.
    }

    /// Horizontal center of the `Canvas`.
    fn x_center(&self) -> f32 {
        self.width as f32 / 2.
    }

    /// Canvas underlying buffer for direct manipulation.
    fn buffer_mut(&mut self) -> &mut [Pixel] {
        &mut self.buffer
    }

    /// Gives bounds for horizontal straight line on `y` with `stroke_size`.
    fn h_line_bounds(&self, y: f32, stroke_size: usize) -> (f32, f32) {
        let start_y = cmp::max((y - stroke_size as f32 / 2.) as i32, 0) as f32;
        let end_y = cmp::min((y + stroke_size as f32 / 2.) as i32, self.height as i32) as f32;
        (start_y, end_y)
    }

    /// Gives bounds for vertical straight line on `x` with `stroke_size`.
    fn v_line_bounds(&self, x: f32, stroke_size: usize) -> (f32, f32) {
        let start_x = cmp::max((x - stroke_size as f32 / 2.) as i32, 0) as f32;
        let end_x = cmp::min((x + stroke_size as f32 / 2.) as i32, self.width as i32) as f32;
        (start_x, end_x)
    }

    /// Flip the canvas horizontally (mirror each row around the vertical center line).
    fn flip_horizontal(&mut self) {
        for row in 0..self.height {
            for col in 0..self.width / 2 {
                let index = row * self.width;
                self.buffer.swap(index + col, index + self.width - col - 1)
            }
        }
    }

    /// Draws a horizontal straight line from (`x`, `y`) of `size` with the given `stroke_size`.
    fn draw_h_line(&mut self, x: f32, y: f32, size: f32, stroke_size: usize) {
        let (start_y, end_y) = self.h_line_bounds(y, stroke_size);
        self.draw_rect(x, start_y, size, end_y - start_y, COLOR_FILL);
    }

    /// Draws a vertical straight line from (`x`, `y`) of `size` with the given `stroke_size`.
    fn draw_v_line(&mut self, x: f32, y: f32, size: f32, stroke_size: usize) {
        let (start_x, end_x) = self.v_line_bounds(x, stroke_size);
        self.draw_rect(start_x, y, end_x - start_x, size, COLOR_FILL);
    }

    /// Draws a rect from the (`x`, `y`) of the given `width` and `height` using `color`.
    /// The rect is clipped to the canvas on the right/bottom edges.
    fn draw_rect(&mut self, x: f32, y: f32, width: f32, height: f32, color: Pixel) {
        let start_x = x as usize;
        let end_x = cmp::min((x + width) as usize, self.width);
        let start_y = y as usize;
        let end_y = cmp::min((y + height) as usize, self.height);
        for y in start_y..end_y {
            let y = y * self.width;
            self.buffer[start_x + y..end_x + y].fill(color);
        }
    }

    /// Put pixel into buffer with the given color if the color is brighter than the one buffer
    /// already has in place.
    #[inline]
    fn put_pixel(&mut self, x: f32, y: f32, color: Pixel) {
        // Silently ignore out-of-bounds coordinates so callers can over-draw freely.
        if x < 0. || y < 0. || x > self.width as f32 - 1. || y > self.height as f32 - 1. {
            return;
        }
        let index = x as usize + y as usize * self.width;
        if color._r > self.buffer[index]._r {
            self.buffer[index] = color;
        }
    }

    /// Xiaolin Wu's line drawing from (`from_x`, `from_y`) to (`to_x`, `to_y`).
    fn draw_line(&mut self, mut from_x: f32, mut from_y: f32, mut to_x: f32, mut to_y: f32) {
        // A line is "steep" when it advances faster in `y` than in `x`; Wu's algorithm then
        // iterates over `y` instead by swapping the axes (and swaps them back when plotting).
        let steep = (to_y - from_y).abs() > (to_x - from_x).abs();
        if steep {
            mem::swap(&mut from_x, &mut from_y);
            mem::swap(&mut to_x, &mut to_y);
        }
        // Always iterate left to right.
        if from_x > to_x {
            mem::swap(&mut from_x, &mut to_x);
            mem::swap(&mut from_y, &mut to_y);
        }
        let delta_x = to_x - from_x;
        let delta_y = to_y - from_y;
        let gradient = if delta_x.abs() <= f32::EPSILON { 1. } else { delta_y / delta_x };
        // First endpoint.
        let x_end = f32::round(from_x);
        let y_end = from_y + gradient * (x_end - from_x);
        let x_gap = 1. - (from_x + 0.5).fract();
        let xpxl1 = x_end;
        let ypxl1 = y_end.trunc();
        let color_1 = Pixel::gray(((1. - y_end.fract()) * x_gap * COLOR_FILL._r as f32) as u8);
        let color_2 = Pixel::gray((y_end.fract() * x_gap * COLOR_FILL._r as f32) as u8);
        if steep {
            self.put_pixel(ypxl1, xpxl1, color_1);
            self.put_pixel(ypxl1 + 1., xpxl1, color_2);
        } else {
            self.put_pixel(xpxl1, ypxl1, color_1);
            // The anti-aliasing neighbour of a non-steep line is the pixel below (`y + 1`),
            // mirroring the second-endpoint handling further down. (Was plotted at
            // `(xpxl1 + 1., ypxl1)` before, which smeared the pixel sideways instead.)
            self.put_pixel(xpxl1, ypxl1 + 1., color_2);
        }
        let mut intery = y_end + gradient;
        // Second endpoint.
        let x_end = f32::round(to_x);
        let y_end = to_y + gradient * (x_end - to_x);
        let x_gap = (to_x + 0.5).fract();
        let xpxl2 = x_end;
        let ypxl2 = y_end.trunc();
        let color_1 = Pixel::gray(((1. - y_end.fract()) * x_gap * COLOR_FILL._r as f32) as u8);
        let color_2 = Pixel::gray((y_end.fract() * x_gap * COLOR_FILL._r as f32) as u8);
        if steep {
            self.put_pixel(ypxl2, xpxl2, color_1);
            self.put_pixel(ypxl2 + 1., xpxl2, color_2);
        } else {
            self.put_pixel(xpxl2, ypxl2, color_1);
            self.put_pixel(xpxl2, ypxl2 + 1., color_2);
        }
        // Main loop between the two endpoints: one pixel pair per column (per row for steep
        // lines) with intensities split by the fractional distance to the ideal line.
        if steep {
            for x in xpxl1 as i32 + 1..xpxl2 as i32 {
                let color_1 = Pixel::gray(((1. - intery.fract()) * COLOR_FILL._r as f32) as u8);
                let color_2 = Pixel::gray((intery.fract() * COLOR_FILL._r as f32) as u8);
                self.put_pixel(intery.trunc(), x as f32, color_1);
                self.put_pixel(intery.trunc() + 1., x as f32, color_2);
                intery += gradient;
            }
        } else {
            for x in xpxl1 as i32 + 1..xpxl2 as i32 {
                let color_1 = Pixel::gray(((1. - intery.fract()) * COLOR_FILL._r as f32) as u8);
                let color_2 = Pixel::gray((intery.fract() * COLOR_FILL._r as f32) as u8);
                self.put_pixel(x as f32, intery.trunc(), color_1);
                self.put_pixel(x as f32, intery.trunc() + 1., color_2);
                intery += gradient;
            }
        }
    }

    /// Draws a part of an ellipse centered in `(0., 0.)` with `self.x_center()` and
    /// `self.y_center()` vertex and co-vertex respectively using a given `stroke_size` in the
    /// bottom-right quadrant of the `Canvas` coordinate system.
    fn draw_ellipse_arc(&mut self, stroke_size: usize) {
        // Anti-aliasing helper: split the fill color into two alphas based on how far the
        // ideal curve is from the pixel grid.
        fn colors_with_error(error: f32, max_transparency: f32) -> (Pixel, Pixel) {
            let transparency = error * max_transparency;
            let alpha_1 = 1. - transparency;
            let alpha_2 = 1. - (max_transparency - transparency);
            let color_1 = Pixel::gray((COLOR_FILL._r as f32 * alpha_1) as u8);
            let color_2 = Pixel::gray((COLOR_FILL._r as f32 * alpha_2) as u8);
            (color_1, color_2)
        }
        let h_line_bounds = self.h_line_bounds(self.y_center(), stroke_size);
        let v_line_bounds = self.v_line_bounds(self.x_center(), stroke_size);
        let h_line_bounds = (h_line_bounds.0 as usize, h_line_bounds.1 as usize);
        let v_line_bounds = (v_line_bounds.0 as usize, v_line_bounds.1 as usize);
        let max_transparency = 0.5;
        // Draw one ellipse per pixel of stroke thickness.
        for (radius_y, radius_x) in
            (h_line_bounds.0..h_line_bounds.1).zip(v_line_bounds.0..v_line_bounds.1)
        {
            let radius_x = radius_x as f32;
            let radius_y = radius_y as f32;
            let radius_x2 = radius_x * radius_x;
            let radius_y2 = radius_y * radius_y;
            // Iterate over `x` while the curve is flatter than 45°, then switch to a
            // `y`-driven loop below so consecutive plotted pixels stay adjacent.
            let quarter = f32::round(radius_x2 / f32::sqrt(radius_x2 + radius_y2)) as usize;
            for x in 0..=quarter {
                let x = x as f32;
                let y = radius_y * f32::sqrt(1. - x * x / radius_x2);
                let error = y.fract();
                let (color_1, color_2) = colors_with_error(error, max_transparency);
                let x = x.clamp(0., radius_x);
                let y_next = (y + 1.).clamp(0., h_line_bounds.1 as f32 - 1.);
                let y = y.clamp(0., h_line_bounds.1 as f32 - 1.);
                self.put_pixel(x, y, color_1);
                self.put_pixel(x, y_next, color_2);
            }
            let quarter = f32::round(radius_y2 / f32::sqrt(radius_x2 + radius_y2)) as usize;
            for y in 0..=quarter {
                let y = y as f32;
                let x = radius_x * f32::sqrt(1. - y * y / radius_y2);
                // The anti-aliasing error is the fractional distance to the pixel grid,
                // matching the `y.fract()` used in the `x`-driven loop above. (Was
                // `x - x.fract()`, i.e. `x.trunc()`, which is not a sub-pixel error.)
                let error = x.fract();
                let (color_1, color_2) = colors_with_error(error, max_transparency);
                let x_next = (x + 1.).clamp(0., v_line_bounds.1 as f32 - 1.);
                let x = x.clamp(0., v_line_bounds.1 as f32 - 1.);
                let y = y.clamp(0., radius_y);
                self.put_pixel(x, y, color_1);
                self.put_pixel(x_next, y, color_2);
            }
        }
        // Ensure the part closer to edges is properly filled.
        self.draw_h_line(0., self.y_center(), stroke_size as f32, stroke_size);
        self.draw_v_line(self.x_center(), 0., stroke_size as f32, stroke_size);
        // Fill the resulted arc, since it could have gaps in-between.
        for y in 0..self.height {
            let row = y * self.width;
            let left = match self.buffer[row..row + self.width].iter().position(|p| p._r != 0) {
                Some(left) => row + left,
                _ => continue,
            };
            let right = match self.buffer[row..row + self.width].iter().rposition(|p| p._r != 0) {
                Some(right) => row + right,
                _ => continue,
            };
            for index in left + 1..right {
                self.buffer[index] =
                    self.buffer[index] + self.buffer[index - 1] / 2 + self.buffer[index + 1] / 2;
            }
        }
    }

    /// Fills the `Canvas` with the given `Color`.
    fn fill(&mut self, color: Pixel) {
        self.buffer.fill(color);
    }

    /// Consumes `Canvas` and returns its underlying storage as raw byte vector.
    fn into_raw(self) -> Vec<u8> {
        // SAFETY: This is safe since we use `repr(packed)` on `Pixel` struct for underlying
        // storage of the `Canvas` buffer which consists of three u8 values, so size, length,
        // and capacity scale by exactly `size_of::<Pixel>()` and alignment stays 1.
        unsafe {
            let capacity = self.buffer.capacity() * mem::size_of::<Pixel>();
            let len = self.buffer.len() * mem::size_of::<Pixel>();
            let buf = self.buffer.as_ptr() as *mut u8;
            mem::forget(self.buffer);
            Vec::from_raw_parts(buf, len, capacity)
        }
    }
}
/// Compute line width.
fn calculate_stroke_size(cell_width: usize) -> usize {
    // One eighth of the cell width, since this is used as a step size for block
    // elements — but never thinner than a single pixel.
    ((cell_width as f32 / 8.).round() as usize).max(1)
}
/// `f(x) = slope * x + offset` equation, returned as the `(x, f(x))` point.
fn line_equation(slope: i32, x: i32, offset: i32) -> (f32, f32) {
    let y = slope * x + offset;
    (x as f32, y as f32)
}
#[cfg(test)]
mod tests {
    use super::*;
    use crossfont::Metrics;

    // Dummy metrics values to test builtin glyphs coverage.
    const METRICS: Metrics = Metrics {
        average_advance: 6.,
        line_height: 16.,
        descent: 4.,
        underline_position: 2.,
        underline_thickness: 2.,
        strikeout_position: 2.,
        strikeout_thickness: 2.,
    };

    #[test]
    fn builtin_line_drawing_glyphs_coverage() {
        let offset = Default::default();
        let glyph_offset = Default::default();

        // Test coverage of box drawing characters.
        for character in ('\u{2500}'..='\u{259f}').chain('\u{1fb00}'..='\u{1fb3b}') {
            assert!(builtin_glyph(character, &METRICS, &offset, &glyph_offset).is_some());
        }

        // Characters just outside the supported ranges must not get builtin glyphs.
        for character in ('\u{2450}'..'\u{2500}').chain('\u{25a0}'..'\u{2600}') {
            assert!(builtin_glyph(character, &METRICS, &offset, &glyph_offset).is_none());
        }
    }

    #[test]
    fn builtin_powerline_glyphs_coverage() {
        let offset = Default::default();
        let glyph_offset = Default::default();

        // Test coverage of powerline characters.
        for character in '\u{e0b0}'..='\u{e0b3}' {
            assert!(builtin_glyph(character, &METRICS, &offset, &glyph_offset).is_some());
        }

        // Neighbouring private-use characters must not get builtin glyphs.
        for character in ('\u{e0a0}'..'\u{e0b0}').chain('\u{e0b4}'..'\u{e0c0}') {
            assert!(builtin_glyph(character, &METRICS, &offset, &glyph_offset).is_none());
        }
    }
}
| rust | {
"argument_definitions": [
{
"definitions": [
"pub struct Metrics {\n pub average_advance: f64,\n pub line_height: f64,\n pub descent: f32,\n pub underline_position: f32,\n pub underline_thickness: f32,\n pub strikeout_position: f32,\n pub strikeout_thickness: f32,\n}"
],
"name": "metrics",
"type": "&Metrics"
},
{
"definitions": [
"pub struct Delta<T: Default> {\n /// Horizontal change.\n pub x: T,\n /// Vertical change.\n pub y: T,\n}"
],
"name": "offset",
"type": "&Delta<i8>"
},
{
"definitions": [
"pub struct Delta<T: Default> {\n /// Horizontal change.\n pub x: T,\n /// Vertical change.\n pub y: T,\n}"
],
"name": "glyph_offset",
"type": "&Delta<i8>"
}
],
"end_line": 47,
"name": "builtin_glyph",
"signature": "pub fn builtin_glyph(\n character: char,\n metrics: &Metrics,\n offset: &Delta<i8>,\n glyph_offset: &Delta<i8>,\n) -> Option<RasterizedGlyph>",
"start_line": 23
} | {
"class_name": "",
"class_signature": ""
} |
box_drawing | alacritty-master/alacritty/src/renderer/text/builtin_font.rs | fn box_drawing(character: char, metrics: &Metrics, offset: &Delta<i8>) -> RasterizedGlyph {
// Ensure that width and height is at least one.
let height = (metrics.line_height as i32 + offset.y as i32).max(1) as usize;
let width = (metrics.average_advance as i32 + offset.x as i32).max(1) as usize;
let stroke_size = calculate_stroke_size(width);
let heavy_stroke_size = stroke_size * 2;
// Certain symbols require larger canvas than the cell itself, since for proper contiguous
// lines they require drawing on neighbour cells. So treat them specially early on and handle
// 'normal' characters later.
let mut canvas = match character {
// Diagonals: '╱', '╲', '╳'.
'\u{2571}'..='\u{2573}' => {
// Last coordinates.
let x_end = width as f32;
let mut y_end = height as f32;
let top = height as i32 + metrics.descent as i32 + stroke_size as i32;
let height = height + 2 * stroke_size;
let mut canvas = Canvas::new(width, height + 2 * stroke_size);
// The offset that we should take into account when drawing, since we've enlarged
// buffer vertically by twice of that amount.
let y_offset = stroke_size as f32;
y_end += y_offset;
let k = y_end / x_end;
let f_x = |x: f32, h: f32| -> f32 { -1. * k * x + h + y_offset };
let g_x = |x: f32, h: f32| -> f32 { k * x + h + y_offset };
let from_x = 0.;
let to_x = x_end + 1.;
for stroke_size in 0..2 * stroke_size {
let stroke_size = stroke_size as f32 / 2.;
if character == '\u{2571}' || character == '\u{2573}' {
let h = y_end - stroke_size;
let from_y = f_x(from_x, h);
let to_y = f_x(to_x, h);
canvas.draw_line(from_x, from_y, to_x, to_y);
}
if character == '\u{2572}' || character == '\u{2573}' {
let from_y = g_x(from_x, stroke_size);
let to_y = g_x(to_x, stroke_size);
canvas.draw_line(from_x, from_y, to_x, to_y);
}
}
let buffer = BitmapBuffer::Rgb(canvas.into_raw());
return RasterizedGlyph {
character,
top,
left: 0,
height: height as i32,
width: width as i32,
buffer,
advance: (width as i32, height as i32),
};
},
_ => Canvas::new(width, height),
};
match character {
// Horizontal dashes: '┄', '┅', '┈', '┉', '╌', '╍'.
'\u{2504}' | '\u{2505}' | '\u{2508}' | '\u{2509}' | '\u{254c}' | '\u{254d}' => {
let (num_gaps, stroke_size) = match character {
'\u{2504}' => (2, stroke_size),
'\u{2505}' => (2, heavy_stroke_size),
'\u{2508}' => (3, stroke_size),
'\u{2509}' => (3, heavy_stroke_size),
'\u{254c}' => (1, stroke_size),
'\u{254d}' => (1, heavy_stroke_size),
_ => unreachable!(),
};
let dash_gap_len = cmp::max(width / 8, 1);
let dash_len =
cmp::max(width.saturating_sub(dash_gap_len * num_gaps) / (num_gaps + 1), 1);
let y = canvas.y_center();
for gap in 0..=num_gaps {
let x = cmp::min(gap * (dash_len + dash_gap_len), width);
canvas.draw_h_line(x as f32, y, dash_len as f32, stroke_size);
}
},
// Vertical dashes: '┆', '┇', '┊', '┋', '╎', '╏'.
'\u{2506}' | '\u{2507}' | '\u{250a}' | '\u{250b}' | '\u{254e}' | '\u{254f}' => {
let (num_gaps, stroke_size) = match character {
'\u{2506}' => (2, stroke_size),
'\u{2507}' => (2, heavy_stroke_size),
'\u{250a}' => (3, stroke_size),
'\u{250b}' => (3, heavy_stroke_size),
'\u{254e}' => (1, stroke_size),
'\u{254f}' => (1, heavy_stroke_size),
_ => unreachable!(),
};
let dash_gap_len = cmp::max(height / 8, 1);
let dash_len =
cmp::max(height.saturating_sub(dash_gap_len * num_gaps) / (num_gaps + 1), 1);
let x = canvas.x_center();
for gap in 0..=num_gaps {
let y = cmp::min(gap * (dash_len + dash_gap_len), height);
canvas.draw_v_line(x, y as f32, dash_len as f32, stroke_size);
}
},
// Horizontal lines: '─', '━', '╴', '╶', '╸', '╺'.
// Vertical lines: '│', '┃', '╵', '╷', '╹', '╻'.
// Light and heavy line box components:
// '┌','┍','┎','┏','┐','┑','┒','┓','└','┕','┖','┗','┘','┙','┚','┛',├','┝','┞','┟','┠','┡',
// '┢','┣','┤','┥','┦','┧','┨','┩','┪','┫','┬','┭','┮','┯','┰','┱','┲','┳','┴','┵','┶','┷',
// '┸','┹','┺','┻','┼','┽','┾','┿','╀','╁','╂','╃','╄','╅','╆','╇','╈','╉','╊','╋'.
// Mixed light and heavy lines: '╼', '╽', '╾', '╿'.
'\u{2500}'..='\u{2503}' | '\u{250c}'..='\u{254b}' | '\u{2574}'..='\u{257f}' => {
// Left horizontal line.
let stroke_size_h1 = match character {
'\u{2500}' | '\u{2510}' | '\u{2512}' | '\u{2518}' | '\u{251a}' | '\u{2524}'
| '\u{2526}' | '\u{2527}' | '\u{2528}' | '\u{252c}' | '\u{252e}' | '\u{2530}'
| '\u{2532}' | '\u{2534}' | '\u{2536}' | '\u{2538}' | '\u{253a}' | '\u{253c}'
| '\u{253e}' | '\u{2540}' | '\u{2541}' | '\u{2542}' | '\u{2544}' | '\u{2546}'
| '\u{254a}' | '\u{2574}' | '\u{257c}' => stroke_size,
'\u{2501}' | '\u{2511}' | '\u{2513}' | '\u{2519}' | '\u{251b}' | '\u{2525}'
| '\u{2529}' | '\u{252a}' | '\u{252b}' | '\u{252d}' | '\u{252f}' | '\u{2531}'
| '\u{2533}' | '\u{2535}' | '\u{2537}' | '\u{2539}' | '\u{253b}' | '\u{253d}'
| '\u{253f}' | '\u{2543}' | '\u{2545}' | '\u{2547}' | '\u{2548}' | '\u{2549}'
| '\u{254b}' | '\u{2578}' | '\u{257e}' => heavy_stroke_size,
_ => 0,
};
// Right horizontal line.
let stroke_size_h2 = match character {
'\u{2500}' | '\u{250c}' | '\u{250e}' | '\u{2514}' | '\u{2516}' | '\u{251c}'
| '\u{251e}' | '\u{251f}' | '\u{2520}' | '\u{252c}' | '\u{252d}' | '\u{2530}'
| '\u{2531}' | '\u{2534}' | '\u{2535}' | '\u{2538}' | '\u{2539}' | '\u{253c}'
| '\u{253d}' | '\u{2540}' | '\u{2541}' | '\u{2542}' | '\u{2543}' | '\u{2545}'
| '\u{2549}' | '\u{2576}' | '\u{257e}' => stroke_size,
'\u{2501}' | '\u{250d}' | '\u{250f}' | '\u{2515}' | '\u{2517}' | '\u{251d}'
| '\u{2521}' | '\u{2522}' | '\u{2523}' | '\u{252e}' | '\u{252f}' | '\u{2532}'
| '\u{2533}' | '\u{2536}' | '\u{2537}' | '\u{253a}' | '\u{253b}' | '\u{253e}'
| '\u{253f}' | '\u{2544}' | '\u{2546}' | '\u{2547}' | '\u{2548}' | '\u{254a}'
| '\u{254b}' | '\u{257a}' | '\u{257c}' => heavy_stroke_size,
_ => 0,
};
// Top vertical line.
let stroke_size_v1 = match character {
'\u{2502}' | '\u{2514}' | '\u{2515}' | '\u{2518}' | '\u{2519}' | '\u{251c}'
| '\u{251d}' | '\u{251f}' | '\u{2522}' | '\u{2524}' | '\u{2525}' | '\u{2527}'
| '\u{252a}' | '\u{2534}' | '\u{2535}' | '\u{2536}' | '\u{2537}' | '\u{253c}'
| '\u{253d}' | '\u{253e}' | '\u{253f}' | '\u{2541}' | '\u{2545}' | '\u{2546}'
| '\u{2548}' | '\u{2575}' | '\u{257d}' => stroke_size,
'\u{2503}' | '\u{2516}' | '\u{2517}' | '\u{251a}' | '\u{251b}' | '\u{251e}'
| '\u{2520}' | '\u{2521}' | '\u{2523}' | '\u{2526}' | '\u{2528}' | '\u{2529}'
| '\u{252b}' | '\u{2538}' | '\u{2539}' | '\u{253a}' | '\u{253b}' | '\u{2540}'
| '\u{2542}' | '\u{2543}' | '\u{2544}' | '\u{2547}' | '\u{2549}' | '\u{254a}'
| '\u{254b}' | '\u{2579}' | '\u{257f}' => heavy_stroke_size,
_ => 0,
};
// Bottom vertical line.
let stroke_size_v2 = match character {
'\u{2502}' | '\u{250c}' | '\u{250d}' | '\u{2510}' | '\u{2511}' | '\u{251c}'
| '\u{251d}' | '\u{251e}' | '\u{2521}' | '\u{2524}' | '\u{2525}' | '\u{2526}'
| '\u{2529}' | '\u{252c}' | '\u{252d}' | '\u{252e}' | '\u{252f}' | '\u{253c}'
| '\u{253d}' | '\u{253e}' | '\u{253f}' | '\u{2540}' | '\u{2543}' | '\u{2544}'
| '\u{2547}' | '\u{2577}' | '\u{257f}' => stroke_size,
'\u{2503}' | '\u{250e}' | '\u{250f}' | '\u{2512}' | '\u{2513}' | '\u{251f}'
| '\u{2520}' | '\u{2522}' | '\u{2523}' | '\u{2527}' | '\u{2528}' | '\u{252a}'
| '\u{252b}' | '\u{2530}' | '\u{2531}' | '\u{2532}' | '\u{2533}' | '\u{2541}'
| '\u{2542}' | '\u{2545}' | '\u{2546}' | '\u{2548}' | '\u{2549}' | '\u{254a}'
| '\u{254b}' | '\u{257b}' | '\u{257d}' => heavy_stroke_size,
_ => 0,
};
let x_v = canvas.x_center();
let y_h = canvas.y_center();
let v_line_bounds_top = canvas.v_line_bounds(x_v, stroke_size_v1);
let v_line_bounds_bot = canvas.v_line_bounds(x_v, stroke_size_v2);
let h_line_bounds_left = canvas.h_line_bounds(y_h, stroke_size_h1);
let h_line_bounds_right = canvas.h_line_bounds(y_h, stroke_size_h2);
let size_h1 = cmp::max(v_line_bounds_top.1 as i32, v_line_bounds_bot.1 as i32) as f32;
let x_h = cmp::min(v_line_bounds_top.0 as i32, v_line_bounds_bot.0 as i32) as f32;
let size_h2 = width as f32 - x_h;
let size_v1 =
cmp::max(h_line_bounds_left.1 as i32, h_line_bounds_right.1 as i32) as f32;
let y_v = cmp::min(h_line_bounds_left.0 as i32, h_line_bounds_right.0 as i32) as f32;
let size_v2 = height as f32 - y_v;
// Left horizontal line.
canvas.draw_h_line(0., y_h, size_h1, stroke_size_h1);
// Right horizontal line.
canvas.draw_h_line(x_h, y_h, size_h2, stroke_size_h2);
// Top vertical line.
canvas.draw_v_line(x_v, 0., size_v1, stroke_size_v1);
// Bottom vertical line.
canvas.draw_v_line(x_v, y_v, size_v2, stroke_size_v2);
},
// Light and double line box components:
// '═','║','╒','╓','╔','╕','╖','╗','╘','╙','╚','╛','╜','╝','╞','╟','╠','╡','╢','╣','╤','╥',
// '╦','╧','╨','╩','╪','╫','╬'.
'\u{2550}'..='\u{256c}' => {
let v_lines = match character {
'\u{2552}' | '\u{2555}' | '\u{2558}' | '\u{255b}' | '\u{255e}' | '\u{2561}'
| '\u{2564}' | '\u{2567}' | '\u{256a}' => (canvas.x_center(), canvas.x_center()),
_ => {
let v_line_bounds = canvas.v_line_bounds(canvas.x_center(), stroke_size);
let left_line = cmp::max(v_line_bounds.0 as i32 - 1, 0) as f32;
let right_line = cmp::min(v_line_bounds.1 as i32 + 1, width as i32) as f32;
(left_line, right_line)
},
};
let h_lines = match character {
'\u{2553}' | '\u{2556}' | '\u{2559}' | '\u{255c}' | '\u{255f}' | '\u{2562}'
| '\u{2565}' | '\u{2568}' | '\u{256b}' => (canvas.y_center(), canvas.y_center()),
_ => {
let h_line_bounds = canvas.h_line_bounds(canvas.y_center(), stroke_size);
let top_line = cmp::max(h_line_bounds.0 as i32 - 1, 0) as f32;
let bottom_line = cmp::min(h_line_bounds.1 as i32 + 1, height as i32) as f32;
(top_line, bottom_line)
},
};
// Get bounds for each double line we could have.
let v_left_bounds = canvas.v_line_bounds(v_lines.0, stroke_size);
let v_right_bounds = canvas.v_line_bounds(v_lines.1, stroke_size);
let h_top_bounds = canvas.h_line_bounds(h_lines.0, stroke_size);
let h_bot_bounds = canvas.h_line_bounds(h_lines.1, stroke_size);
let height = height as f32;
let width = width as f32;
// Left horizontal part.
let (top_left_size, bot_left_size) = match character {
'\u{2550}' | '\u{256b}' => (canvas.x_center(), canvas.x_center()),
'\u{2555}'..='\u{2557}' => (v_right_bounds.1, v_left_bounds.1),
'\u{255b}'..='\u{255d}' => (v_left_bounds.1, v_right_bounds.1),
'\u{2561}'..='\u{2563}' | '\u{256a}' | '\u{256c}' => {
(v_left_bounds.1, v_left_bounds.1)
},
'\u{2564}'..='\u{2568}' => (canvas.x_center(), v_left_bounds.1),
'\u{2569}'..='\u{2569}' => (v_left_bounds.1, canvas.x_center()),
_ => (0., 0.),
};
// Right horizontal part.
let (top_right_x, bot_right_x, right_size) = match character {
'\u{2550}' | '\u{2565}' | '\u{256b}' => {
(canvas.x_center(), canvas.x_center(), width)
},
'\u{2552}'..='\u{2554}' | '\u{2568}' => (v_left_bounds.0, v_right_bounds.0, width),
'\u{2558}'..='\u{255a}' => (v_right_bounds.0, v_left_bounds.0, width),
'\u{255e}'..='\u{2560}' | '\u{256a}' | '\u{256c}' => {
(v_right_bounds.0, v_right_bounds.0, width)
},
'\u{2564}' | '\u{2566}' => (canvas.x_center(), v_right_bounds.0, width),
'\u{2567}' | '\u{2569}' => (v_right_bounds.0, canvas.x_center(), width),
_ => (0., 0., 0.),
};
// Top vertical part.
let (left_top_size, right_top_size) = match character {
'\u{2551}' | '\u{256a}' => (canvas.y_center(), canvas.y_center()),
'\u{2558}'..='\u{255c}' | '\u{2568}' => (h_bot_bounds.1, h_top_bounds.1),
'\u{255d}' => (h_top_bounds.1, h_bot_bounds.1),
'\u{255e}'..='\u{2560}' => (canvas.y_center(), h_top_bounds.1),
'\u{2561}'..='\u{2563}' => (h_top_bounds.1, canvas.y_center()),
'\u{2567}' | '\u{2569}' | '\u{256b}' | '\u{256c}' => {
(h_top_bounds.1, h_top_bounds.1)
},
_ => (0., 0.),
};
// Bottom vertical part.
let (left_bot_y, right_bot_y, bottom_size) = match character {
'\u{2551}' | '\u{256a}' => (canvas.y_center(), canvas.y_center(), height),
'\u{2552}'..='\u{2554}' => (h_top_bounds.0, h_bot_bounds.0, height),
'\u{2555}'..='\u{2557}' => (h_bot_bounds.0, h_top_bounds.0, height),
'\u{255e}'..='\u{2560}' => (canvas.y_center(), h_bot_bounds.0, height),
'\u{2561}'..='\u{2563}' => (h_bot_bounds.0, canvas.y_center(), height),
'\u{2564}'..='\u{2566}' | '\u{256b}' | '\u{256c}' => {
(h_bot_bounds.0, h_bot_bounds.0, height)
},
_ => (0., 0., 0.),
};
// Left horizontal line.
canvas.draw_h_line(0., h_lines.0, top_left_size, stroke_size);
canvas.draw_h_line(0., h_lines.1, bot_left_size, stroke_size);
// Right horizontal line.
canvas.draw_h_line(top_right_x, h_lines.0, right_size, stroke_size);
canvas.draw_h_line(bot_right_x, h_lines.1, right_size, stroke_size);
// Top vertical line.
canvas.draw_v_line(v_lines.0, 0., left_top_size, stroke_size);
canvas.draw_v_line(v_lines.1, 0., right_top_size, stroke_size);
// Bottom vertical line.
canvas.draw_v_line(v_lines.0, left_bot_y, bottom_size, stroke_size);
canvas.draw_v_line(v_lines.1, right_bot_y, bottom_size, stroke_size);
},
// Arcs: '╭', '╮', '╯', '╰'.
'\u{256d}' | '\u{256e}' | '\u{256f}' | '\u{2570}' => {
canvas.draw_ellipse_arc(stroke_size);
// Mirror `X` axis.
if character == '\u{256d}' || character == '\u{2570}' {
let center = canvas.x_center() as usize;
let extra_offset = usize::from(stroke_size % 2 != width % 2);
let buffer = canvas.buffer_mut();
for y in 1..height {
let left = (y - 1) * width;
let right = y * width - 1;
if extra_offset != 0 {
buffer[right] = buffer[left];
}
for offset in 0..center {
buffer.swap(left + offset, right - offset - extra_offset);
}
}
}
// Mirror `Y` axis.
if character == '\u{256d}' || character == '\u{256e}' {
let center = canvas.y_center() as usize;
let extra_offset = usize::from(stroke_size % 2 != height % 2);
let buffer = canvas.buffer_mut();
if extra_offset != 0 {
let bottom_row = (height - 1) * width;
for index in 0..width {
buffer[bottom_row + index] = buffer[index];
}
}
for offset in 1..=center {
let top_row = (offset - 1) * width;
let bottom_row = (height - offset - extra_offset) * width;
for index in 0..width {
buffer.swap(top_row + index, bottom_row + index);
}
}
}
},
// Parts of full block: '▀', '▁', '▂', '▃', '▄', '▅', '▆', '▇', '▔', '▉', '▊', '▋', '▌',
// '▍', '▎', '▏', '▐', '▕'.
'\u{2580}'..='\u{2587}' | '\u{2589}'..='\u{2590}' | '\u{2594}' | '\u{2595}' => {
let width = width as f32;
let height = height as f32;
let mut rect_width = match character {
'\u{2589}' => width * 7. / 8.,
'\u{258a}' => width * 6. / 8.,
'\u{258b}' => width * 5. / 8.,
'\u{258c}' => width * 4. / 8.,
'\u{258d}' => width * 3. / 8.,
'\u{258e}' => width * 2. / 8.,
'\u{258f}' => width * 1. / 8.,
'\u{2590}' => width * 4. / 8.,
'\u{2595}' => width * 1. / 8.,
_ => width,
};
let (mut rect_height, mut y) = match character {
'\u{2580}' => (height * 4. / 8., height * 8. / 8.),
'\u{2581}' => (height * 1. / 8., height * 1. / 8.),
'\u{2582}' => (height * 2. / 8., height * 2. / 8.),
'\u{2583}' => (height * 3. / 8., height * 3. / 8.),
'\u{2584}' => (height * 4. / 8., height * 4. / 8.),
'\u{2585}' => (height * 5. / 8., height * 5. / 8.),
'\u{2586}' => (height * 6. / 8., height * 6. / 8.),
'\u{2587}' => (height * 7. / 8., height * 7. / 8.),
'\u{2594}' => (height * 1. / 8., height * 8. / 8.),
_ => (height, height),
};
// Fix `y` coordinates.
y = (height - y).round();
// Ensure that resulted glyph will be visible and also round sizes instead of straight
// flooring them.
rect_width = rect_width.round().max(1.);
rect_height = rect_height.round().max(1.);
let x = match character {
'\u{2590}' => canvas.x_center(),
'\u{2595}' => width - rect_width,
_ => 0.,
};
canvas.draw_rect(x, y, rect_width, rect_height, COLOR_FILL);
},
// Shades: '░', '▒', '▓', '█'.
'\u{2588}' | '\u{2591}' | '\u{2592}' | '\u{2593}' => {
let color = match character {
'\u{2588}' => COLOR_FILL,
'\u{2591}' => COLOR_FILL_ALPHA_STEP_3,
'\u{2592}' => COLOR_FILL_ALPHA_STEP_2,
'\u{2593}' => COLOR_FILL_ALPHA_STEP_1,
_ => unreachable!(),
};
canvas.fill(color);
},
// Quadrants: '▖', '▗', '▘', '▙', '▚', '▛', '▜', '▝', '▞', '▟'.
'\u{2596}'..='\u{259F}' => {
let x_center = canvas.x_center().round().max(1.);
let y_center = canvas.y_center().round().max(1.);
let (w_second, h_second) = match character {
'\u{2598}' | '\u{2599}' | '\u{259a}' | '\u{259b}' | '\u{259c}' => {
(x_center, y_center)
},
_ => (0., 0.),
};
let (w_first, h_first) = match character {
'\u{259b}' | '\u{259c}' | '\u{259d}' | '\u{259e}' | '\u{259f}' => {
(x_center, y_center)
},
_ => (0., 0.),
};
let (w_third, h_third) = match character {
'\u{2596}' | '\u{2599}' | '\u{259b}' | '\u{259e}' | '\u{259f}' => {
(x_center, y_center)
},
_ => (0., 0.),
};
let (w_fourth, h_fourth) = match character {
'\u{2597}' | '\u{2599}' | '\u{259a}' | '\u{259c}' | '\u{259f}' => {
(x_center, y_center)
},
_ => (0., 0.),
};
// Second quadrant.
canvas.draw_rect(0., 0., w_second, h_second, COLOR_FILL);
// First quadrant.
canvas.draw_rect(x_center, 0., w_first, h_first, COLOR_FILL);
// Third quadrant.
canvas.draw_rect(0., y_center, w_third, h_third, COLOR_FILL);
// Fourth quadrant.
canvas.draw_rect(x_center, y_center, w_fourth, h_fourth, COLOR_FILL);
},
// Sextants: '🬀', '🬁', '🬂', '🬃', '🬄', '🬅', '🬆', '🬇', '🬈', '🬉', '🬊', '🬋', '🬌', '🬍', '🬎',
// '🬏', '🬐', '🬑', '🬒', '🬓', '🬔', '🬕', '🬖', '🬗', '🬘', '🬙', '🬚', '🬛', '🬜', '🬝', '🬞', '🬟',
// '🬠', '🬡', '🬢', '🬣', '🬤', '🬥', '🬦', '🬧', '🬨', '🬩', '🬪', '🬫', '🬬', '🬭', '🬮', '🬯', '🬰',
// '🬱', '🬲', '🬳', '🬴', '🬵', '🬶', '🬷', '🬸', '🬹', '🬺', '🬻'.
'\u{1fb00}'..='\u{1fb3b}' => {
let x_center = canvas.x_center().round().max(1.);
let y_third = (height as f32 / 3.).round().max(1.);
let y_last_third = height as f32 - 2. * y_third;
let (w_top_left, h_top_left) = match character {
'\u{1fb00}' | '\u{1fb02}' | '\u{1fb04}' | '\u{1fb06}' | '\u{1fb08}'
| '\u{1fb0a}' | '\u{1fb0c}' | '\u{1fb0e}' | '\u{1fb10}' | '\u{1fb12}'
| '\u{1fb15}' | '\u{1fb17}' | '\u{1fb19}' | '\u{1fb1b}' | '\u{1fb1d}'
| '\u{1fb1f}' | '\u{1fb21}' | '\u{1fb23}' | '\u{1fb25}' | '\u{1fb27}'
| '\u{1fb28}' | '\u{1fb2a}' | '\u{1fb2c}' | '\u{1fb2e}' | '\u{1fb30}'
| '\u{1fb32}' | '\u{1fb34}' | '\u{1fb36}' | '\u{1fb38}' | '\u{1fb3a}' => {
(x_center, y_third)
},
_ => (0., 0.),
};
let (w_top_right, h_top_right) = match character {
'\u{1fb01}' | '\u{1fb02}' | '\u{1fb05}' | '\u{1fb06}' | '\u{1fb09}'
| '\u{1fb0a}' | '\u{1fb0d}' | '\u{1fb0e}' | '\u{1fb11}' | '\u{1fb12}'
| '\u{1fb14}' | '\u{1fb15}' | '\u{1fb18}' | '\u{1fb19}' | '\u{1fb1c}'
| '\u{1fb1d}' | '\u{1fb20}' | '\u{1fb21}' | '\u{1fb24}' | '\u{1fb25}'
| '\u{1fb28}' | '\u{1fb2b}' | '\u{1fb2c}' | '\u{1fb2f}' | '\u{1fb30}'
| '\u{1fb33}' | '\u{1fb34}' | '\u{1fb37}' | '\u{1fb38}' | '\u{1fb3b}' => {
(x_center, y_third)
},
_ => (0., 0.),
};
let (w_mid_left, h_mid_left) = match character {
'\u{1fb03}' | '\u{1fb04}' | '\u{1fb05}' | '\u{1fb06}' | '\u{1fb0b}'
| '\u{1fb0c}' | '\u{1fb0d}' | '\u{1fb0e}' | '\u{1fb13}' | '\u{1fb14}'
| '\u{1fb15}' | '\u{1fb1a}' | '\u{1fb1b}' | '\u{1fb1c}' | '\u{1fb1d}'
| '\u{1fb22}' | '\u{1fb23}' | '\u{1fb24}' | '\u{1fb25}' | '\u{1fb29}'
| '\u{1fb2a}' | '\u{1fb2b}' | '\u{1fb2c}' | '\u{1fb31}' | '\u{1fb32}'
| '\u{1fb33}' | '\u{1fb34}' | '\u{1fb39}' | '\u{1fb3a}' | '\u{1fb3b}' => {
(x_center, y_third)
},
_ => (0., 0.),
};
let (w_mid_right, h_mid_right) = match character {
'\u{1fb07}' | '\u{1fb08}' | '\u{1fb09}' | '\u{1fb0a}' | '\u{1fb0b}'
| '\u{1fb0c}' | '\u{1fb0d}' | '\u{1fb0e}' | '\u{1fb16}' | '\u{1fb17}'
| '\u{1fb18}' | '\u{1fb19}' | '\u{1fb1a}' | '\u{1fb1b}' | '\u{1fb1c}'
| '\u{1fb1d}' | '\u{1fb26}' | '\u{1fb27}' | '\u{1fb28}' | '\u{1fb29}'
| '\u{1fb2a}' | '\u{1fb2b}' | '\u{1fb2c}' | '\u{1fb35}' | '\u{1fb36}'
| '\u{1fb37}' | '\u{1fb38}' | '\u{1fb39}' | '\u{1fb3a}' | '\u{1fb3b}' => {
(x_center, y_third)
},
_ => (0., 0.),
};
let (w_bottom_left, h_bottom_left) = match character {
'\u{1fb0f}' | '\u{1fb10}' | '\u{1fb11}' | '\u{1fb12}' | '\u{1fb13}'
| '\u{1fb14}' | '\u{1fb15}' | '\u{1fb16}' | '\u{1fb17}' | '\u{1fb18}'
| '\u{1fb19}' | '\u{1fb1a}' | '\u{1fb1b}' | '\u{1fb1c}' | '\u{1fb1d}'
| '\u{1fb2d}' | '\u{1fb2e}' | '\u{1fb2f}' | '\u{1fb30}' | '\u{1fb31}'
| '\u{1fb32}' | '\u{1fb33}' | '\u{1fb34}' | '\u{1fb35}' | '\u{1fb36}'
| '\u{1fb37}' | '\u{1fb38}' | '\u{1fb39}' | '\u{1fb3a}' | '\u{1fb3b}' => {
(x_center, y_last_third)
},
_ => (0., 0.),
};
let (w_bottom_right, h_bottom_right) = match character {
'\u{1fb1e}' | '\u{1fb1f}' | '\u{1fb20}' | '\u{1fb21}' | '\u{1fb22}'
| '\u{1fb23}' | '\u{1fb24}' | '\u{1fb25}' | '\u{1fb26}' | '\u{1fb27}'
| '\u{1fb28}' | '\u{1fb29}' | '\u{1fb2a}' | '\u{1fb2b}' | '\u{1fb2c}'
| '\u{1fb2d}' | '\u{1fb2e}' | '\u{1fb2f}' | '\u{1fb30}' | '\u{1fb31}'
| '\u{1fb32}' | '\u{1fb33}' | '\u{1fb34}' | '\u{1fb35}' | '\u{1fb36}'
| '\u{1fb37}' | '\u{1fb38}' | '\u{1fb39}' | '\u{1fb3a}' | '\u{1fb3b}' => {
(x_center, y_last_third)
},
_ => (0., 0.),
};
canvas.draw_rect(0., 0., w_top_left, h_top_left, COLOR_FILL);
canvas.draw_rect(x_center, 0., w_top_right, h_top_right, COLOR_FILL);
canvas.draw_rect(0., y_third, w_mid_left, h_mid_left, COLOR_FILL);
canvas.draw_rect(x_center, y_third, w_mid_right, h_mid_right, COLOR_FILL);
canvas.draw_rect(0., y_third * 2., w_bottom_left, h_bottom_left, COLOR_FILL);
canvas.draw_rect(x_center, y_third * 2., w_bottom_right, h_bottom_right, COLOR_FILL);
},
_ => unreachable!(),
}
let top = height as i32 + metrics.descent as i32;
let buffer = BitmapBuffer::Rgb(canvas.into_raw());
RasterizedGlyph {
character,
top,
left: 0,
height: height as i32,
width: width as i32,
buffer,
advance: (width as i32, height as i32),
}
} | //! Hand-rolled drawing of unicode characters that need to fully cover their character area.
use std::{cmp, mem, ops};
use crossfont::{BitmapBuffer, Metrics, RasterizedGlyph};
use crate::config::ui_config::Delta;
// Colors which are used for filling shade variants.
const COLOR_FILL_ALPHA_STEP_1: Pixel = Pixel { _r: 192, _g: 192, _b: 192 };
const COLOR_FILL_ALPHA_STEP_2: Pixel = Pixel { _r: 128, _g: 128, _b: 128 };
const COLOR_FILL_ALPHA_STEP_3: Pixel = Pixel { _r: 64, _g: 64, _b: 64 };
/// Default color used for filling.
const COLOR_FILL: Pixel = Pixel { _r: 255, _g: 255, _b: 255 };
const POWERLINE_TRIANGLE_LTR: char = '\u{e0b0}';
const POWERLINE_ARROW_LTR: char = '\u{e0b1}';
const POWERLINE_TRIANGLE_RTL: char = '\u{e0b2}';
const POWERLINE_ARROW_RTL: char = '\u{e0b3}';
/// Returns the rasterized glyph if the character is part of the built-in font.
pub fn builtin_glyph(
character: char,
metrics: &Metrics,
offset: &Delta<i8>,
glyph_offset: &Delta<i8>,
) -> Option<RasterizedGlyph> {
let mut glyph = match character {
// Box drawing characters and block elements.
'\u{2500}'..='\u{259f}' | '\u{1fb00}'..='\u{1fb3b}' => {
box_drawing(character, metrics, offset)
},
// Powerline symbols: '','','',''
POWERLINE_TRIANGLE_LTR..=POWERLINE_ARROW_RTL => {
powerline_drawing(character, metrics, offset)?
},
_ => return None,
};
// Since we want to ignore `glyph_offset` for the built-in font, subtract it to compensate its
// addition when loading glyphs in the renderer.
glyph.left -= glyph_offset.x as i32;
glyph.top -= glyph_offset.y as i32;
Some(glyph)
}
fn box_drawing(character: char, metrics: &Metrics, offset: &Delta<i8>) -> RasterizedGlyph {
// Ensure that width and height is at least one.
let height = (metrics.line_height as i32 + offset.y as i32).max(1) as usize;
let width = (metrics.average_advance as i32 + offset.x as i32).max(1) as usize;
let stroke_size = calculate_stroke_size(width);
let heavy_stroke_size = stroke_size * 2;
// Certain symbols require larger canvas than the cell itself, since for proper contiguous
// lines they require drawing on neighbour cells. So treat them specially early on and handle
// 'normal' characters later.
let mut canvas = match character {
// Diagonals: '╱', '╲', '╳'.
'\u{2571}'..='\u{2573}' => {
// Last coordinates.
let x_end = width as f32;
let mut y_end = height as f32;
let top = height as i32 + metrics.descent as i32 + stroke_size as i32;
let height = height + 2 * stroke_size;
let mut canvas = Canvas::new(width, height + 2 * stroke_size);
// The offset that we should take into account when drawing, since we've enlarged
// buffer vertically by twice of that amount.
let y_offset = stroke_size as f32;
y_end += y_offset;
let k = y_end / x_end;
let f_x = |x: f32, h: f32| -> f32 { -1. * k * x + h + y_offset };
let g_x = |x: f32, h: f32| -> f32 { k * x + h + y_offset };
let from_x = 0.;
let to_x = x_end + 1.;
for stroke_size in 0..2 * stroke_size {
let stroke_size = stroke_size as f32 / 2.;
if character == '\u{2571}' || character == '\u{2573}' {
let h = y_end - stroke_size;
let from_y = f_x(from_x, h);
let to_y = f_x(to_x, h);
canvas.draw_line(from_x, from_y, to_x, to_y);
}
if character == '\u{2572}' || character == '\u{2573}' {
let from_y = g_x(from_x, stroke_size);
let to_y = g_x(to_x, stroke_size);
canvas.draw_line(from_x, from_y, to_x, to_y);
}
}
let buffer = BitmapBuffer::Rgb(canvas.into_raw());
return RasterizedGlyph {
character,
top,
left: 0,
height: height as i32,
width: width as i32,
buffer,
advance: (width as i32, height as i32),
};
},
_ => Canvas::new(width, height),
};
match character {
// Horizontal dashes: '┄', '┅', '┈', '┉', '╌', '╍'.
'\u{2504}' | '\u{2505}' | '\u{2508}' | '\u{2509}' | '\u{254c}' | '\u{254d}' => {
let (num_gaps, stroke_size) = match character {
'\u{2504}' => (2, stroke_size),
'\u{2505}' => (2, heavy_stroke_size),
'\u{2508}' => (3, stroke_size),
'\u{2509}' => (3, heavy_stroke_size),
'\u{254c}' => (1, stroke_size),
'\u{254d}' => (1, heavy_stroke_size),
_ => unreachable!(),
};
let dash_gap_len = cmp::max(width / 8, 1);
let dash_len =
cmp::max(width.saturating_sub(dash_gap_len * num_gaps) / (num_gaps + 1), 1);
let y = canvas.y_center();
for gap in 0..=num_gaps {
let x = cmp::min(gap * (dash_len + dash_gap_len), width);
canvas.draw_h_line(x as f32, y, dash_len as f32, stroke_size);
}
},
// Vertical dashes: '┆', '┇', '┊', '┋', '╎', '╏'.
'\u{2506}' | '\u{2507}' | '\u{250a}' | '\u{250b}' | '\u{254e}' | '\u{254f}' => {
let (num_gaps, stroke_size) = match character {
'\u{2506}' => (2, stroke_size),
'\u{2507}' => (2, heavy_stroke_size),
'\u{250a}' => (3, stroke_size),
'\u{250b}' => (3, heavy_stroke_size),
'\u{254e}' => (1, stroke_size),
'\u{254f}' => (1, heavy_stroke_size),
_ => unreachable!(),
};
let dash_gap_len = cmp::max(height / 8, 1);
let dash_len =
cmp::max(height.saturating_sub(dash_gap_len * num_gaps) / (num_gaps + 1), 1);
let x = canvas.x_center();
for gap in 0..=num_gaps {
let y = cmp::min(gap * (dash_len + dash_gap_len), height);
canvas.draw_v_line(x, y as f32, dash_len as f32, stroke_size);
}
},
// Horizontal lines: '─', '━', '╴', '╶', '╸', '╺'.
// Vertical lines: '│', '┃', '╵', '╷', '╹', '╻'.
// Light and heavy line box components:
// '┌','┍','┎','┏','┐','┑','┒','┓','└','┕','┖','┗','┘','┙','┚','┛',├','┝','┞','┟','┠','┡',
// '┢','┣','┤','┥','┦','┧','┨','┩','┪','┫','┬','┭','┮','┯','┰','┱','┲','┳','┴','┵','┶','┷',
// '┸','┹','┺','┻','┼','┽','┾','┿','╀','╁','╂','╃','╄','╅','╆','╇','╈','╉','╊','╋'.
// Mixed light and heavy lines: '╼', '╽', '╾', '╿'.
'\u{2500}'..='\u{2503}' | '\u{250c}'..='\u{254b}' | '\u{2574}'..='\u{257f}' => {
// Left horizontal line.
let stroke_size_h1 = match character {
'\u{2500}' | '\u{2510}' | '\u{2512}' | '\u{2518}' | '\u{251a}' | '\u{2524}'
| '\u{2526}' | '\u{2527}' | '\u{2528}' | '\u{252c}' | '\u{252e}' | '\u{2530}'
| '\u{2532}' | '\u{2534}' | '\u{2536}' | '\u{2538}' | '\u{253a}' | '\u{253c}'
| '\u{253e}' | '\u{2540}' | '\u{2541}' | '\u{2542}' | '\u{2544}' | '\u{2546}'
| '\u{254a}' | '\u{2574}' | '\u{257c}' => stroke_size,
'\u{2501}' | '\u{2511}' | '\u{2513}' | '\u{2519}' | '\u{251b}' | '\u{2525}'
| '\u{2529}' | '\u{252a}' | '\u{252b}' | '\u{252d}' | '\u{252f}' | '\u{2531}'
| '\u{2533}' | '\u{2535}' | '\u{2537}' | '\u{2539}' | '\u{253b}' | '\u{253d}'
| '\u{253f}' | '\u{2543}' | '\u{2545}' | '\u{2547}' | '\u{2548}' | '\u{2549}'
| '\u{254b}' | '\u{2578}' | '\u{257e}' => heavy_stroke_size,
_ => 0,
};
// Right horizontal line.
let stroke_size_h2 = match character {
'\u{2500}' | '\u{250c}' | '\u{250e}' | '\u{2514}' | '\u{2516}' | '\u{251c}'
| '\u{251e}' | '\u{251f}' | '\u{2520}' | '\u{252c}' | '\u{252d}' | '\u{2530}'
| '\u{2531}' | '\u{2534}' | '\u{2535}' | '\u{2538}' | '\u{2539}' | '\u{253c}'
| '\u{253d}' | '\u{2540}' | '\u{2541}' | '\u{2542}' | '\u{2543}' | '\u{2545}'
| '\u{2549}' | '\u{2576}' | '\u{257e}' => stroke_size,
'\u{2501}' | '\u{250d}' | '\u{250f}' | '\u{2515}' | '\u{2517}' | '\u{251d}'
| '\u{2521}' | '\u{2522}' | '\u{2523}' | '\u{252e}' | '\u{252f}' | '\u{2532}'
| '\u{2533}' | '\u{2536}' | '\u{2537}' | '\u{253a}' | '\u{253b}' | '\u{253e}'
| '\u{253f}' | '\u{2544}' | '\u{2546}' | '\u{2547}' | '\u{2548}' | '\u{254a}'
| '\u{254b}' | '\u{257a}' | '\u{257c}' => heavy_stroke_size,
_ => 0,
};
// Top vertical line.
let stroke_size_v1 = match character {
'\u{2502}' | '\u{2514}' | '\u{2515}' | '\u{2518}' | '\u{2519}' | '\u{251c}'
| '\u{251d}' | '\u{251f}' | '\u{2522}' | '\u{2524}' | '\u{2525}' | '\u{2527}'
| '\u{252a}' | '\u{2534}' | '\u{2535}' | '\u{2536}' | '\u{2537}' | '\u{253c}'
| '\u{253d}' | '\u{253e}' | '\u{253f}' | '\u{2541}' | '\u{2545}' | '\u{2546}'
| '\u{2548}' | '\u{2575}' | '\u{257d}' => stroke_size,
'\u{2503}' | '\u{2516}' | '\u{2517}' | '\u{251a}' | '\u{251b}' | '\u{251e}'
| '\u{2520}' | '\u{2521}' | '\u{2523}' | '\u{2526}' | '\u{2528}' | '\u{2529}'
| '\u{252b}' | '\u{2538}' | '\u{2539}' | '\u{253a}' | '\u{253b}' | '\u{2540}'
| '\u{2542}' | '\u{2543}' | '\u{2544}' | '\u{2547}' | '\u{2549}' | '\u{254a}'
| '\u{254b}' | '\u{2579}' | '\u{257f}' => heavy_stroke_size,
_ => 0,
};
// Bottom vertical line.
let stroke_size_v2 = match character {
'\u{2502}' | '\u{250c}' | '\u{250d}' | '\u{2510}' | '\u{2511}' | '\u{251c}'
| '\u{251d}' | '\u{251e}' | '\u{2521}' | '\u{2524}' | '\u{2525}' | '\u{2526}'
| '\u{2529}' | '\u{252c}' | '\u{252d}' | '\u{252e}' | '\u{252f}' | '\u{253c}'
| '\u{253d}' | '\u{253e}' | '\u{253f}' | '\u{2540}' | '\u{2543}' | '\u{2544}'
| '\u{2547}' | '\u{2577}' | '\u{257f}' => stroke_size,
'\u{2503}' | '\u{250e}' | '\u{250f}' | '\u{2512}' | '\u{2513}' | '\u{251f}'
| '\u{2520}' | '\u{2522}' | '\u{2523}' | '\u{2527}' | '\u{2528}' | '\u{252a}'
| '\u{252b}' | '\u{2530}' | '\u{2531}' | '\u{2532}' | '\u{2533}' | '\u{2541}'
| '\u{2542}' | '\u{2545}' | '\u{2546}' | '\u{2548}' | '\u{2549}' | '\u{254a}'
| '\u{254b}' | '\u{257b}' | '\u{257d}' => heavy_stroke_size,
_ => 0,
};
let x_v = canvas.x_center();
let y_h = canvas.y_center();
let v_line_bounds_top = canvas.v_line_bounds(x_v, stroke_size_v1);
let v_line_bounds_bot = canvas.v_line_bounds(x_v, stroke_size_v2);
let h_line_bounds_left = canvas.h_line_bounds(y_h, stroke_size_h1);
let h_line_bounds_right = canvas.h_line_bounds(y_h, stroke_size_h2);
let size_h1 = cmp::max(v_line_bounds_top.1 as i32, v_line_bounds_bot.1 as i32) as f32;
let x_h = cmp::min(v_line_bounds_top.0 as i32, v_line_bounds_bot.0 as i32) as f32;
let size_h2 = width as f32 - x_h;
let size_v1 =
cmp::max(h_line_bounds_left.1 as i32, h_line_bounds_right.1 as i32) as f32;
let y_v = cmp::min(h_line_bounds_left.0 as i32, h_line_bounds_right.0 as i32) as f32;
let size_v2 = height as f32 - y_v;
// Left horizontal line.
canvas.draw_h_line(0., y_h, size_h1, stroke_size_h1);
// Right horizontal line.
canvas.draw_h_line(x_h, y_h, size_h2, stroke_size_h2);
// Top vertical line.
canvas.draw_v_line(x_v, 0., size_v1, stroke_size_v1);
// Bottom vertical line.
canvas.draw_v_line(x_v, y_v, size_v2, stroke_size_v2);
},
// Light and double line box components:
// '═','║','╒','╓','╔','╕','╖','╗','╘','╙','╚','╛','╜','╝','╞','╟','╠','╡','╢','╣','╤','╥',
// '╦','╧','╨','╩','╪','╫','╬'.
'\u{2550}'..='\u{256c}' => {
let v_lines = match character {
'\u{2552}' | '\u{2555}' | '\u{2558}' | '\u{255b}' | '\u{255e}' | '\u{2561}'
| '\u{2564}' | '\u{2567}' | '\u{256a}' => (canvas.x_center(), canvas.x_center()),
_ => {
let v_line_bounds = canvas.v_line_bounds(canvas.x_center(), stroke_size);
let left_line = cmp::max(v_line_bounds.0 as i32 - 1, 0) as f32;
let right_line = cmp::min(v_line_bounds.1 as i32 + 1, width as i32) as f32;
(left_line, right_line)
},
};
let h_lines = match character {
'\u{2553}' | '\u{2556}' | '\u{2559}' | '\u{255c}' | '\u{255f}' | '\u{2562}'
| '\u{2565}' | '\u{2568}' | '\u{256b}' => (canvas.y_center(), canvas.y_center()),
_ => {
let h_line_bounds = canvas.h_line_bounds(canvas.y_center(), stroke_size);
let top_line = cmp::max(h_line_bounds.0 as i32 - 1, 0) as f32;
let bottom_line = cmp::min(h_line_bounds.1 as i32 + 1, height as i32) as f32;
(top_line, bottom_line)
},
};
// Get bounds for each double line we could have.
let v_left_bounds = canvas.v_line_bounds(v_lines.0, stroke_size);
let v_right_bounds = canvas.v_line_bounds(v_lines.1, stroke_size);
let h_top_bounds = canvas.h_line_bounds(h_lines.0, stroke_size);
let h_bot_bounds = canvas.h_line_bounds(h_lines.1, stroke_size);
let height = height as f32;
let width = width as f32;
// Left horizontal part.
let (top_left_size, bot_left_size) = match character {
'\u{2550}' | '\u{256b}' => (canvas.x_center(), canvas.x_center()),
'\u{2555}'..='\u{2557}' => (v_right_bounds.1, v_left_bounds.1),
'\u{255b}'..='\u{255d}' => (v_left_bounds.1, v_right_bounds.1),
'\u{2561}'..='\u{2563}' | '\u{256a}' | '\u{256c}' => {
(v_left_bounds.1, v_left_bounds.1)
},
'\u{2564}'..='\u{2568}' => (canvas.x_center(), v_left_bounds.1),
'\u{2569}'..='\u{2569}' => (v_left_bounds.1, canvas.x_center()),
_ => (0., 0.),
};
// Right horizontal part.
let (top_right_x, bot_right_x, right_size) = match character {
'\u{2550}' | '\u{2565}' | '\u{256b}' => {
(canvas.x_center(), canvas.x_center(), width)
},
'\u{2552}'..='\u{2554}' | '\u{2568}' => (v_left_bounds.0, v_right_bounds.0, width),
'\u{2558}'..='\u{255a}' => (v_right_bounds.0, v_left_bounds.0, width),
'\u{255e}'..='\u{2560}' | '\u{256a}' | '\u{256c}' => {
(v_right_bounds.0, v_right_bounds.0, width)
},
'\u{2564}' | '\u{2566}' => (canvas.x_center(), v_right_bounds.0, width),
'\u{2567}' | '\u{2569}' => (v_right_bounds.0, canvas.x_center(), width),
_ => (0., 0., 0.),
};
// Top vertical part.
let (left_top_size, right_top_size) = match character {
'\u{2551}' | '\u{256a}' => (canvas.y_center(), canvas.y_center()),
'\u{2558}'..='\u{255c}' | '\u{2568}' => (h_bot_bounds.1, h_top_bounds.1),
'\u{255d}' => (h_top_bounds.1, h_bot_bounds.1),
'\u{255e}'..='\u{2560}' => (canvas.y_center(), h_top_bounds.1),
'\u{2561}'..='\u{2563}' => (h_top_bounds.1, canvas.y_center()),
'\u{2567}' | '\u{2569}' | '\u{256b}' | '\u{256c}' => {
(h_top_bounds.1, h_top_bounds.1)
},
_ => (0., 0.),
};
// Bottom vertical part.
let (left_bot_y, right_bot_y, bottom_size) = match character {
'\u{2551}' | '\u{256a}' => (canvas.y_center(), canvas.y_center(), height),
'\u{2552}'..='\u{2554}' => (h_top_bounds.0, h_bot_bounds.0, height),
'\u{2555}'..='\u{2557}' => (h_bot_bounds.0, h_top_bounds.0, height),
'\u{255e}'..='\u{2560}' => (canvas.y_center(), h_bot_bounds.0, height),
'\u{2561}'..='\u{2563}' => (h_bot_bounds.0, canvas.y_center(), height),
'\u{2564}'..='\u{2566}' | '\u{256b}' | '\u{256c}' => {
(h_bot_bounds.0, h_bot_bounds.0, height)
},
_ => (0., 0., 0.),
};
// Left horizontal line.
canvas.draw_h_line(0., h_lines.0, top_left_size, stroke_size);
canvas.draw_h_line(0., h_lines.1, bot_left_size, stroke_size);
// Right horizontal line.
canvas.draw_h_line(top_right_x, h_lines.0, right_size, stroke_size);
canvas.draw_h_line(bot_right_x, h_lines.1, right_size, stroke_size);
// Top vertical line.
canvas.draw_v_line(v_lines.0, 0., left_top_size, stroke_size);
canvas.draw_v_line(v_lines.1, 0., right_top_size, stroke_size);
// Bottom vertical line.
canvas.draw_v_line(v_lines.0, left_bot_y, bottom_size, stroke_size);
canvas.draw_v_line(v_lines.1, right_bot_y, bottom_size, stroke_size);
},
// Arcs: '╭', '╮', '╯', '╰'.
'\u{256d}' | '\u{256e}' | '\u{256f}' | '\u{2570}' => {
canvas.draw_ellipse_arc(stroke_size);
// Mirror `X` axis.
if character == '\u{256d}' || character == '\u{2570}' {
let center = canvas.x_center() as usize;
let extra_offset = usize::from(stroke_size % 2 != width % 2);
let buffer = canvas.buffer_mut();
for y in 1..height {
let left = (y - 1) * width;
let right = y * width - 1;
if extra_offset != 0 {
buffer[right] = buffer[left];
}
for offset in 0..center {
buffer.swap(left + offset, right - offset - extra_offset);
}
}
}
// Mirror `Y` axis.
if character == '\u{256d}' || character == '\u{256e}' {
let center = canvas.y_center() as usize;
let extra_offset = usize::from(stroke_size % 2 != height % 2);
let buffer = canvas.buffer_mut();
if extra_offset != 0 {
let bottom_row = (height - 1) * width;
for index in 0..width {
buffer[bottom_row + index] = buffer[index];
}
}
for offset in 1..=center {
let top_row = (offset - 1) * width;
let bottom_row = (height - offset - extra_offset) * width;
for index in 0..width {
buffer.swap(top_row + index, bottom_row + index);
}
}
}
},
// Parts of full block: '▀', '▁', '▂', '▃', '▄', '▅', '▆', '▇', '▔', '▉', '▊', '▋', '▌',
// '▍', '▎', '▏', '▐', '▕'.
'\u{2580}'..='\u{2587}' | '\u{2589}'..='\u{2590}' | '\u{2594}' | '\u{2595}' => {
let width = width as f32;
let height = height as f32;
let mut rect_width = match character {
'\u{2589}' => width * 7. / 8.,
'\u{258a}' => width * 6. / 8.,
'\u{258b}' => width * 5. / 8.,
'\u{258c}' => width * 4. / 8.,
'\u{258d}' => width * 3. / 8.,
'\u{258e}' => width * 2. / 8.,
'\u{258f}' => width * 1. / 8.,
'\u{2590}' => width * 4. / 8.,
'\u{2595}' => width * 1. / 8.,
_ => width,
};
let (mut rect_height, mut y) = match character {
'\u{2580}' => (height * 4. / 8., height * 8. / 8.),
'\u{2581}' => (height * 1. / 8., height * 1. / 8.),
'\u{2582}' => (height * 2. / 8., height * 2. / 8.),
'\u{2583}' => (height * 3. / 8., height * 3. / 8.),
'\u{2584}' => (height * 4. / 8., height * 4. / 8.),
'\u{2585}' => (height * 5. / 8., height * 5. / 8.),
'\u{2586}' => (height * 6. / 8., height * 6. / 8.),
'\u{2587}' => (height * 7. / 8., height * 7. / 8.),
'\u{2594}' => (height * 1. / 8., height * 8. / 8.),
_ => (height, height),
};
// Fix `y` coordinates.
y = (height - y).round();
// Ensure that resulted glyph will be visible and also round sizes instead of straight
// flooring them.
rect_width = rect_width.round().max(1.);
rect_height = rect_height.round().max(1.);
let x = match character {
'\u{2590}' => canvas.x_center(),
'\u{2595}' => width - rect_width,
_ => 0.,
};
canvas.draw_rect(x, y, rect_width, rect_height, COLOR_FILL);
},
// Shades: '░', '▒', '▓', '█'.
'\u{2588}' | '\u{2591}' | '\u{2592}' | '\u{2593}' => {
let color = match character {
'\u{2588}' => COLOR_FILL,
'\u{2591}' => COLOR_FILL_ALPHA_STEP_3,
'\u{2592}' => COLOR_FILL_ALPHA_STEP_2,
'\u{2593}' => COLOR_FILL_ALPHA_STEP_1,
_ => unreachable!(),
};
canvas.fill(color);
},
// Quadrants: '▖', '▗', '▘', '▙', '▚', '▛', '▜', '▝', '▞', '▟'.
'\u{2596}'..='\u{259F}' => {
let x_center = canvas.x_center().round().max(1.);
let y_center = canvas.y_center().round().max(1.);
let (w_second, h_second) = match character {
'\u{2598}' | '\u{2599}' | '\u{259a}' | '\u{259b}' | '\u{259c}' => {
(x_center, y_center)
},
_ => (0., 0.),
};
let (w_first, h_first) = match character {
'\u{259b}' | '\u{259c}' | '\u{259d}' | '\u{259e}' | '\u{259f}' => {
(x_center, y_center)
},
_ => (0., 0.),
};
let (w_third, h_third) = match character {
'\u{2596}' | '\u{2599}' | '\u{259b}' | '\u{259e}' | '\u{259f}' => {
(x_center, y_center)
},
_ => (0., 0.),
};
let (w_fourth, h_fourth) = match character {
'\u{2597}' | '\u{2599}' | '\u{259a}' | '\u{259c}' | '\u{259f}' => {
(x_center, y_center)
},
_ => (0., 0.),
};
// Second quadrant.
canvas.draw_rect(0., 0., w_second, h_second, COLOR_FILL);
// First quadrant.
canvas.draw_rect(x_center, 0., w_first, h_first, COLOR_FILL);
// Third quadrant.
canvas.draw_rect(0., y_center, w_third, h_third, COLOR_FILL);
// Fourth quadrant.
canvas.draw_rect(x_center, y_center, w_fourth, h_fourth, COLOR_FILL);
},
// Sextants: '🬀', '🬁', '🬂', '🬃', '🬄', '🬅', '🬆', '🬇', '🬈', '🬉', '🬊', '🬋', '🬌', '🬍', '🬎',
// '🬏', '🬐', '🬑', '🬒', '🬓', '🬔', '🬕', '🬖', '🬗', '🬘', '🬙', '🬚', '🬛', '🬜', '🬝', '🬞', '🬟',
// '🬠', '🬡', '🬢', '🬣', '🬤', '🬥', '🬦', '🬧', '🬨', '🬩', '🬪', '🬫', '🬬', '🬭', '🬮', '🬯', '🬰',
// '🬱', '🬲', '🬳', '🬴', '🬵', '🬶', '🬷', '🬸', '🬹', '🬺', '🬻'.
'\u{1fb00}'..='\u{1fb3b}' => {
let x_center = canvas.x_center().round().max(1.);
let y_third = (height as f32 / 3.).round().max(1.);
let y_last_third = height as f32 - 2. * y_third;
let (w_top_left, h_top_left) = match character {
'\u{1fb00}' | '\u{1fb02}' | '\u{1fb04}' | '\u{1fb06}' | '\u{1fb08}'
| '\u{1fb0a}' | '\u{1fb0c}' | '\u{1fb0e}' | '\u{1fb10}' | '\u{1fb12}'
| '\u{1fb15}' | '\u{1fb17}' | '\u{1fb19}' | '\u{1fb1b}' | '\u{1fb1d}'
| '\u{1fb1f}' | '\u{1fb21}' | '\u{1fb23}' | '\u{1fb25}' | '\u{1fb27}'
| '\u{1fb28}' | '\u{1fb2a}' | '\u{1fb2c}' | '\u{1fb2e}' | '\u{1fb30}'
| '\u{1fb32}' | '\u{1fb34}' | '\u{1fb36}' | '\u{1fb38}' | '\u{1fb3a}' => {
(x_center, y_third)
},
_ => (0., 0.),
};
let (w_top_right, h_top_right) = match character {
'\u{1fb01}' | '\u{1fb02}' | '\u{1fb05}' | '\u{1fb06}' | '\u{1fb09}'
| '\u{1fb0a}' | '\u{1fb0d}' | '\u{1fb0e}' | '\u{1fb11}' | '\u{1fb12}'
| '\u{1fb14}' | '\u{1fb15}' | '\u{1fb18}' | '\u{1fb19}' | '\u{1fb1c}'
| '\u{1fb1d}' | '\u{1fb20}' | '\u{1fb21}' | '\u{1fb24}' | '\u{1fb25}'
| '\u{1fb28}' | '\u{1fb2b}' | '\u{1fb2c}' | '\u{1fb2f}' | '\u{1fb30}'
| '\u{1fb33}' | '\u{1fb34}' | '\u{1fb37}' | '\u{1fb38}' | '\u{1fb3b}' => {
(x_center, y_third)
},
_ => (0., 0.),
};
let (w_mid_left, h_mid_left) = match character {
'\u{1fb03}' | '\u{1fb04}' | '\u{1fb05}' | '\u{1fb06}' | '\u{1fb0b}'
| '\u{1fb0c}' | '\u{1fb0d}' | '\u{1fb0e}' | '\u{1fb13}' | '\u{1fb14}'
| '\u{1fb15}' | '\u{1fb1a}' | '\u{1fb1b}' | '\u{1fb1c}' | '\u{1fb1d}'
| '\u{1fb22}' | '\u{1fb23}' | '\u{1fb24}' | '\u{1fb25}' | '\u{1fb29}'
| '\u{1fb2a}' | '\u{1fb2b}' | '\u{1fb2c}' | '\u{1fb31}' | '\u{1fb32}'
| '\u{1fb33}' | '\u{1fb34}' | '\u{1fb39}' | '\u{1fb3a}' | '\u{1fb3b}' => {
(x_center, y_third)
},
_ => (0., 0.),
};
let (w_mid_right, h_mid_right) = match character {
'\u{1fb07}' | '\u{1fb08}' | '\u{1fb09}' | '\u{1fb0a}' | '\u{1fb0b}'
| '\u{1fb0c}' | '\u{1fb0d}' | '\u{1fb0e}' | '\u{1fb16}' | '\u{1fb17}'
| '\u{1fb18}' | '\u{1fb19}' | '\u{1fb1a}' | '\u{1fb1b}' | '\u{1fb1c}'
| '\u{1fb1d}' | '\u{1fb26}' | '\u{1fb27}' | '\u{1fb28}' | '\u{1fb29}'
| '\u{1fb2a}' | '\u{1fb2b}' | '\u{1fb2c}' | '\u{1fb35}' | '\u{1fb36}'
| '\u{1fb37}' | '\u{1fb38}' | '\u{1fb39}' | '\u{1fb3a}' | '\u{1fb3b}' => {
(x_center, y_third)
},
_ => (0., 0.),
};
let (w_bottom_left, h_bottom_left) = match character {
'\u{1fb0f}' | '\u{1fb10}' | '\u{1fb11}' | '\u{1fb12}' | '\u{1fb13}'
| '\u{1fb14}' | '\u{1fb15}' | '\u{1fb16}' | '\u{1fb17}' | '\u{1fb18}'
| '\u{1fb19}' | '\u{1fb1a}' | '\u{1fb1b}' | '\u{1fb1c}' | '\u{1fb1d}'
| '\u{1fb2d}' | '\u{1fb2e}' | '\u{1fb2f}' | '\u{1fb30}' | '\u{1fb31}'
| '\u{1fb32}' | '\u{1fb33}' | '\u{1fb34}' | '\u{1fb35}' | '\u{1fb36}'
| '\u{1fb37}' | '\u{1fb38}' | '\u{1fb39}' | '\u{1fb3a}' | '\u{1fb3b}' => {
(x_center, y_last_third)
},
_ => (0., 0.),
};
let (w_bottom_right, h_bottom_right) = match character {
'\u{1fb1e}' | '\u{1fb1f}' | '\u{1fb20}' | '\u{1fb21}' | '\u{1fb22}'
| '\u{1fb23}' | '\u{1fb24}' | '\u{1fb25}' | '\u{1fb26}' | '\u{1fb27}'
| '\u{1fb28}' | '\u{1fb29}' | '\u{1fb2a}' | '\u{1fb2b}' | '\u{1fb2c}'
| '\u{1fb2d}' | '\u{1fb2e}' | '\u{1fb2f}' | '\u{1fb30}' | '\u{1fb31}'
| '\u{1fb32}' | '\u{1fb33}' | '\u{1fb34}' | '\u{1fb35}' | '\u{1fb36}'
| '\u{1fb37}' | '\u{1fb38}' | '\u{1fb39}' | '\u{1fb3a}' | '\u{1fb3b}' => {
(x_center, y_last_third)
},
_ => (0., 0.),
};
canvas.draw_rect(0., 0., w_top_left, h_top_left, COLOR_FILL);
canvas.draw_rect(x_center, 0., w_top_right, h_top_right, COLOR_FILL);
canvas.draw_rect(0., y_third, w_mid_left, h_mid_left, COLOR_FILL);
canvas.draw_rect(x_center, y_third, w_mid_right, h_mid_right, COLOR_FILL);
canvas.draw_rect(0., y_third * 2., w_bottom_left, h_bottom_left, COLOR_FILL);
canvas.draw_rect(x_center, y_third * 2., w_bottom_right, h_bottom_right, COLOR_FILL);
},
_ => unreachable!(),
}
let top = height as i32 + metrics.descent as i32;
let buffer = BitmapBuffer::Rgb(canvas.into_raw());
RasterizedGlyph {
character,
top,
left: 0,
height: height as i32,
width: width as i32,
buffer,
advance: (width as i32, height as i32),
}
}
fn powerline_drawing(
character: char,
metrics: &Metrics,
offset: &Delta<i8>,
) -> Option<RasterizedGlyph> {
let height = (metrics.line_height as i32 + offset.y as i32) as usize;
let width = (metrics.average_advance as i32 + offset.x as i32) as usize;
let extra_thickness = calculate_stroke_size(width) as i32 - 1;
let mut canvas = Canvas::new(width, height);
let slope = 1;
let top_y = 1;
let bottom_y = height as i32 - top_y - 1;
// Start with offset `1` and draw until the intersection of the f(x) = slope * x + 1 and
// g(x) = H - slope * x - 1 lines. The intersection happens when f(x) = g(x), which is at
// x = (H - 2) / (2 * slope).
let x_intersection = (height as i32 + 1) / 2 - 1;
// Don't use built-in font if we'd cut the tip too much, for example when the font is really
// narrow.
if x_intersection - width as i32 > 1 {
return None;
}
let top_line = (0..x_intersection).map(|x| line_equation(slope, x, top_y));
let bottom_line = (0..x_intersection).map(|x| line_equation(-slope, x, bottom_y));
// Inner lines to make arrows thicker.
let mut top_inner_line = (0..x_intersection - extra_thickness)
.map(|x| line_equation(slope, x, top_y + extra_thickness));
let mut bottom_inner_line = (0..x_intersection - extra_thickness)
.map(|x| line_equation(-slope, x, bottom_y - extra_thickness));
// NOTE: top_line and bottom_line have the same amount of iterations.
for (p1, p2) in top_line.zip(bottom_line) {
if character == POWERLINE_TRIANGLE_LTR || character == POWERLINE_TRIANGLE_RTL {
canvas.draw_rect(0., p1.1, p1.0 + 1., 1., COLOR_FILL);
canvas.draw_rect(0., p2.1, p2.0 + 1., 1., COLOR_FILL);
} else if character == POWERLINE_ARROW_LTR || character == POWERLINE_ARROW_RTL {
let p3 = top_inner_line.next().unwrap_or(p2);
let p4 = bottom_inner_line.next().unwrap_or(p1);
// If we can't fit the entire arrow in the cell, we cut off the tip of the arrow by
// drawing a rectangle between the two lines.
if p1.0 as usize + 1 == width {
canvas.draw_rect(p1.0, p1.1, 1., p2.1 - p1.1 + 1., COLOR_FILL);
break;
} else {
canvas.draw_rect(p1.0, p1.1, 1., p3.1 - p1.1 + 1., COLOR_FILL);
canvas.draw_rect(p4.0, p4.1, 1., p2.1 - p4.1 + 1., COLOR_FILL);
}
}
}
if character == POWERLINE_TRIANGLE_RTL || character == POWERLINE_ARROW_RTL {
canvas.flip_horizontal();
}
let top = height as i32 + metrics.descent as i32;
let buffer = BitmapBuffer::Rgb(canvas.into_raw());
Some(RasterizedGlyph {
character,
top,
left: 0,
height: height as i32,
width: width as i32,
buffer,
advance: (width as i32, height as i32),
})
}
/// A single RGB pixel of the [`Canvas`] buffer.
///
/// `repr(C, packed)` guarantees the three channel bytes are laid out contiguously with no
/// padding, which `Canvas::into_raw` relies on when reinterpreting the buffer as raw `u8`s.
#[repr(C, packed)]
#[derive(Clone, Copy, Debug, Default)]
struct Pixel {
    /// Red channel.
    _r: u8,
    /// Green channel.
    _g: u8,
    /// Blue channel.
    _b: u8,
}
impl Pixel {
fn gray(color: u8) -> Self {
Self { _r: color, _g: color, _b: color }
}
}
impl ops::Add for Pixel {
    type Output = Pixel;

    /// Channel-wise saturating addition; each channel clamps at 255 instead of wrapping.
    fn add(self, rhs: Pixel) -> Self::Output {
        Pixel {
            _r: self._r.saturating_add(rhs._r),
            _g: self._g.saturating_add(rhs._g),
            _b: self._b.saturating_add(rhs._b),
        }
    }
}
impl ops::Div<u8> for Pixel {
    type Output = Pixel;

    /// Channel-wise integer division by a scalar (panics on `rhs == 0`, like any `u8` division).
    fn div(self, rhs: u8) -> Self::Output {
        Pixel { _r: self._r / rhs, _g: self._g / rhs, _b: self._b / rhs }
    }
}
/// Canvas which is used for simple line drawing operations.
///
/// The coordinate system has its origin in the top-left corner, with `x`
/// growing to the right and `y` growing downwards:
///
///   0           x
///   --------------→
///   |
///   |
///   |
///   |
///  y↓
struct Canvas {
    /// Canvas width.
    width: usize,
    /// Canvas height.
    height: usize,
    /// Canvas buffer we draw on, stored in row-major order (index `y * width + x`).
    buffer: Vec<Pixel>,
}
impl Canvas {
    /// Builds new `Canvas` for line drawing with the given `width` and `height`, initially
    /// filled with the default (black) color.
    fn new(width: usize, height: usize) -> Self {
        let buffer = vec![Pixel::default(); width * height];
        Self { width, height, buffer }
    }

    /// Vertical center of the `Canvas`.
    fn y_center(&self) -> f32 {
        self.height as f32 / 2.
    }

    /// Horizontal center of the `Canvas`.
    fn x_center(&self) -> f32 {
        self.width as f32 / 2.
    }

    /// Canvas underlying buffer for direct manipulation.
    fn buffer_mut(&mut self) -> &mut [Pixel] {
        &mut self.buffer
    }

    /// Gives bounds for a horizontal straight line on `y` with `stroke_size`, clamped to the
    /// canvas height.
    fn h_line_bounds(&self, y: f32, stroke_size: usize) -> (f32, f32) {
        let start_y = cmp::max((y - stroke_size as f32 / 2.) as i32, 0) as f32;
        let end_y = cmp::min((y + stroke_size as f32 / 2.) as i32, self.height as i32) as f32;
        (start_y, end_y)
    }

    /// Gives bounds for a vertical straight line on `x` with `stroke_size`, clamped to the
    /// canvas width.
    fn v_line_bounds(&self, x: f32, stroke_size: usize) -> (f32, f32) {
        let start_x = cmp::max((x - stroke_size as f32 / 2.) as i32, 0) as f32;
        let end_x = cmp::min((x + stroke_size as f32 / 2.) as i32, self.width as i32) as f32;
        (start_x, end_x)
    }

    /// Flip the canvas horizontally, mirroring each row around the vertical center.
    fn flip_horizontal(&mut self) {
        for row in 0..self.height {
            for col in 0..self.width / 2 {
                let index = row * self.width;
                self.buffer.swap(index + col, index + self.width - col - 1)
            }
        }
    }

    /// Draws a horizontal straight line from (`x`, `y`) of `size` with the given `stroke_size`.
    fn draw_h_line(&mut self, x: f32, y: f32, size: f32, stroke_size: usize) {
        let (start_y, end_y) = self.h_line_bounds(y, stroke_size);
        self.draw_rect(x, start_y, size, end_y - start_y, COLOR_FILL);
    }

    /// Draws a vertical straight line from (`x`, `y`) of `size` with the given `stroke_size`.
    fn draw_v_line(&mut self, x: f32, y: f32, size: f32, stroke_size: usize) {
        let (start_x, end_x) = self.v_line_bounds(x, stroke_size);
        self.draw_rect(start_x, y, end_x - start_x, size, COLOR_FILL);
    }

    /// Draws a rect from the (`x`, `y`) of the given `width` and `height` using `color`,
    /// clipped to the canvas bounds.
    fn draw_rect(&mut self, x: f32, y: f32, width: f32, height: f32, color: Pixel) {
        let start_x = x as usize;
        let end_x = cmp::min((x + width) as usize, self.width);
        let start_y = y as usize;
        let end_y = cmp::min((y + height) as usize, self.height);
        for y in start_y..end_y {
            // The buffer is row-major, so offset each row's span by `y * width`.
            let y = y * self.width;
            self.buffer[start_x + y..end_x + y].fill(color);
        }
    }

    /// Put pixel into buffer with the given color if the color is brighter than the one buffer
    /// already has in place. Out-of-bounds coordinates are silently discarded.
    #[inline]
    fn put_pixel(&mut self, x: f32, y: f32, color: Pixel) {
        if x < 0. || y < 0. || x > self.width as f32 - 1. || y > self.height as f32 - 1. {
            return;
        }
        let index = x as usize + y as usize * self.width;
        if color._r > self.buffer[index]._r {
            self.buffer[index] = color;
        }
    }

    /// Xiaolin Wu's anti-aliased line drawing from (`from_x`, `from_y`) to (`to_x`, `to_y`).
    fn draw_line(&mut self, mut from_x: f32, mut from_y: f32, mut to_x: f32, mut to_y: f32) {
        // For steep lines iterate along `y` instead of `x` by transposing the coordinates;
        // `put_pixel` arguments are swapped back accordingly below.
        let steep = (to_y - from_y).abs() > (to_x - from_x).abs();
        if steep {
            mem::swap(&mut from_x, &mut from_y);
            mem::swap(&mut to_x, &mut to_y);
        }
        // Always draw left to right.
        if from_x > to_x {
            mem::swap(&mut from_x, &mut to_x);
            mem::swap(&mut from_y, &mut to_y);
        }

        let delta_x = to_x - from_x;
        let delta_y = to_y - from_y;
        let gradient = if delta_x.abs() <= f32::EPSILON { 1. } else { delta_y / delta_x };

        // First endpoint.
        let x_end = f32::round(from_x);
        let y_end = from_y + gradient * (x_end - from_x);
        let x_gap = 1. - (from_x + 0.5).fract();
        let xpxl1 = x_end;
        let ypxl1 = y_end.trunc();
        let color_1 = Pixel::gray(((1. - y_end.fract()) * x_gap * COLOR_FILL._r as f32) as u8);
        let color_2 = Pixel::gray((y_end.fract() * x_gap * COLOR_FILL._r as f32) as u8);
        if steep {
            self.put_pixel(ypxl1, xpxl1, color_1);
            self.put_pixel(ypxl1 + 1., xpxl1, color_2);
        } else {
            self.put_pixel(xpxl1, ypxl1, color_1);
            // FIX: the anti-aliasing companion pixel sits one step along `y`, not along `x`
            // (this mirrors the second endpoint and the steep branch above).
            self.put_pixel(xpxl1, ypxl1 + 1., color_2);
        }

        let mut intery = y_end + gradient;

        // Second endpoint.
        let x_end = f32::round(to_x);
        let y_end = to_y + gradient * (x_end - to_x);
        let x_gap = (to_x + 0.5).fract();
        let xpxl2 = x_end;
        let ypxl2 = y_end.trunc();
        let color_1 = Pixel::gray(((1. - y_end.fract()) * x_gap * COLOR_FILL._r as f32) as u8);
        let color_2 = Pixel::gray((y_end.fract() * x_gap * COLOR_FILL._r as f32) as u8);
        if steep {
            self.put_pixel(ypxl2, xpxl2, color_1);
            self.put_pixel(ypxl2 + 1., xpxl2, color_2);
        } else {
            self.put_pixel(xpxl2, ypxl2, color_1);
            self.put_pixel(xpxl2, ypxl2 + 1., color_2);
        }

        // Interior pixels between the two endpoints.
        if steep {
            for x in xpxl1 as i32 + 1..xpxl2 as i32 {
                let color_1 = Pixel::gray(((1. - intery.fract()) * COLOR_FILL._r as f32) as u8);
                let color_2 = Pixel::gray((intery.fract() * COLOR_FILL._r as f32) as u8);
                self.put_pixel(intery.trunc(), x as f32, color_1);
                self.put_pixel(intery.trunc() + 1., x as f32, color_2);
                intery += gradient;
            }
        } else {
            for x in xpxl1 as i32 + 1..xpxl2 as i32 {
                let color_1 = Pixel::gray(((1. - intery.fract()) * COLOR_FILL._r as f32) as u8);
                let color_2 = Pixel::gray((intery.fract() * COLOR_FILL._r as f32) as u8);
                self.put_pixel(x as f32, intery.trunc(), color_1);
                self.put_pixel(x as f32, intery.trunc() + 1., color_2);
                intery += gradient;
            }
        }
    }

    /// Draws a part of an ellipse centered in `(0., 0.)` with `self.x_center()` and `self.y_center`
    /// vertex and co-vertex respectively using a given `stroke` in the bottom-right quadrant of the
    /// `Canvas` coordinate system.
    fn draw_ellipse_arc(&mut self, stroke_size: usize) {
        // Maps an anti-aliasing error in `[0, 1)` to a pair of partially-transparent fill colors.
        fn colors_with_error(error: f32, max_transparency: f32) -> (Pixel, Pixel) {
            let transparency = error * max_transparency;
            let alpha_1 = 1. - transparency;
            let alpha_2 = 1. - (max_transparency - transparency);
            let color_1 = Pixel::gray((COLOR_FILL._r as f32 * alpha_1) as u8);
            let color_2 = Pixel::gray((COLOR_FILL._r as f32 * alpha_2) as u8);
            (color_1, color_2)
        }

        let h_line_bounds = self.h_line_bounds(self.y_center(), stroke_size);
        let v_line_bounds = self.v_line_bounds(self.x_center(), stroke_size);
        let h_line_bounds = (h_line_bounds.0 as usize, h_line_bounds.1 as usize);
        let v_line_bounds = (v_line_bounds.0 as usize, v_line_bounds.1 as usize);

        let max_transparency = 0.5;
        // Draw one concentric arc per unit of stroke thickness.
        for (radius_y, radius_x) in
            (h_line_bounds.0..h_line_bounds.1).zip(v_line_bounds.0..v_line_bounds.1)
        {
            let radius_x = radius_x as f32;
            let radius_y = radius_y as f32;
            let radius_x2 = radius_x * radius_x;
            let radius_y2 = radius_y * radius_y;
            // x-major part of the arc (up to the 45° point of the ellipse).
            let quarter = f32::round(radius_x2 / f32::sqrt(radius_x2 + radius_y2)) as usize;
            for x in 0..=quarter {
                let x = x as f32;
                let y = radius_y * f32::sqrt(1. - x * x / radius_x2);
                let error = y.fract();
                let (color_1, color_2) = colors_with_error(error, max_transparency);
                let x = x.clamp(0., radius_x);
                let y_next = (y + 1.).clamp(0., h_line_bounds.1 as f32 - 1.);
                let y = y.clamp(0., h_line_bounds.1 as f32 - 1.);
                self.put_pixel(x, y, color_1);
                self.put_pixel(x, y_next, color_2);
            }
            // y-major part of the arc.
            let quarter = f32::round(radius_y2 / f32::sqrt(radius_x2 + radius_y2)) as usize;
            for y in 0..=quarter {
                let y = y as f32;
                let x = radius_x * f32::sqrt(1. - y * y / radius_y2);
                // FIX: the anti-aliasing error is the fractional part of `x`, mirroring the
                // x-major loop above; `x - x.fract()` was the integral part and produced
                // out-of-range transparency values.
                let error = x.fract();
                let (color_1, color_2) = colors_with_error(error, max_transparency);
                let x_next = (x + 1.).clamp(0., v_line_bounds.1 as f32 - 1.);
                let x = x.clamp(0., v_line_bounds.1 as f32 - 1.);
                let y = y.clamp(0., radius_y);
                self.put_pixel(x, y, color_1);
                self.put_pixel(x_next, y, color_2);
            }
        }

        // Ensure the part closer to edges is properly filled.
        self.draw_h_line(0., self.y_center(), stroke_size as f32, stroke_size);
        self.draw_v_line(self.x_center(), 0., stroke_size as f32, stroke_size);

        // Fill the resulted arc, since it could have gaps in-between.
        for y in 0..self.height {
            let row = y * self.width;
            let left = match self.buffer[row..row + self.width].iter().position(|p| p._r != 0) {
                Some(left) => row + left,
                _ => continue,
            };
            let right = match self.buffer[row..row + self.width].iter().rposition(|p| p._r != 0) {
                Some(right) => row + right,
                _ => continue,
            };
            for index in left + 1..right {
                self.buffer[index] =
                    self.buffer[index] + self.buffer[index - 1] / 2 + self.buffer[index + 1] / 2;
            }
        }
    }

    /// Fills the `Canvas` with the given `Color`.
    fn fill(&mut self, color: Pixel) {
        self.buffer.fill(color);
    }

    /// Consumes `Canvas` and returns its underlying storage as raw byte vector.
    fn into_raw(self) -> Vec<u8> {
        // SAFETY This is safe since we use `repr(packed)` on `Pixel` struct for underlying storage
        // of the `Canvas` buffer which consists of three u8 values.
        unsafe {
            let capacity = self.buffer.capacity() * mem::size_of::<Pixel>();
            let len = self.buffer.len() * mem::size_of::<Pixel>();
            let buf = self.buffer.as_ptr() as *mut u8;
            mem::forget(self.buffer);
            Vec::from_raw_parts(buf, len, capacity)
        }
    }
}
/// Compute line width.
///
/// One eighth of the cell width (the step size used by the block elements),
/// but never less than one pixel.
fn calculate_stroke_size(cell_width: usize) -> usize {
    let eighth = (cell_width as f32 / 8.).round() as usize;
    eighth.max(1)
}
/// `f(x) = slope * x + offset` equation.
///
/// Evaluates the line at integer `x` and returns the `(x, f(x))` point as
/// floats, ready for canvas drawing.
fn line_equation(slope: i32, x: i32, offset: i32) -> (f32, f32) {
    let y = slope * x + offset;
    (x as f32, y as f32)
}
#[cfg(test)]
mod tests {
    use super::*;

    use crossfont::Metrics;

    // Dummy metrics values to test builtin glyphs coverage.
    const METRICS: Metrics = Metrics {
        average_advance: 6.,
        line_height: 16.,
        descent: 4.,
        underline_position: 2.,
        underline_thickness: 2.,
        strikeout_position: 2.,
        strikeout_thickness: 2.,
    };

    #[test]
    fn builtin_line_drawing_glyphs_coverage() {
        let offset = Default::default();
        let glyph_offset = Default::default();

        // Every box drawing character, block element, quadrant and sextant is covered.
        for ch in ('\u{2500}'..='\u{259f}').chain('\u{1fb00}'..='\u{1fb3b}') {
            assert!(builtin_glyph(ch, &METRICS, &offset, &glyph_offset).is_some());
        }

        // Neighbouring codepoint ranges are left to the real font.
        for ch in ('\u{2450}'..'\u{2500}').chain('\u{25a0}'..'\u{2600}') {
            assert!(builtin_glyph(ch, &METRICS, &offset, &glyph_offset).is_none());
        }
    }

    #[test]
    fn builtin_powerline_glyphs_coverage() {
        let offset = Default::default();
        let glyph_offset = Default::default();

        // Only the four powerline symbols are provided by the built-in font.
        for ch in '\u{e0b0}'..='\u{e0b3}' {
            assert!(builtin_glyph(ch, &METRICS, &offset, &glyph_offset).is_some());
        }
        for ch in ('\u{e0a0}'..'\u{e0b0}').chain('\u{e0b4}'..'\u{e0c0}') {
            assert!(builtin_glyph(ch, &METRICS, &offset, &glyph_offset).is_none());
        }
    }
}
| rust | {
"argument_definitions": [
{
"definitions": [
"pub struct Metrics {\n pub average_advance: f64,\n pub line_height: f64,\n pub descent: f32,\n pub underline_position: f32,\n pub underline_thickness: f32,\n pub strikeout_position: f32,\n pub strikeout_thickness: f32,\n}"
],
"name": "metrics",
"type": "&Metrics"
},
{
"definitions": [
"pub struct Delta<T: Default> {\n /// Horizontal change.\n pub x: T,\n /// Vertical change.\n pub y: T,\n}"
],
"name": "offset",
"type": "&Delta<i8>"
}
],
"end_line": 588,
"name": "box_drawing",
"signature": "fn box_drawing(character: char, metrics: &Metrics, offset: &Delta<i8>) -> RasterizedGlyph",
"start_line": 49
} | {
"class_name": "",
"class_signature": ""
} |
powerline_drawing | alacritty-master/alacritty/src/renderer/text/builtin_font.rs | fn powerline_drawing(
character: char,
metrics: &Metrics,
offset: &Delta<i8>,
) -> Option<RasterizedGlyph> {
let height = (metrics.line_height as i32 + offset.y as i32) as usize;
let width = (metrics.average_advance as i32 + offset.x as i32) as usize;
let extra_thickness = calculate_stroke_size(width) as i32 - 1;
let mut canvas = Canvas::new(width, height);
let slope = 1;
let top_y = 1;
let bottom_y = height as i32 - top_y - 1;
// Start with offset `1` and draw until the intersection of the f(x) = slope * x + 1 and
// g(x) = H - slope * x - 1 lines. The intersection happens when f(x) = g(x), which is at
// x = (H - 2) / (2 * slope).
let x_intersection = (height as i32 + 1) / 2 - 1;
// Don't use built-in font if we'd cut the tip too much, for example when the font is really
// narrow.
if x_intersection - width as i32 > 1 {
return None;
}
let top_line = (0..x_intersection).map(|x| line_equation(slope, x, top_y));
let bottom_line = (0..x_intersection).map(|x| line_equation(-slope, x, bottom_y));
// Inner lines to make arrows thicker.
let mut top_inner_line = (0..x_intersection - extra_thickness)
.map(|x| line_equation(slope, x, top_y + extra_thickness));
let mut bottom_inner_line = (0..x_intersection - extra_thickness)
.map(|x| line_equation(-slope, x, bottom_y - extra_thickness));
// NOTE: top_line and bottom_line have the same amount of iterations.
for (p1, p2) in top_line.zip(bottom_line) {
if character == POWERLINE_TRIANGLE_LTR || character == POWERLINE_TRIANGLE_RTL {
canvas.draw_rect(0., p1.1, p1.0 + 1., 1., COLOR_FILL);
canvas.draw_rect(0., p2.1, p2.0 + 1., 1., COLOR_FILL);
} else if character == POWERLINE_ARROW_LTR || character == POWERLINE_ARROW_RTL {
let p3 = top_inner_line.next().unwrap_or(p2);
let p4 = bottom_inner_line.next().unwrap_or(p1);
// If we can't fit the entire arrow in the cell, we cut off the tip of the arrow by
// drawing a rectangle between the two lines.
if p1.0 as usize + 1 == width {
canvas.draw_rect(p1.0, p1.1, 1., p2.1 - p1.1 + 1., COLOR_FILL);
break;
} else {
canvas.draw_rect(p1.0, p1.1, 1., p3.1 - p1.1 + 1., COLOR_FILL);
canvas.draw_rect(p4.0, p4.1, 1., p2.1 - p4.1 + 1., COLOR_FILL);
}
}
}
if character == POWERLINE_TRIANGLE_RTL || character == POWERLINE_ARROW_RTL {
canvas.flip_horizontal();
}
let top = height as i32 + metrics.descent as i32;
let buffer = BitmapBuffer::Rgb(canvas.into_raw());
Some(RasterizedGlyph {
character,
top,
left: 0,
height: height as i32,
width: width as i32,
buffer,
advance: (width as i32, height as i32),
})
} | //! Hand-rolled drawing of unicode characters that need to fully cover their character area.
use std::{cmp, mem, ops};
use crossfont::{BitmapBuffer, Metrics, RasterizedGlyph};
use crate::config::ui_config::Delta;
// Colors which are used for filling shade variants.
const COLOR_FILL_ALPHA_STEP_1: Pixel = Pixel { _r: 192, _g: 192, _b: 192 };
const COLOR_FILL_ALPHA_STEP_2: Pixel = Pixel { _r: 128, _g: 128, _b: 128 };
const COLOR_FILL_ALPHA_STEP_3: Pixel = Pixel { _r: 64, _g: 64, _b: 64 };
/// Default color used for filling.
const COLOR_FILL: Pixel = Pixel { _r: 255, _g: 255, _b: 255 };
const POWERLINE_TRIANGLE_LTR: char = '\u{e0b0}';
const POWERLINE_ARROW_LTR: char = '\u{e0b1}';
const POWERLINE_TRIANGLE_RTL: char = '\u{e0b2}';
const POWERLINE_ARROW_RTL: char = '\u{e0b3}';
/// Returns the rasterized glyph if the character is part of the built-in font.
///
/// Covers box drawing characters, block elements, quadrants/sextants and the four
/// basic powerline symbols; any other character returns `None` so the regular font
/// rasterizer handles it.
pub fn builtin_glyph(
    character: char,
    metrics: &Metrics,
    offset: &Delta<i8>,
    glyph_offset: &Delta<i8>,
) -> Option<RasterizedGlyph> {
    let mut glyph = match character {
        // Box drawing characters and block elements.
        '\u{2500}'..='\u{259f}' | '\u{1fb00}'..='\u{1fb3b}' => {
            box_drawing(character, metrics, offset)
        },
        // Powerline symbols: '','','',''
        // NOTE: may return `None` for cells too narrow to hold the glyph.
        POWERLINE_TRIANGLE_LTR..=POWERLINE_ARROW_RTL => {
            powerline_drawing(character, metrics, offset)?
        },
        _ => return None,
    };

    // Since we want to ignore `glyph_offset` for the built-in font, subtract it to compensate its
    // addition when loading glyphs in the renderer.
    glyph.left -= glyph_offset.x as i32;
    glyph.top -= glyph_offset.y as i32;

    Some(glyph)
}
fn box_drawing(character: char, metrics: &Metrics, offset: &Delta<i8>) -> RasterizedGlyph {
// Ensure that width and height is at least one.
let height = (metrics.line_height as i32 + offset.y as i32).max(1) as usize;
let width = (metrics.average_advance as i32 + offset.x as i32).max(1) as usize;
let stroke_size = calculate_stroke_size(width);
let heavy_stroke_size = stroke_size * 2;
// Certain symbols require larger canvas than the cell itself, since for proper contiguous
// lines they require drawing on neighbour cells. So treat them specially early on and handle
// 'normal' characters later.
let mut canvas = match character {
// Diagonals: '╱', '╲', '╳'.
'\u{2571}'..='\u{2573}' => {
// Last coordinates.
let x_end = width as f32;
let mut y_end = height as f32;
let top = height as i32 + metrics.descent as i32 + stroke_size as i32;
let height = height + 2 * stroke_size;
let mut canvas = Canvas::new(width, height + 2 * stroke_size);
// The offset that we should take into account when drawing, since we've enlarged
// buffer vertically by twice of that amount.
let y_offset = stroke_size as f32;
y_end += y_offset;
let k = y_end / x_end;
let f_x = |x: f32, h: f32| -> f32 { -1. * k * x + h + y_offset };
let g_x = |x: f32, h: f32| -> f32 { k * x + h + y_offset };
let from_x = 0.;
let to_x = x_end + 1.;
for stroke_size in 0..2 * stroke_size {
let stroke_size = stroke_size as f32 / 2.;
if character == '\u{2571}' || character == '\u{2573}' {
let h = y_end - stroke_size;
let from_y = f_x(from_x, h);
let to_y = f_x(to_x, h);
canvas.draw_line(from_x, from_y, to_x, to_y);
}
if character == '\u{2572}' || character == '\u{2573}' {
let from_y = g_x(from_x, stroke_size);
let to_y = g_x(to_x, stroke_size);
canvas.draw_line(from_x, from_y, to_x, to_y);
}
}
let buffer = BitmapBuffer::Rgb(canvas.into_raw());
return RasterizedGlyph {
character,
top,
left: 0,
height: height as i32,
width: width as i32,
buffer,
advance: (width as i32, height as i32),
};
},
_ => Canvas::new(width, height),
};
match character {
// Horizontal dashes: '┄', '┅', '┈', '┉', '╌', '╍'.
'\u{2504}' | '\u{2505}' | '\u{2508}' | '\u{2509}' | '\u{254c}' | '\u{254d}' => {
let (num_gaps, stroke_size) = match character {
'\u{2504}' => (2, stroke_size),
'\u{2505}' => (2, heavy_stroke_size),
'\u{2508}' => (3, stroke_size),
'\u{2509}' => (3, heavy_stroke_size),
'\u{254c}' => (1, stroke_size),
'\u{254d}' => (1, heavy_stroke_size),
_ => unreachable!(),
};
let dash_gap_len = cmp::max(width / 8, 1);
let dash_len =
cmp::max(width.saturating_sub(dash_gap_len * num_gaps) / (num_gaps + 1), 1);
let y = canvas.y_center();
for gap in 0..=num_gaps {
let x = cmp::min(gap * (dash_len + dash_gap_len), width);
canvas.draw_h_line(x as f32, y, dash_len as f32, stroke_size);
}
},
// Vertical dashes: '┆', '┇', '┊', '┋', '╎', '╏'.
'\u{2506}' | '\u{2507}' | '\u{250a}' | '\u{250b}' | '\u{254e}' | '\u{254f}' => {
let (num_gaps, stroke_size) = match character {
'\u{2506}' => (2, stroke_size),
'\u{2507}' => (2, heavy_stroke_size),
'\u{250a}' => (3, stroke_size),
'\u{250b}' => (3, heavy_stroke_size),
'\u{254e}' => (1, stroke_size),
'\u{254f}' => (1, heavy_stroke_size),
_ => unreachable!(),
};
let dash_gap_len = cmp::max(height / 8, 1);
let dash_len =
cmp::max(height.saturating_sub(dash_gap_len * num_gaps) / (num_gaps + 1), 1);
let x = canvas.x_center();
for gap in 0..=num_gaps {
let y = cmp::min(gap * (dash_len + dash_gap_len), height);
canvas.draw_v_line(x, y as f32, dash_len as f32, stroke_size);
}
},
// Horizontal lines: '─', '━', '╴', '╶', '╸', '╺'.
// Vertical lines: '│', '┃', '╵', '╷', '╹', '╻'.
// Light and heavy line box components:
// '┌','┍','┎','┏','┐','┑','┒','┓','└','┕','┖','┗','┘','┙','┚','┛',├','┝','┞','┟','┠','┡',
// '┢','┣','┤','┥','┦','┧','┨','┩','┪','┫','┬','┭','┮','┯','┰','┱','┲','┳','┴','┵','┶','┷',
// '┸','┹','┺','┻','┼','┽','┾','┿','╀','╁','╂','╃','╄','╅','╆','╇','╈','╉','╊','╋'.
// Mixed light and heavy lines: '╼', '╽', '╾', '╿'.
'\u{2500}'..='\u{2503}' | '\u{250c}'..='\u{254b}' | '\u{2574}'..='\u{257f}' => {
// Left horizontal line.
let stroke_size_h1 = match character {
'\u{2500}' | '\u{2510}' | '\u{2512}' | '\u{2518}' | '\u{251a}' | '\u{2524}'
| '\u{2526}' | '\u{2527}' | '\u{2528}' | '\u{252c}' | '\u{252e}' | '\u{2530}'
| '\u{2532}' | '\u{2534}' | '\u{2536}' | '\u{2538}' | '\u{253a}' | '\u{253c}'
| '\u{253e}' | '\u{2540}' | '\u{2541}' | '\u{2542}' | '\u{2544}' | '\u{2546}'
| '\u{254a}' | '\u{2574}' | '\u{257c}' => stroke_size,
'\u{2501}' | '\u{2511}' | '\u{2513}' | '\u{2519}' | '\u{251b}' | '\u{2525}'
| '\u{2529}' | '\u{252a}' | '\u{252b}' | '\u{252d}' | '\u{252f}' | '\u{2531}'
| '\u{2533}' | '\u{2535}' | '\u{2537}' | '\u{2539}' | '\u{253b}' | '\u{253d}'
| '\u{253f}' | '\u{2543}' | '\u{2545}' | '\u{2547}' | '\u{2548}' | '\u{2549}'
| '\u{254b}' | '\u{2578}' | '\u{257e}' => heavy_stroke_size,
_ => 0,
};
// Right horizontal line.
let stroke_size_h2 = match character {
'\u{2500}' | '\u{250c}' | '\u{250e}' | '\u{2514}' | '\u{2516}' | '\u{251c}'
| '\u{251e}' | '\u{251f}' | '\u{2520}' | '\u{252c}' | '\u{252d}' | '\u{2530}'
| '\u{2531}' | '\u{2534}' | '\u{2535}' | '\u{2538}' | '\u{2539}' | '\u{253c}'
| '\u{253d}' | '\u{2540}' | '\u{2541}' | '\u{2542}' | '\u{2543}' | '\u{2545}'
| '\u{2549}' | '\u{2576}' | '\u{257e}' => stroke_size,
'\u{2501}' | '\u{250d}' | '\u{250f}' | '\u{2515}' | '\u{2517}' | '\u{251d}'
| '\u{2521}' | '\u{2522}' | '\u{2523}' | '\u{252e}' | '\u{252f}' | '\u{2532}'
| '\u{2533}' | '\u{2536}' | '\u{2537}' | '\u{253a}' | '\u{253b}' | '\u{253e}'
| '\u{253f}' | '\u{2544}' | '\u{2546}' | '\u{2547}' | '\u{2548}' | '\u{254a}'
| '\u{254b}' | '\u{257a}' | '\u{257c}' => heavy_stroke_size,
_ => 0,
};
// Top vertical line.
let stroke_size_v1 = match character {
'\u{2502}' | '\u{2514}' | '\u{2515}' | '\u{2518}' | '\u{2519}' | '\u{251c}'
| '\u{251d}' | '\u{251f}' | '\u{2522}' | '\u{2524}' | '\u{2525}' | '\u{2527}'
| '\u{252a}' | '\u{2534}' | '\u{2535}' | '\u{2536}' | '\u{2537}' | '\u{253c}'
| '\u{253d}' | '\u{253e}' | '\u{253f}' | '\u{2541}' | '\u{2545}' | '\u{2546}'
| '\u{2548}' | '\u{2575}' | '\u{257d}' => stroke_size,
'\u{2503}' | '\u{2516}' | '\u{2517}' | '\u{251a}' | '\u{251b}' | '\u{251e}'
| '\u{2520}' | '\u{2521}' | '\u{2523}' | '\u{2526}' | '\u{2528}' | '\u{2529}'
| '\u{252b}' | '\u{2538}' | '\u{2539}' | '\u{253a}' | '\u{253b}' | '\u{2540}'
| '\u{2542}' | '\u{2543}' | '\u{2544}' | '\u{2547}' | '\u{2549}' | '\u{254a}'
| '\u{254b}' | '\u{2579}' | '\u{257f}' => heavy_stroke_size,
_ => 0,
};
// Bottom vertical line.
let stroke_size_v2 = match character {
'\u{2502}' | '\u{250c}' | '\u{250d}' | '\u{2510}' | '\u{2511}' | '\u{251c}'
| '\u{251d}' | '\u{251e}' | '\u{2521}' | '\u{2524}' | '\u{2525}' | '\u{2526}'
| '\u{2529}' | '\u{252c}' | '\u{252d}' | '\u{252e}' | '\u{252f}' | '\u{253c}'
| '\u{253d}' | '\u{253e}' | '\u{253f}' | '\u{2540}' | '\u{2543}' | '\u{2544}'
| '\u{2547}' | '\u{2577}' | '\u{257f}' => stroke_size,
'\u{2503}' | '\u{250e}' | '\u{250f}' | '\u{2512}' | '\u{2513}' | '\u{251f}'
| '\u{2520}' | '\u{2522}' | '\u{2523}' | '\u{2527}' | '\u{2528}' | '\u{252a}'
| '\u{252b}' | '\u{2530}' | '\u{2531}' | '\u{2532}' | '\u{2533}' | '\u{2541}'
| '\u{2542}' | '\u{2545}' | '\u{2546}' | '\u{2548}' | '\u{2549}' | '\u{254a}'
| '\u{254b}' | '\u{257b}' | '\u{257d}' => heavy_stroke_size,
_ => 0,
};
let x_v = canvas.x_center();
let y_h = canvas.y_center();
let v_line_bounds_top = canvas.v_line_bounds(x_v, stroke_size_v1);
let v_line_bounds_bot = canvas.v_line_bounds(x_v, stroke_size_v2);
let h_line_bounds_left = canvas.h_line_bounds(y_h, stroke_size_h1);
let h_line_bounds_right = canvas.h_line_bounds(y_h, stroke_size_h2);
let size_h1 = cmp::max(v_line_bounds_top.1 as i32, v_line_bounds_bot.1 as i32) as f32;
let x_h = cmp::min(v_line_bounds_top.0 as i32, v_line_bounds_bot.0 as i32) as f32;
let size_h2 = width as f32 - x_h;
let size_v1 =
cmp::max(h_line_bounds_left.1 as i32, h_line_bounds_right.1 as i32) as f32;
let y_v = cmp::min(h_line_bounds_left.0 as i32, h_line_bounds_right.0 as i32) as f32;
let size_v2 = height as f32 - y_v;
// Left horizontal line.
canvas.draw_h_line(0., y_h, size_h1, stroke_size_h1);
// Right horizontal line.
canvas.draw_h_line(x_h, y_h, size_h2, stroke_size_h2);
// Top vertical line.
canvas.draw_v_line(x_v, 0., size_v1, stroke_size_v1);
// Bottom vertical line.
canvas.draw_v_line(x_v, y_v, size_v2, stroke_size_v2);
},
// Light and double line box components:
// '═','║','╒','╓','╔','╕','╖','╗','╘','╙','╚','╛','╜','╝','╞','╟','╠','╡','╢','╣','╤','╥',
// '╦','╧','╨','╩','╪','╫','╬'.
'\u{2550}'..='\u{256c}' => {
let v_lines = match character {
'\u{2552}' | '\u{2555}' | '\u{2558}' | '\u{255b}' | '\u{255e}' | '\u{2561}'
| '\u{2564}' | '\u{2567}' | '\u{256a}' => (canvas.x_center(), canvas.x_center()),
_ => {
let v_line_bounds = canvas.v_line_bounds(canvas.x_center(), stroke_size);
let left_line = cmp::max(v_line_bounds.0 as i32 - 1, 0) as f32;
let right_line = cmp::min(v_line_bounds.1 as i32 + 1, width as i32) as f32;
(left_line, right_line)
},
};
let h_lines = match character {
'\u{2553}' | '\u{2556}' | '\u{2559}' | '\u{255c}' | '\u{255f}' | '\u{2562}'
| '\u{2565}' | '\u{2568}' | '\u{256b}' => (canvas.y_center(), canvas.y_center()),
_ => {
let h_line_bounds = canvas.h_line_bounds(canvas.y_center(), stroke_size);
let top_line = cmp::max(h_line_bounds.0 as i32 - 1, 0) as f32;
let bottom_line = cmp::min(h_line_bounds.1 as i32 + 1, height as i32) as f32;
(top_line, bottom_line)
},
};
// Get bounds for each double line we could have.
let v_left_bounds = canvas.v_line_bounds(v_lines.0, stroke_size);
let v_right_bounds = canvas.v_line_bounds(v_lines.1, stroke_size);
let h_top_bounds = canvas.h_line_bounds(h_lines.0, stroke_size);
let h_bot_bounds = canvas.h_line_bounds(h_lines.1, stroke_size);
let height = height as f32;
let width = width as f32;
// Left horizontal part.
let (top_left_size, bot_left_size) = match character {
'\u{2550}' | '\u{256b}' => (canvas.x_center(), canvas.x_center()),
'\u{2555}'..='\u{2557}' => (v_right_bounds.1, v_left_bounds.1),
'\u{255b}'..='\u{255d}' => (v_left_bounds.1, v_right_bounds.1),
'\u{2561}'..='\u{2563}' | '\u{256a}' | '\u{256c}' => {
(v_left_bounds.1, v_left_bounds.1)
},
'\u{2564}'..='\u{2568}' => (canvas.x_center(), v_left_bounds.1),
'\u{2569}'..='\u{2569}' => (v_left_bounds.1, canvas.x_center()),
_ => (0., 0.),
};
// Right horizontal part.
let (top_right_x, bot_right_x, right_size) = match character {
'\u{2550}' | '\u{2565}' | '\u{256b}' => {
(canvas.x_center(), canvas.x_center(), width)
},
'\u{2552}'..='\u{2554}' | '\u{2568}' => (v_left_bounds.0, v_right_bounds.0, width),
'\u{2558}'..='\u{255a}' => (v_right_bounds.0, v_left_bounds.0, width),
'\u{255e}'..='\u{2560}' | '\u{256a}' | '\u{256c}' => {
(v_right_bounds.0, v_right_bounds.0, width)
},
'\u{2564}' | '\u{2566}' => (canvas.x_center(), v_right_bounds.0, width),
'\u{2567}' | '\u{2569}' => (v_right_bounds.0, canvas.x_center(), width),
_ => (0., 0., 0.),
};
// Top vertical part.
let (left_top_size, right_top_size) = match character {
'\u{2551}' | '\u{256a}' => (canvas.y_center(), canvas.y_center()),
'\u{2558}'..='\u{255c}' | '\u{2568}' => (h_bot_bounds.1, h_top_bounds.1),
'\u{255d}' => (h_top_bounds.1, h_bot_bounds.1),
'\u{255e}'..='\u{2560}' => (canvas.y_center(), h_top_bounds.1),
'\u{2561}'..='\u{2563}' => (h_top_bounds.1, canvas.y_center()),
'\u{2567}' | '\u{2569}' | '\u{256b}' | '\u{256c}' => {
(h_top_bounds.1, h_top_bounds.1)
},
_ => (0., 0.),
};
// Bottom vertical part.
let (left_bot_y, right_bot_y, bottom_size) = match character {
'\u{2551}' | '\u{256a}' => (canvas.y_center(), canvas.y_center(), height),
'\u{2552}'..='\u{2554}' => (h_top_bounds.0, h_bot_bounds.0, height),
'\u{2555}'..='\u{2557}' => (h_bot_bounds.0, h_top_bounds.0, height),
'\u{255e}'..='\u{2560}' => (canvas.y_center(), h_bot_bounds.0, height),
'\u{2561}'..='\u{2563}' => (h_bot_bounds.0, canvas.y_center(), height),
'\u{2564}'..='\u{2566}' | '\u{256b}' | '\u{256c}' => {
(h_bot_bounds.0, h_bot_bounds.0, height)
},
_ => (0., 0., 0.),
};
// Left horizontal line.
canvas.draw_h_line(0., h_lines.0, top_left_size, stroke_size);
canvas.draw_h_line(0., h_lines.1, bot_left_size, stroke_size);
// Right horizontal line.
canvas.draw_h_line(top_right_x, h_lines.0, right_size, stroke_size);
canvas.draw_h_line(bot_right_x, h_lines.1, right_size, stroke_size);
// Top vertical line.
canvas.draw_v_line(v_lines.0, 0., left_top_size, stroke_size);
canvas.draw_v_line(v_lines.1, 0., right_top_size, stroke_size);
// Bottom vertical line.
canvas.draw_v_line(v_lines.0, left_bot_y, bottom_size, stroke_size);
canvas.draw_v_line(v_lines.1, right_bot_y, bottom_size, stroke_size);
},
// Arcs: '╭', '╮', '╯', '╰'.
'\u{256d}' | '\u{256e}' | '\u{256f}' | '\u{2570}' => {
canvas.draw_ellipse_arc(stroke_size);
// Mirror `X` axis.
if character == '\u{256d}' || character == '\u{2570}' {
let center = canvas.x_center() as usize;
let extra_offset = usize::from(stroke_size % 2 != width % 2);
let buffer = canvas.buffer_mut();
for y in 1..height {
let left = (y - 1) * width;
let right = y * width - 1;
if extra_offset != 0 {
buffer[right] = buffer[left];
}
for offset in 0..center {
buffer.swap(left + offset, right - offset - extra_offset);
}
}
}
// Mirror `Y` axis.
if character == '\u{256d}' || character == '\u{256e}' {
let center = canvas.y_center() as usize;
let extra_offset = usize::from(stroke_size % 2 != height % 2);
let buffer = canvas.buffer_mut();
if extra_offset != 0 {
let bottom_row = (height - 1) * width;
for index in 0..width {
buffer[bottom_row + index] = buffer[index];
}
}
for offset in 1..=center {
let top_row = (offset - 1) * width;
let bottom_row = (height - offset - extra_offset) * width;
for index in 0..width {
buffer.swap(top_row + index, bottom_row + index);
}
}
}
},
// Parts of full block: '▀', '▁', '▂', '▃', '▄', '▅', '▆', '▇', '▔', '▉', '▊', '▋', '▌',
// '▍', '▎', '▏', '▐', '▕'.
'\u{2580}'..='\u{2587}' | '\u{2589}'..='\u{2590}' | '\u{2594}' | '\u{2595}' => {
let width = width as f32;
let height = height as f32;
let mut rect_width = match character {
'\u{2589}' => width * 7. / 8.,
'\u{258a}' => width * 6. / 8.,
'\u{258b}' => width * 5. / 8.,
'\u{258c}' => width * 4. / 8.,
'\u{258d}' => width * 3. / 8.,
'\u{258e}' => width * 2. / 8.,
'\u{258f}' => width * 1. / 8.,
'\u{2590}' => width * 4. / 8.,
'\u{2595}' => width * 1. / 8.,
_ => width,
};
let (mut rect_height, mut y) = match character {
'\u{2580}' => (height * 4. / 8., height * 8. / 8.),
'\u{2581}' => (height * 1. / 8., height * 1. / 8.),
'\u{2582}' => (height * 2. / 8., height * 2. / 8.),
'\u{2583}' => (height * 3. / 8., height * 3. / 8.),
'\u{2584}' => (height * 4. / 8., height * 4. / 8.),
'\u{2585}' => (height * 5. / 8., height * 5. / 8.),
'\u{2586}' => (height * 6. / 8., height * 6. / 8.),
'\u{2587}' => (height * 7. / 8., height * 7. / 8.),
'\u{2594}' => (height * 1. / 8., height * 8. / 8.),
_ => (height, height),
};
// Fix `y` coordinates.
y = (height - y).round();
// Ensure that resulted glyph will be visible and also round sizes instead of straight
// flooring them.
rect_width = rect_width.round().max(1.);
rect_height = rect_height.round().max(1.);
let x = match character {
'\u{2590}' => canvas.x_center(),
'\u{2595}' => width - rect_width,
_ => 0.,
};
canvas.draw_rect(x, y, rect_width, rect_height, COLOR_FILL);
},
// Shades: '░', '▒', '▓', '█'.
'\u{2588}' | '\u{2591}' | '\u{2592}' | '\u{2593}' => {
let color = match character {
'\u{2588}' => COLOR_FILL,
'\u{2591}' => COLOR_FILL_ALPHA_STEP_3,
'\u{2592}' => COLOR_FILL_ALPHA_STEP_2,
'\u{2593}' => COLOR_FILL_ALPHA_STEP_1,
_ => unreachable!(),
};
canvas.fill(color);
},
// Quadrants: '▖', '▗', '▘', '▙', '▚', '▛', '▜', '▝', '▞', '▟'.
'\u{2596}'..='\u{259F}' => {
let x_center = canvas.x_center().round().max(1.);
let y_center = canvas.y_center().round().max(1.);
let (w_second, h_second) = match character {
'\u{2598}' | '\u{2599}' | '\u{259a}' | '\u{259b}' | '\u{259c}' => {
(x_center, y_center)
},
_ => (0., 0.),
};
let (w_first, h_first) = match character {
'\u{259b}' | '\u{259c}' | '\u{259d}' | '\u{259e}' | '\u{259f}' => {
(x_center, y_center)
},
_ => (0., 0.),
};
let (w_third, h_third) = match character {
'\u{2596}' | '\u{2599}' | '\u{259b}' | '\u{259e}' | '\u{259f}' => {
(x_center, y_center)
},
_ => (0., 0.),
};
let (w_fourth, h_fourth) = match character {
'\u{2597}' | '\u{2599}' | '\u{259a}' | '\u{259c}' | '\u{259f}' => {
(x_center, y_center)
},
_ => (0., 0.),
};
// Second quadrant.
canvas.draw_rect(0., 0., w_second, h_second, COLOR_FILL);
// First quadrant.
canvas.draw_rect(x_center, 0., w_first, h_first, COLOR_FILL);
// Third quadrant.
canvas.draw_rect(0., y_center, w_third, h_third, COLOR_FILL);
// Fourth quadrant.
canvas.draw_rect(x_center, y_center, w_fourth, h_fourth, COLOR_FILL);
},
// Sextants: '🬀', '🬁', '🬂', '🬃', '🬄', '🬅', '🬆', '🬇', '🬈', '🬉', '🬊', '🬋', '🬌', '🬍', '🬎',
// '🬏', '🬐', '🬑', '🬒', '🬓', '🬔', '🬕', '🬖', '🬗', '🬘', '🬙', '🬚', '🬛', '🬜', '🬝', '🬞', '🬟',
// '🬠', '🬡', '🬢', '🬣', '🬤', '🬥', '🬦', '🬧', '🬨', '🬩', '🬪', '🬫', '🬬', '🬭', '🬮', '🬯', '🬰',
// '🬱', '🬲', '🬳', '🬴', '🬵', '🬶', '🬷', '🬸', '🬹', '🬺', '🬻'.
'\u{1fb00}'..='\u{1fb3b}' => {
let x_center = canvas.x_center().round().max(1.);
let y_third = (height as f32 / 3.).round().max(1.);
let y_last_third = height as f32 - 2. * y_third;
let (w_top_left, h_top_left) = match character {
'\u{1fb00}' | '\u{1fb02}' | '\u{1fb04}' | '\u{1fb06}' | '\u{1fb08}'
| '\u{1fb0a}' | '\u{1fb0c}' | '\u{1fb0e}' | '\u{1fb10}' | '\u{1fb12}'
| '\u{1fb15}' | '\u{1fb17}' | '\u{1fb19}' | '\u{1fb1b}' | '\u{1fb1d}'
| '\u{1fb1f}' | '\u{1fb21}' | '\u{1fb23}' | '\u{1fb25}' | '\u{1fb27}'
| '\u{1fb28}' | '\u{1fb2a}' | '\u{1fb2c}' | '\u{1fb2e}' | '\u{1fb30}'
| '\u{1fb32}' | '\u{1fb34}' | '\u{1fb36}' | '\u{1fb38}' | '\u{1fb3a}' => {
(x_center, y_third)
},
_ => (0., 0.),
};
let (w_top_right, h_top_right) = match character {
'\u{1fb01}' | '\u{1fb02}' | '\u{1fb05}' | '\u{1fb06}' | '\u{1fb09}'
| '\u{1fb0a}' | '\u{1fb0d}' | '\u{1fb0e}' | '\u{1fb11}' | '\u{1fb12}'
| '\u{1fb14}' | '\u{1fb15}' | '\u{1fb18}' | '\u{1fb19}' | '\u{1fb1c}'
| '\u{1fb1d}' | '\u{1fb20}' | '\u{1fb21}' | '\u{1fb24}' | '\u{1fb25}'
| '\u{1fb28}' | '\u{1fb2b}' | '\u{1fb2c}' | '\u{1fb2f}' | '\u{1fb30}'
| '\u{1fb33}' | '\u{1fb34}' | '\u{1fb37}' | '\u{1fb38}' | '\u{1fb3b}' => {
(x_center, y_third)
},
_ => (0., 0.),
};
let (w_mid_left, h_mid_left) = match character {
'\u{1fb03}' | '\u{1fb04}' | '\u{1fb05}' | '\u{1fb06}' | '\u{1fb0b}'
| '\u{1fb0c}' | '\u{1fb0d}' | '\u{1fb0e}' | '\u{1fb13}' | '\u{1fb14}'
| '\u{1fb15}' | '\u{1fb1a}' | '\u{1fb1b}' | '\u{1fb1c}' | '\u{1fb1d}'
| '\u{1fb22}' | '\u{1fb23}' | '\u{1fb24}' | '\u{1fb25}' | '\u{1fb29}'
| '\u{1fb2a}' | '\u{1fb2b}' | '\u{1fb2c}' | '\u{1fb31}' | '\u{1fb32}'
| '\u{1fb33}' | '\u{1fb34}' | '\u{1fb39}' | '\u{1fb3a}' | '\u{1fb3b}' => {
(x_center, y_third)
},
_ => (0., 0.),
};
let (w_mid_right, h_mid_right) = match character {
'\u{1fb07}' | '\u{1fb08}' | '\u{1fb09}' | '\u{1fb0a}' | '\u{1fb0b}'
| '\u{1fb0c}' | '\u{1fb0d}' | '\u{1fb0e}' | '\u{1fb16}' | '\u{1fb17}'
| '\u{1fb18}' | '\u{1fb19}' | '\u{1fb1a}' | '\u{1fb1b}' | '\u{1fb1c}'
| '\u{1fb1d}' | '\u{1fb26}' | '\u{1fb27}' | '\u{1fb28}' | '\u{1fb29}'
| '\u{1fb2a}' | '\u{1fb2b}' | '\u{1fb2c}' | '\u{1fb35}' | '\u{1fb36}'
| '\u{1fb37}' | '\u{1fb38}' | '\u{1fb39}' | '\u{1fb3a}' | '\u{1fb3b}' => {
(x_center, y_third)
},
_ => (0., 0.),
};
let (w_bottom_left, h_bottom_left) = match character {
'\u{1fb0f}' | '\u{1fb10}' | '\u{1fb11}' | '\u{1fb12}' | '\u{1fb13}'
| '\u{1fb14}' | '\u{1fb15}' | '\u{1fb16}' | '\u{1fb17}' | '\u{1fb18}'
| '\u{1fb19}' | '\u{1fb1a}' | '\u{1fb1b}' | '\u{1fb1c}' | '\u{1fb1d}'
| '\u{1fb2d}' | '\u{1fb2e}' | '\u{1fb2f}' | '\u{1fb30}' | '\u{1fb31}'
| '\u{1fb32}' | '\u{1fb33}' | '\u{1fb34}' | '\u{1fb35}' | '\u{1fb36}'
| '\u{1fb37}' | '\u{1fb38}' | '\u{1fb39}' | '\u{1fb3a}' | '\u{1fb3b}' => {
(x_center, y_last_third)
},
_ => (0., 0.),
};
let (w_bottom_right, h_bottom_right) = match character {
'\u{1fb1e}' | '\u{1fb1f}' | '\u{1fb20}' | '\u{1fb21}' | '\u{1fb22}'
| '\u{1fb23}' | '\u{1fb24}' | '\u{1fb25}' | '\u{1fb26}' | '\u{1fb27}'
| '\u{1fb28}' | '\u{1fb29}' | '\u{1fb2a}' | '\u{1fb2b}' | '\u{1fb2c}'
| '\u{1fb2d}' | '\u{1fb2e}' | '\u{1fb2f}' | '\u{1fb30}' | '\u{1fb31}'
| '\u{1fb32}' | '\u{1fb33}' | '\u{1fb34}' | '\u{1fb35}' | '\u{1fb36}'
| '\u{1fb37}' | '\u{1fb38}' | '\u{1fb39}' | '\u{1fb3a}' | '\u{1fb3b}' => {
(x_center, y_last_third)
},
_ => (0., 0.),
};
canvas.draw_rect(0., 0., w_top_left, h_top_left, COLOR_FILL);
canvas.draw_rect(x_center, 0., w_top_right, h_top_right, COLOR_FILL);
canvas.draw_rect(0., y_third, w_mid_left, h_mid_left, COLOR_FILL);
canvas.draw_rect(x_center, y_third, w_mid_right, h_mid_right, COLOR_FILL);
canvas.draw_rect(0., y_third * 2., w_bottom_left, h_bottom_left, COLOR_FILL);
canvas.draw_rect(x_center, y_third * 2., w_bottom_right, h_bottom_right, COLOR_FILL);
},
_ => unreachable!(),
}
let top = height as i32 + metrics.descent as i32;
let buffer = BitmapBuffer::Rgb(canvas.into_raw());
RasterizedGlyph {
character,
top,
left: 0,
height: height as i32,
width: width as i32,
buffer,
advance: (width as i32, height as i32),
}
}
fn powerline_drawing(
character: char,
metrics: &Metrics,
offset: &Delta<i8>,
) -> Option<RasterizedGlyph> {
let height = (metrics.line_height as i32 + offset.y as i32) as usize;
let width = (metrics.average_advance as i32 + offset.x as i32) as usize;
let extra_thickness = calculate_stroke_size(width) as i32 - 1;
let mut canvas = Canvas::new(width, height);
let slope = 1;
let top_y = 1;
let bottom_y = height as i32 - top_y - 1;
// Start with offset `1` and draw until the intersection of the f(x) = slope * x + 1 and
// g(x) = H - slope * x - 1 lines. The intersection happens when f(x) = g(x), which is at
// x = (H - 2) / (2 * slope).
let x_intersection = (height as i32 + 1) / 2 - 1;
// Don't use built-in font if we'd cut the tip too much, for example when the font is really
// narrow.
if x_intersection - width as i32 > 1 {
return None;
}
let top_line = (0..x_intersection).map(|x| line_equation(slope, x, top_y));
let bottom_line = (0..x_intersection).map(|x| line_equation(-slope, x, bottom_y));
// Inner lines to make arrows thicker.
let mut top_inner_line = (0..x_intersection - extra_thickness)
.map(|x| line_equation(slope, x, top_y + extra_thickness));
let mut bottom_inner_line = (0..x_intersection - extra_thickness)
.map(|x| line_equation(-slope, x, bottom_y - extra_thickness));
// NOTE: top_line and bottom_line have the same amount of iterations.
for (p1, p2) in top_line.zip(bottom_line) {
if character == POWERLINE_TRIANGLE_LTR || character == POWERLINE_TRIANGLE_RTL {
canvas.draw_rect(0., p1.1, p1.0 + 1., 1., COLOR_FILL);
canvas.draw_rect(0., p2.1, p2.0 + 1., 1., COLOR_FILL);
} else if character == POWERLINE_ARROW_LTR || character == POWERLINE_ARROW_RTL {
let p3 = top_inner_line.next().unwrap_or(p2);
let p4 = bottom_inner_line.next().unwrap_or(p1);
// If we can't fit the entire arrow in the cell, we cut off the tip of the arrow by
// drawing a rectangle between the two lines.
if p1.0 as usize + 1 == width {
canvas.draw_rect(p1.0, p1.1, 1., p2.1 - p1.1 + 1., COLOR_FILL);
break;
} else {
canvas.draw_rect(p1.0, p1.1, 1., p3.1 - p1.1 + 1., COLOR_FILL);
canvas.draw_rect(p4.0, p4.1, 1., p2.1 - p4.1 + 1., COLOR_FILL);
}
}
}
if character == POWERLINE_TRIANGLE_RTL || character == POWERLINE_ARROW_RTL {
canvas.flip_horizontal();
}
let top = height as i32 + metrics.descent as i32;
let buffer = BitmapBuffer::Rgb(canvas.into_raw());
Some(RasterizedGlyph {
character,
top,
left: 0,
height: height as i32,
width: width as i32,
buffer,
advance: (width as i32, height as i32),
})
}
#[repr(C, packed)]
#[derive(Clone, Copy, Debug, Default)]
struct Pixel {
_r: u8,
_g: u8,
_b: u8,
}
impl Pixel {
fn gray(color: u8) -> Self {
Self { _r: color, _g: color, _b: color }
}
}
impl ops::Add for Pixel {
type Output = Pixel;
fn add(self, rhs: Pixel) -> Self::Output {
let _r = self._r.saturating_add(rhs._r);
let _g = self._g.saturating_add(rhs._g);
let _b = self._b.saturating_add(rhs._b);
Pixel { _r, _g, _b }
}
}
impl ops::Div<u8> for Pixel {
type Output = Pixel;
fn div(self, rhs: u8) -> Self::Output {
let _r = self._r / rhs;
let _g = self._g / rhs;
let _b = self._b / rhs;
Pixel { _r, _g, _b }
}
}
/// Canvas which is used for simple line drawing operations.
///
/// The coordinate system is the following:
///
/// 0 x
/// --------------→
/// |
/// |
/// |
/// |
/// |
/// |
/// y↓
struct Canvas {
/// Canvas width.
width: usize,
/// Canvas height.
height: usize,
/// Canvas buffer we draw on.
buffer: Vec<Pixel>,
}
impl Canvas {
/// Builds new `Canvas` for line drawing with the given `width` and `height` with default color.
fn new(width: usize, height: usize) -> Self {
let buffer = vec![Pixel::default(); width * height];
Self { width, height, buffer }
}
/// Vertical center of the `Canvas`.
fn y_center(&self) -> f32 {
self.height as f32 / 2.
}
/// Horizontal center of the `Canvas`.
fn x_center(&self) -> f32 {
self.width as f32 / 2.
}
/// Canvas underlying buffer for direct manipulation
fn buffer_mut(&mut self) -> &mut [Pixel] {
&mut self.buffer
}
/// Gives bounds for horizontal straight line on `y` with `stroke_size`.
fn h_line_bounds(&self, y: f32, stroke_size: usize) -> (f32, f32) {
let start_y = cmp::max((y - stroke_size as f32 / 2.) as i32, 0) as f32;
let end_y = cmp::min((y + stroke_size as f32 / 2.) as i32, self.height as i32) as f32;
(start_y, end_y)
}
/// Gives bounds for vertical straight line on `y` with `stroke_size`.
fn v_line_bounds(&self, x: f32, stroke_size: usize) -> (f32, f32) {
let start_x = cmp::max((x - stroke_size as f32 / 2.) as i32, 0) as f32;
let end_x = cmp::min((x + stroke_size as f32 / 2.) as i32, self.width as i32) as f32;
(start_x, end_x)
}
/// Flip horizontally.
fn flip_horizontal(&mut self) {
for row in 0..self.height {
for col in 0..self.width / 2 {
let index = row * self.width;
self.buffer.swap(index + col, index + self.width - col - 1)
}
}
}
/// Draws a horizontal straight line from (`x`, `y`) of `size` with the given `stroke_size`.
fn draw_h_line(&mut self, x: f32, y: f32, size: f32, stroke_size: usize) {
let (start_y, end_y) = self.h_line_bounds(y, stroke_size);
self.draw_rect(x, start_y, size, end_y - start_y, COLOR_FILL);
}
/// Draws a vertical straight line from (`x`, `y`) of `size` with the given `stroke_size`.
fn draw_v_line(&mut self, x: f32, y: f32, size: f32, stroke_size: usize) {
let (start_x, end_x) = self.v_line_bounds(x, stroke_size);
self.draw_rect(start_x, y, end_x - start_x, size, COLOR_FILL);
}
/// Draws a rect from the (`x`, `y`) of the given `width` and `height` using `color`.
fn draw_rect(&mut self, x: f32, y: f32, width: f32, height: f32, color: Pixel) {
let start_x = x as usize;
let end_x = cmp::min((x + width) as usize, self.width);
let start_y = y as usize;
let end_y = cmp::min((y + height) as usize, self.height);
for y in start_y..end_y {
let y = y * self.width;
self.buffer[start_x + y..end_x + y].fill(color);
}
}
/// Put pixel into buffer with the given color if the color is brighter than the one buffer
/// already has in place.
#[inline]
fn put_pixel(&mut self, x: f32, y: f32, color: Pixel) {
if x < 0. || y < 0. || x > self.width as f32 - 1. || y > self.height as f32 - 1. {
return;
}
let index = x as usize + y as usize * self.width;
if color._r > self.buffer[index]._r {
self.buffer[index] = color;
}
}
/// Xiaolin Wu's line drawing from (`from_x`, `from_y`) to (`to_x`, `to_y`).
fn draw_line(&mut self, mut from_x: f32, mut from_y: f32, mut to_x: f32, mut to_y: f32) {
let steep = (to_y - from_y).abs() > (to_x - from_x).abs();
if steep {
mem::swap(&mut from_x, &mut from_y);
mem::swap(&mut to_x, &mut to_y);
}
if from_x > to_x {
mem::swap(&mut from_x, &mut to_x);
mem::swap(&mut from_y, &mut to_y);
}
let delta_x = to_x - from_x;
let delta_y = to_y - from_y;
let gradient = if delta_x.abs() <= f32::EPSILON { 1. } else { delta_y / delta_x };
let x_end = f32::round(from_x);
let y_end = from_y + gradient * (x_end - from_x);
let x_gap = 1. - (from_x + 0.5).fract();
let xpxl1 = x_end;
let ypxl1 = y_end.trunc();
let color_1 = Pixel::gray(((1. - y_end.fract()) * x_gap * COLOR_FILL._r as f32) as u8);
let color_2 = Pixel::gray((y_end.fract() * x_gap * COLOR_FILL._r as f32) as u8);
if steep {
self.put_pixel(ypxl1, xpxl1, color_1);
self.put_pixel(ypxl1 + 1., xpxl1, color_2);
} else {
self.put_pixel(xpxl1, ypxl1, color_1);
self.put_pixel(xpxl1 + 1., ypxl1, color_2);
}
let mut intery = y_end + gradient;
let x_end = f32::round(to_x);
let y_end = to_y + gradient * (x_end - to_x);
let x_gap = (to_x + 0.5).fract();
let xpxl2 = x_end;
let ypxl2 = y_end.trunc();
let color_1 = Pixel::gray(((1. - y_end.fract()) * x_gap * COLOR_FILL._r as f32) as u8);
let color_2 = Pixel::gray((y_end.fract() * x_gap * COLOR_FILL._r as f32) as u8);
if steep {
self.put_pixel(ypxl2, xpxl2, color_1);
self.put_pixel(ypxl2 + 1., xpxl2, color_2);
} else {
self.put_pixel(xpxl2, ypxl2, color_1);
self.put_pixel(xpxl2, ypxl2 + 1., color_2);
}
if steep {
for x in xpxl1 as i32 + 1..xpxl2 as i32 {
let color_1 = Pixel::gray(((1. - intery.fract()) * COLOR_FILL._r as f32) as u8);
let color_2 = Pixel::gray((intery.fract() * COLOR_FILL._r as f32) as u8);
self.put_pixel(intery.trunc(), x as f32, color_1);
self.put_pixel(intery.trunc() + 1., x as f32, color_2);
intery += gradient;
}
} else {
for x in xpxl1 as i32 + 1..xpxl2 as i32 {
let color_1 = Pixel::gray(((1. - intery.fract()) * COLOR_FILL._r as f32) as u8);
let color_2 = Pixel::gray((intery.fract() * COLOR_FILL._r as f32) as u8);
self.put_pixel(x as f32, intery.trunc(), color_1);
self.put_pixel(x as f32, intery.trunc() + 1., color_2);
intery += gradient;
}
}
}
/// Draws a part of an ellipse centered in `(0., 0.)` with `self.x_center()` and `self.y_center`
/// vertex and co-vertex respectively using a given `stroke` in the bottom-right quadrant of the
/// `Canvas` coordinate system.
fn draw_ellipse_arc(&mut self, stroke_size: usize) {
fn colors_with_error(error: f32, max_transparency: f32) -> (Pixel, Pixel) {
let transparency = error * max_transparency;
let alpha_1 = 1. - transparency;
let alpha_2 = 1. - (max_transparency - transparency);
let color_1 = Pixel::gray((COLOR_FILL._r as f32 * alpha_1) as u8);
let color_2 = Pixel::gray((COLOR_FILL._r as f32 * alpha_2) as u8);
(color_1, color_2)
}
let h_line_bounds = self.h_line_bounds(self.y_center(), stroke_size);
let v_line_bounds = self.v_line_bounds(self.x_center(), stroke_size);
let h_line_bounds = (h_line_bounds.0 as usize, h_line_bounds.1 as usize);
let v_line_bounds = (v_line_bounds.0 as usize, v_line_bounds.1 as usize);
let max_transparency = 0.5;
for (radius_y, radius_x) in
(h_line_bounds.0..h_line_bounds.1).zip(v_line_bounds.0..v_line_bounds.1)
{
let radius_x = radius_x as f32;
let radius_y = radius_y as f32;
let radius_x2 = radius_x * radius_x;
let radius_y2 = radius_y * radius_y;
let quarter = f32::round(radius_x2 / f32::sqrt(radius_x2 + radius_y2)) as usize;
for x in 0..=quarter {
let x = x as f32;
let y = radius_y * f32::sqrt(1. - x * x / radius_x2);
let error = y.fract();
let (color_1, color_2) = colors_with_error(error, max_transparency);
let x = x.clamp(0., radius_x);
let y_next = (y + 1.).clamp(0., h_line_bounds.1 as f32 - 1.);
let y = y.clamp(0., h_line_bounds.1 as f32 - 1.);
self.put_pixel(x, y, color_1);
self.put_pixel(x, y_next, color_2);
}
let quarter = f32::round(radius_y2 / f32::sqrt(radius_x2 + radius_y2)) as usize;
for y in 0..=quarter {
let y = y as f32;
let x = radius_x * f32::sqrt(1. - y * y / radius_y2);
let error = x - x.fract();
let (color_1, color_2) = colors_with_error(error, max_transparency);
let x_next = (x + 1.).clamp(0., v_line_bounds.1 as f32 - 1.);
let x = x.clamp(0., v_line_bounds.1 as f32 - 1.);
let y = y.clamp(0., radius_y);
self.put_pixel(x, y, color_1);
self.put_pixel(x_next, y, color_2);
}
}
// Ensure the part closer to edges is properly filled.
self.draw_h_line(0., self.y_center(), stroke_size as f32, stroke_size);
self.draw_v_line(self.x_center(), 0., stroke_size as f32, stroke_size);
// Fill the resulted arc, since it could have gaps in-between.
for y in 0..self.height {
let row = y * self.width;
let left = match self.buffer[row..row + self.width].iter().position(|p| p._r != 0) {
Some(left) => row + left,
_ => continue,
};
let right = match self.buffer[row..row + self.width].iter().rposition(|p| p._r != 0) {
Some(right) => row + right,
_ => continue,
};
for index in left + 1..right {
self.buffer[index] =
self.buffer[index] + self.buffer[index - 1] / 2 + self.buffer[index + 1] / 2;
}
}
}
/// Fills the `Canvas` with the given `Color`.
fn fill(&mut self, color: Pixel) {
self.buffer.fill(color);
}
/// Consumes `Canvas` and returns its underlying storage as raw byte vector.
fn into_raw(self) -> Vec<u8> {
// SAFETY This is safe since we use `repr(packed)` on `Pixel` struct for underlying storage
// of the `Canvas` buffer which consists of three u8 values.
unsafe {
let capacity = self.buffer.capacity() * mem::size_of::<Pixel>();
let len = self.buffer.len() * mem::size_of::<Pixel>();
let buf = self.buffer.as_ptr() as *mut u8;
mem::forget(self.buffer);
Vec::from_raw_parts(buf, len, capacity)
}
}
}
/// Compute line width.
fn calculate_stroke_size(cell_width: usize) -> usize {
// Use one eight of the cell width, since this is used as a step size for block elements.
cmp::max((cell_width as f32 / 8.).round() as usize, 1)
}
/// `f(x) = slope * x + offset` equation.
fn line_equation(slope: i32, x: i32, offset: i32) -> (f32, f32) {
(x as f32, (slope * x + offset) as f32)
}
#[cfg(test)]
mod tests {
use super::*;
use crossfont::Metrics;
// Dummy metrics values to test builtin glyphs coverage.
const METRICS: Metrics = Metrics {
average_advance: 6.,
line_height: 16.,
descent: 4.,
underline_position: 2.,
underline_thickness: 2.,
strikeout_position: 2.,
strikeout_thickness: 2.,
};
#[test]
fn builtin_line_drawing_glyphs_coverage() {
let offset = Default::default();
let glyph_offset = Default::default();
// Test coverage of box drawing characters.
for character in ('\u{2500}'..='\u{259f}').chain('\u{1fb00}'..='\u{1fb3b}') {
assert!(builtin_glyph(character, &METRICS, &offset, &glyph_offset).is_some());
}
for character in ('\u{2450}'..'\u{2500}').chain('\u{25a0}'..'\u{2600}') {
assert!(builtin_glyph(character, &METRICS, &offset, &glyph_offset).is_none());
}
}
#[test]
fn builtin_powerline_glyphs_coverage() {
let offset = Default::default();
let glyph_offset = Default::default();
// Test coverage of box drawing characters.
for character in '\u{e0b0}'..='\u{e0b3}' {
assert!(builtin_glyph(character, &METRICS, &offset, &glyph_offset).is_some());
}
for character in ('\u{e0a0}'..'\u{e0b0}').chain('\u{e0b4}'..'\u{e0c0}') {
assert!(builtin_glyph(character, &METRICS, &offset, &glyph_offset).is_none());
}
}
}
| rust | {
"argument_definitions": [
{
"definitions": [
"pub struct Metrics {\n pub average_advance: f64,\n pub line_height: f64,\n pub descent: f32,\n pub underline_position: f32,\n pub underline_thickness: f32,\n pub strikeout_position: f32,\n pub strikeout_thickness: f32,\n}"
],
"name": "metrics",
"type": "&Metrics"
},
{
"definitions": [
"pub struct Delta<T: Default> {\n /// Horizontal change.\n pub x: T,\n /// Vertical change.\n pub y: T,\n}"
],
"name": "offset",
"type": "&Delta<i8>"
}
],
"end_line": 661,
"name": "powerline_drawing",
"signature": "fn powerline_drawing(\n character: char,\n metrics: &Metrics,\n offset: &Delta<i8>,\n) -> Option<RasterizedGlyph>",
"start_line": 590
} | {
"class_name": "",
"class_signature": ""
} |
get | alacritty-master/alacritty/src/renderer/text/glyph_cache.rs | pub fn get(&mut self, glyph_key: GlyphKey, loader: &mut L, show_missing: bool) -> Glyph {
// Try to load glyph from cache.
if let Some(glyph) = self.cache.get(&glyph_key) {
return *glyph;
};
// Rasterize the glyph using the built-in font for special characters or the user's font
// for everything else.
let rasterized = self
.builtin_box_drawing
.then(|| {
builtin_font::builtin_glyph(
glyph_key.character,
&self.metrics,
&self.font_offset,
&self.glyph_offset,
)
})
.flatten()
.map_or_else(|| self.rasterizer.get_glyph(glyph_key), Ok);
let glyph = match rasterized {
Ok(rasterized) => self.load_glyph(loader, rasterized),
// Load fallback glyph.
Err(RasterizerError::MissingGlyph(rasterized)) if show_missing => {
// Use `\0` as "missing" glyph to cache it only once.
let missing_key = GlyphKey { character: '\0', ..glyph_key };
if let Some(glyph) = self.cache.get(&missing_key) {
*glyph
} else {
// If no missing glyph was loaded yet, insert it as `\0`.
let glyph = self.load_glyph(loader, rasterized);
self.cache.insert(missing_key, glyph);
glyph
}
},
Err(_) => self.load_glyph(loader, Default::default()),
};
// Cache rasterized glyph.
*self.cache.entry(glyph_key).or_insert(glyph)
} | use std::collections::HashMap;
use ahash::RandomState;
use crossfont::{
Error as RasterizerError, FontDesc, FontKey, GlyphKey, Metrics, Rasterize, RasterizedGlyph,
Rasterizer, Size, Slant, Style, Weight,
};
use log::{error, info};
use unicode_width::UnicodeWidthChar;
use crate::config::font::{Font, FontDescription};
use crate::config::ui_config::Delta;
use crate::gl::types::*;
use super::builtin_font;
/// `LoadGlyph` allows for copying a rasterized glyph into graphics memory.
pub trait LoadGlyph {
/// Load the rasterized glyph into GPU memory.
fn load_glyph(&mut self, rasterized: &RasterizedGlyph) -> Glyph;
/// Clear any state accumulated from previous loaded glyphs.
///
/// This can, for instance, be used to reset the texture Atlas.
fn clear(&mut self);
}
#[derive(Copy, Clone, Debug)]
pub struct Glyph {
pub tex_id: GLuint,
pub multicolor: bool,
pub top: i16,
pub left: i16,
pub width: i16,
pub height: i16,
pub uv_bot: f32,
pub uv_left: f32,
pub uv_width: f32,
pub uv_height: f32,
}
/// Naïve glyph cache.
///
/// Currently only keyed by `char`, and thus not possible to hold different
/// representations of the same code point.
pub struct GlyphCache {
/// Cache of buffered glyphs.
cache: HashMap<GlyphKey, Glyph, RandomState>,
/// Rasterizer for loading new glyphs.
rasterizer: Rasterizer,
/// Regular font.
pub font_key: FontKey,
/// Bold font.
pub bold_key: FontKey,
/// Italic font.
pub italic_key: FontKey,
/// Bold italic font.
pub bold_italic_key: FontKey,
/// Font size.
pub font_size: crossfont::Size,
/// Font offset.
font_offset: Delta<i8>,
/// Glyph offset.
glyph_offset: Delta<i8>,
/// Font metrics.
metrics: Metrics,
/// Whether to use the built-in font for box drawing characters.
builtin_box_drawing: bool,
}
impl GlyphCache {
pub fn new(mut rasterizer: Rasterizer, font: &Font) -> Result<GlyphCache, crossfont::Error> {
let (regular, bold, italic, bold_italic) = Self::compute_font_keys(font, &mut rasterizer)?;
let metrics = GlyphCache::load_font_metrics(&mut rasterizer, font, regular)?;
Ok(Self {
cache: Default::default(),
rasterizer,
font_size: font.size(),
font_key: regular,
bold_key: bold,
italic_key: italic,
bold_italic_key: bold_italic,
font_offset: font.offset,
glyph_offset: font.glyph_offset,
metrics,
builtin_box_drawing: font.builtin_box_drawing,
})
}
// Load font metrics and adjust for glyph offset.
fn load_font_metrics(
rasterizer: &mut Rasterizer,
font: &Font,
key: FontKey,
) -> Result<Metrics, crossfont::Error> {
// Need to load at least one glyph for the face before calling metrics.
// The glyph requested here ('m' at the time of writing) has no special
// meaning.
rasterizer.get_glyph(GlyphKey { font_key: key, character: 'm', size: font.size() })?;
let mut metrics = rasterizer.metrics(key, font.size())?;
metrics.strikeout_position += font.glyph_offset.y as f32;
Ok(metrics)
}
fn load_glyphs_for_font<L: LoadGlyph>(&mut self, font: FontKey, loader: &mut L) {
let size = self.font_size;
// Cache all ascii characters.
for i in 32u8..=126u8 {
self.get(GlyphKey { font_key: font, character: i as char, size }, loader, true);
}
}
/// Computes font keys for (Regular, Bold, Italic, Bold Italic).
fn compute_font_keys(
font: &Font,
rasterizer: &mut Rasterizer,
) -> Result<(FontKey, FontKey, FontKey, FontKey), crossfont::Error> {
let size = font.size();
// Load regular font.
let regular_desc = Self::make_desc(font.normal(), Slant::Normal, Weight::Normal);
let regular = Self::load_regular_font(rasterizer, ®ular_desc, size)?;
// Helper to load a description if it is not the `regular_desc`.
let mut load_or_regular = |desc: FontDesc| {
if desc == regular_desc {
regular
} else {
rasterizer.load_font(&desc, size).unwrap_or(regular)
}
};
// Load bold font.
let bold_desc = Self::make_desc(&font.bold(), Slant::Normal, Weight::Bold);
let bold = load_or_regular(bold_desc);
// Load italic font.
let italic_desc = Self::make_desc(&font.italic(), Slant::Italic, Weight::Normal);
let italic = load_or_regular(italic_desc);
// Load bold italic font.
let bold_italic_desc = Self::make_desc(&font.bold_italic(), Slant::Italic, Weight::Bold);
let bold_italic = load_or_regular(bold_italic_desc);
Ok((regular, bold, italic, bold_italic))
}
fn load_regular_font(
rasterizer: &mut Rasterizer,
description: &FontDesc,
size: Size,
) -> Result<FontKey, crossfont::Error> {
match rasterizer.load_font(description, size) {
Ok(font) => Ok(font),
Err(err) => {
error!("{}", err);
let fallback_desc =
Self::make_desc(Font::default().normal(), Slant::Normal, Weight::Normal);
rasterizer.load_font(&fallback_desc, size)
},
}
}
fn make_desc(desc: &FontDescription, slant: Slant, weight: Weight) -> FontDesc {
let style = if let Some(ref spec) = desc.style {
Style::Specific(spec.to_owned())
} else {
Style::Description { slant, weight }
};
FontDesc::new(desc.family.clone(), style)
}
    /// Get a glyph from the font.
    ///
    /// If the glyph has never been loaded before, it will be rasterized and inserted into the
    /// cache.
    ///
    /// # Errors
    ///
    /// This will fail when the glyph could not be rasterized. Usually this is due to the glyph
    /// not being present in any font.
    pub fn get<L>(&mut self, glyph_key: GlyphKey, loader: &mut L, show_missing: bool) -> Glyph
    where
        L: LoadGlyph + ?Sized,
    {
        // Try to load glyph from cache.
        if let Some(glyph) = self.cache.get(&glyph_key) {
            return *glyph;
        };
        // Rasterize the glyph using the built-in font for special characters or the user's font
        // for everything else.
        let rasterized = self
            .builtin_box_drawing
            .then(|| {
                builtin_font::builtin_glyph(
                    glyph_key.character,
                    &self.metrics,
                    &self.font_offset,
                    &self.glyph_offset,
                )
            })
            .flatten()
            .map_or_else(|| self.rasterizer.get_glyph(glyph_key), Ok);
        let glyph = match rasterized {
            Ok(rasterized) => self.load_glyph(loader, rasterized),
            // Load fallback glyph.
            Err(RasterizerError::MissingGlyph(rasterized)) if show_missing => {
                // Use `\0` as "missing" glyph to cache it only once.
                let missing_key = GlyphKey { character: '\0', ..glyph_key };
                if let Some(glyph) = self.cache.get(&missing_key) {
                    *glyph
                } else {
                    // If no missing glyph was loaded yet, insert it as `\0`.
                    let glyph = self.load_glyph(loader, rasterized);
                    self.cache.insert(missing_key, glyph);
                    glyph
                }
            },
            // Any other rasterization error falls back to an empty glyph.
            Err(_) => self.load_glyph(loader, Default::default()),
        };
        // Cache rasterized glyph.
        *self.cache.entry(glyph_key).or_insert(glyph)
    }
    /// Load glyph into the atlas.
    ///
    /// This will apply all transforms defined for the glyph cache to the rasterized glyph before
    /// it is inserted into the atlas.
    pub fn load_glyph<L>(&self, loader: &mut L, mut glyph: RasterizedGlyph) -> Glyph
    where
        L: LoadGlyph + ?Sized,
    {
        // Apply the configured glyph offset, then re-anchor vertically
        // relative to the descent line.
        glyph.left += i32::from(self.glyph_offset.x);
        glyph.top += i32::from(self.glyph_offset.y);
        glyph.top -= self.metrics.descent as i32;
        // The metrics of zero-width characters are based on rendering
        // the character after the current cell, with the anchor at the
        // right side of the preceding character. Since we render the
        // zero-width characters inside the preceding character, the
        // anchor has been moved to the right by one cell.
        if glyph.character.width() == Some(0) {
            glyph.left += self.metrics.average_advance as i32;
        }
        // Add glyph to cache.
        loader.load_glyph(&glyph)
    }
    /// Reset currently cached data in both GL and the registry to default state.
    ///
    /// Clears the GL atlas first, then the in-memory glyph map, and finally
    /// re-rasterizes the common (ASCII) glyphs for all style variants.
    pub fn reset_glyph_cache<L: LoadGlyph>(&mut self, loader: &mut L) {
        loader.clear();
        self.cache = Default::default();
        self.load_common_glyphs(loader);
    }
    /// Update the inner font size.
    ///
    /// NOTE: To reload the renderers's fonts [`Self::reset_glyph_cache`] should be called
    /// afterwards.
    pub fn update_font_size(&mut self, font: &Font) -> Result<(), crossfont::Error> {
        // Update dpi scaling.
        self.font_offset = font.offset;
        self.glyph_offset = font.glyph_offset;
        // Recompute font keys.
        let (regular, bold, italic, bold_italic) =
            Self::compute_font_keys(font, &mut self.rasterizer)?;
        let metrics = GlyphCache::load_font_metrics(&mut self.rasterizer, font, regular)?;
        info!("Font size changed to {:?} px", font.size().as_px());
        // Only commit the new state once every fallible step has succeeded.
        self.font_size = font.size();
        self.font_key = regular;
        self.bold_key = bold;
        self.italic_key = italic;
        self.bold_italic_key = bold_italic;
        self.metrics = metrics;
        self.builtin_box_drawing = font.builtin_box_drawing;
        Ok(())
    }
pub fn font_metrics(&self) -> crossfont::Metrics {
self.metrics
}
/// Prefetch glyphs that are almost guaranteed to be loaded anyways.
pub fn load_common_glyphs<L: LoadGlyph>(&mut self, loader: &mut L) {
self.load_glyphs_for_font(self.font_key, loader);
self.load_glyphs_for_font(self.bold_key, loader);
self.load_glyphs_for_font(self.italic_key, loader);
self.load_glyphs_for_font(self.bold_italic_key, loader);
}
}
| rust | {
"argument_definitions": [
{
"definitions": [
"pub struct GlyphKey {\n pub character: char,\n pub font_key: FontKey,\n pub size: Size,\n}"
],
"name": "glyph_key",
"type": "GlyphKey"
}
],
"end_line": 245,
"name": "get",
"signature": "pub fn get(&mut self, glyph_key: GlyphKey, loader: &mut L, show_missing: bool) -> Glyph",
"start_line": 200
} | {
"class_name": "impl GlyphCache {\n pub fn new(mut rasterizer: Rasterizer, font: &Font) -> Result<GlyphCache, crossfont::Error> {\n let (regular, bold, italic, bold_italic) = Self::compute_font_keys(font, &mut rasterizer)?;\n\n let metrics = GlyphCache::load_font_metrics(&mut rasterizer, font, regular)?;\n Ok(Self {\n cache: Default::default(),\n rasterizer,\n font_size: font.size(),\n font_key: regular,\n bold_key: bold,\n italic_key: italic,\n bold_italic_key: bold_italic,\n font_offset: font.offset,\n glyph_offset: font.glyph_offset,\n metrics,\n builtin_box_drawing: font.builtin_box_drawing,\n })\n }\n\n // Load font metrics and adjust for glyph offset.\n fn load_font_metrics(\n rasterizer: &mut Rasterizer,\n font: &Font,\n key: FontKey,\n ) -> Result<Metrics, crossfont::Error> {\n // Need to load at least one glyph for the face before calling metrics.\n // The glyph requested here ('m' at the time of writing) has no special\n // meaning.\n rasterizer.get_glyph(GlyphKey { font_key: key, character: 'm', size: font.size() })?;\n\n let mut metrics = rasterizer.metrics(key, font.size())?;\n metrics.strikeout_position += font.glyph_offset.y as f32;\n Ok(metrics)\n }\n\n fn load_glyphs_for_font<L: LoadGlyph>(&mut self, font: FontKey, loader: &mut L) {\n let size = self.font_size;\n\n // Cache all ascii characters.\n for i in 32u8..=126u8 {\n self.get(GlyphKey { font_key: font, character: i as char, size }, loader, true);\n }\n }\n\n /// Computes font keys for (Regular, Bold, Italic, Bold Italic).\n fn compute_font_keys(\n font: &Font,\n rasterizer: &mut Rasterizer,\n ) -> Result<(FontKey, FontKey, FontKey, FontKey), crossfont::Error> {\n let size = font.size();\n\n // Load regular font.\n let regular_desc = Self::make_desc(font.normal(), Slant::Normal, Weight::Normal);\n\n let regular = Self::load_regular_font(rasterizer, ®ular_desc, size)?;\n\n // Helper to load a description if it is not the `regular_desc`.\n let mut load_or_regular = |desc: FontDesc| {\n if desc == 
regular_desc {\n regular\n } else {\n rasterizer.load_font(&desc, size).unwrap_or(regular)\n }\n };\n\n // Load bold font.\n let bold_desc = Self::make_desc(&font.bold(), Slant::Normal, Weight::Bold);\n\n let bold = load_or_regular(bold_desc);\n\n // Load italic font.\n let italic_desc = Self::make_desc(&font.italic(), Slant::Italic, Weight::Normal);\n\n let italic = load_or_regular(italic_desc);\n\n // Load bold italic font.\n let bold_italic_desc = Self::make_desc(&font.bold_italic(), Slant::Italic, Weight::Bold);\n\n let bold_italic = load_or_regular(bold_italic_desc);\n\n Ok((regular, bold, italic, bold_italic))\n }\n\n fn load_regular_font(\n rasterizer: &mut Rasterizer,\n description: &FontDesc,\n size: Size,\n ) -> Result<FontKey, crossfont::Error> {\n match rasterizer.load_font(description, size) {\n Ok(font) => Ok(font),\n Err(err) => {\n error!(\"{}\", err);\n\n let fallback_desc =\n Self::make_desc(Font::default().normal(), Slant::Normal, Weight::Normal);\n rasterizer.load_font(&fallback_desc, size)\n },\n }\n }\n\n fn make_desc(desc: &FontDescription, slant: Slant, weight: Weight) -> FontDesc {\n let style = if let Some(ref spec) = desc.style {\n Style::Specific(spec.to_owned())\n } else {\n Style::Description { slant, weight }\n };\n FontDesc::new(desc.family.clone(), style)\n }\n\n /// Get a glyph from the font.\n ///\n /// If the glyph has never been loaded before, it will be rasterized and inserted into the\n /// cache.\n ///\n /// # Errors\n ///\n /// This will fail when the glyph could not be rasterized. 
Usually this is due to the glyph\n /// not being present in any font.\n pub fn get<L>(&mut self, glyph_key: GlyphKey, loader: &mut L, show_missing: bool) -> Glyph\n where\n L: LoadGlyph + ?Sized,\n {\n // Try to load glyph from cache.\n if let Some(glyph) = self.cache.get(&glyph_key) {\n return *glyph;\n };\n\n // Rasterize the glyph using the built-in font for special characters or the user's font\n // for everything else.\n let rasterized = self\n .builtin_box_drawing\n .then(|| {\n builtin_font::builtin_glyph(\n glyph_key.character,\n &self.metrics,\n &self.font_offset,\n &self.glyph_offset,\n )\n })\n .flatten()\n .map_or_else(|| self.rasterizer.get_glyph(glyph_key), Ok);\n\n let glyph = match rasterized {\n Ok(rasterized) => self.load_glyph(loader, rasterized),\n // Load fallback glyph.\n Err(RasterizerError::MissingGlyph(rasterized)) if show_missing => {\n // Use `\\0` as \"missing\" glyph to cache it only once.\n let missing_key = GlyphKey { character: '\\0', ..glyph_key };\n if let Some(glyph) = self.cache.get(&missing_key) {\n *glyph\n } else {\n // If no missing glyph was loaded yet, insert it as `\\0`.\n let glyph = self.load_glyph(loader, rasterized);\n self.cache.insert(missing_key, glyph);\n\n glyph\n }\n },\n Err(_) => self.load_glyph(loader, Default::default()),\n };\n\n // Cache rasterized glyph.\n *self.cache.entry(glyph_key).or_insert(glyph)\n }\n\n /// Load glyph into the atlas.\n ///\n /// This will apply all transforms defined for the glyph cache to the rasterized glyph before\n pub fn load_glyph<L>(&self, loader: &mut L, mut glyph: RasterizedGlyph) -> Glyph\n where\n L: LoadGlyph + ?Sized,\n {\n glyph.left += i32::from(self.glyph_offset.x);\n glyph.top += i32::from(self.glyph_offset.y);\n glyph.top -= self.metrics.descent as i32;\n\n // The metrics of zero-width characters are based on rendering\n // the character after the current cell, with the anchor at the\n // right side of the preceding character. 
Since we render the\n // zero-width characters inside the preceding character, the\n // anchor has been moved to the right by one cell.\n if glyph.character.width() == Some(0) {\n glyph.left += self.metrics.average_advance as i32;\n }\n\n // Add glyph to cache.\n loader.load_glyph(&glyph)\n }\n\n /// Reset currently cached data in both GL and the registry to default state.\n pub fn reset_glyph_cache<L: LoadGlyph>(&mut self, loader: &mut L) {\n loader.clear();\n self.cache = Default::default();\n\n self.load_common_glyphs(loader);\n }\n\n /// Update the inner font size.\n ///\n /// NOTE: To reload the renderers's fonts [`Self::reset_glyph_cache`] should be called\n /// afterwards.\n pub fn update_font_size(&mut self, font: &Font) -> Result<(), crossfont::Error> {\n // Update dpi scaling.\n self.font_offset = font.offset;\n self.glyph_offset = font.glyph_offset;\n\n // Recompute font keys.\n let (regular, bold, italic, bold_italic) =\n Self::compute_font_keys(font, &mut self.rasterizer)?;\n\n let metrics = GlyphCache::load_font_metrics(&mut self.rasterizer, font, regular)?;\n\n info!(\"Font size changed to {:?} px\", font.size().as_px());\n\n self.font_size = font.size();\n self.font_key = regular;\n self.bold_key = bold;\n self.italic_key = italic;\n self.bold_italic_key = bold_italic;\n self.metrics = metrics;\n self.builtin_box_drawing = font.builtin_box_drawing;\n\n Ok(())\n }\n\n pub fn font_metrics(&self) -> crossfont::Metrics {\n self.metrics\n }\n\n /// Prefetch glyphs that are almost guaranteed to be loaded anyways.\n pub fn load_common_glyphs<L: LoadGlyph>(&mut self, loader: &mut L) {\n self.load_glyphs_for_font(self.font_key, loader);\n self.load_glyphs_for_font(self.bold_key, loader);\n self.load_glyphs_for_font(self.italic_key, loader);\n self.load_glyphs_for_font(self.bold_italic_key, loader);\n }\n}",
"class_signature": "impl GlyphCache"
} |
with_api | alacritty-master/alacritty/src/renderer/text/glsl3.rs | fn with_api(&'b mut self, size_info: &'b SizeInfo, func: F) -> T {
unsafe {
gl::UseProgram(self.program.id());
self.program.set_term_uniforms(size_info);
gl::BindVertexArray(self.vao);
gl::BindBuffer(gl::ELEMENT_ARRAY_BUFFER, self.ebo);
gl::BindBuffer(gl::ARRAY_BUFFER, self.vbo_instance);
gl::ActiveTexture(gl::TEXTURE0);
}
let res = func(RenderApi {
active_tex: &mut self.active_tex,
batch: &mut self.batch,
atlas: &mut self.atlas,
current_atlas: &mut self.current_atlas,
program: &mut self.program,
});
unsafe {
gl::BindBuffer(gl::ELEMENT_ARRAY_BUFFER, 0);
gl::BindBuffer(gl::ARRAY_BUFFER, 0);
gl::BindVertexArray(0);
gl::UseProgram(0);
}
res
} | use std::mem::size_of;
use std::ptr;
use crossfont::RasterizedGlyph;
use log::info;
use alacritty_terminal::term::cell::Flags;
use crate::display::content::RenderableCell;
use crate::display::SizeInfo;
use crate::gl;
use crate::gl::types::*;
use crate::renderer::shader::{ShaderProgram, ShaderVersion};
use crate::renderer::{cstr, Error};
use super::atlas::{Atlas, ATLAS_SIZE};
use super::{
Glyph, LoadGlyph, LoaderApi, RenderingGlyphFlags, RenderingPass, TextRenderApi,
TextRenderBatch, TextRenderer, TextShader,
};
// Shader source.
pub const TEXT_SHADER_F: &str = include_str!("../../../res/glsl3/text.f.glsl");
const TEXT_SHADER_V: &str = include_str!("../../../res/glsl3/text.v.glsl");
/// Maximum items to be drawn in a batch.
const BATCH_MAX: usize = 0x1_0000;
/// Text renderer for the OpenGL 3.3 (GLSL3) backend.
#[derive(Debug)]
pub struct Glsl3Renderer {
    /// Shader program shared by the background and text passes.
    program: TextShaderProgram,
    /// Vertex array object holding the instance attribute layout.
    vao: GLuint,
    /// Element buffer with the two-triangle quad indices.
    ebo: GLuint,
    /// Per-cell instance data buffer.
    vbo_instance: GLuint,
    /// Glyph atlases; a new one is appended when the current one fills up.
    atlas: Vec<Atlas>,
    /// Index into `atlas` of the atlas currently being filled.
    current_atlas: usize,
    /// Texture currently bound to `TEXTURE0`, tracked to avoid redundant rebinds.
    active_tex: GLuint,
    /// Cells queued for the next flush.
    batch: Batch,
}
impl Glsl3Renderer {
    /// Create the GLSL3 renderer, compiling the shaders and building the GL
    /// objects (VAO, EBO, instance VBO) that all draws share.
    ///
    /// Must be called with the target OpenGL context bound on this thread.
    pub fn new() -> Result<Self, Error> {
        info!("Using OpenGL 3.3 renderer");
        let program = TextShaderProgram::new(ShaderVersion::Glsl3)?;
        let mut vao: GLuint = 0;
        let mut ebo: GLuint = 0;
        let mut vbo_instance: GLuint = 0;
        unsafe {
            gl::Enable(gl::BLEND);
            gl::BlendFunc(gl::SRC1_COLOR, gl::ONE_MINUS_SRC1_COLOR);
            // Disable depth mask, as the renderer never uses depth tests.
            gl::DepthMask(gl::FALSE);
            gl::GenVertexArrays(1, &mut vao);
            gl::GenBuffers(1, &mut ebo);
            gl::GenBuffers(1, &mut vbo_instance);
            gl::BindVertexArray(vao);
            // ---------------------
            // Set up element buffer
            // ---------------------
            let indices: [u32; 6] = [0, 1, 3, 1, 2, 3];
            gl::BindBuffer(gl::ELEMENT_ARRAY_BUFFER, ebo);
            gl::BufferData(
                gl::ELEMENT_ARRAY_BUFFER,
                (6 * size_of::<u32>()) as isize,
                indices.as_ptr() as *const _,
                gl::STATIC_DRAW,
            );
            // ----------------------------
            // Setup vertex instance buffer
            // ----------------------------
            gl::BindBuffer(gl::ARRAY_BUFFER, vbo_instance);
            gl::BufferData(
                gl::ARRAY_BUFFER,
                (BATCH_MAX * size_of::<InstanceData>()) as isize,
                ptr::null(),
                gl::STREAM_DRAW,
            );
            // Declare one vertex attribute per `InstanceData` field group;
            // `size` tracks the running byte offset, `index` the attribute slot.
            let mut index = 0;
            let mut size = 0;
            macro_rules! add_attr {
                ($count:expr, $gl_type:expr, $type:ty) => {
                    gl::VertexAttribPointer(
                        index,
                        $count,
                        $gl_type,
                        gl::FALSE,
                        size_of::<InstanceData>() as i32,
                        size as *const _,
                    );
                    gl::EnableVertexAttribArray(index);
                    // Advance this attribute once per instance, not per vertex.
                    gl::VertexAttribDivisor(index, 1);
                    #[allow(unused_assignments)]
                    {
                        size += $count * size_of::<$type>();
                        index += 1;
                    }
                };
            }
            // Coords.
            add_attr!(2, gl::UNSIGNED_SHORT, u16);
            // Glyph offset and size.
            add_attr!(4, gl::SHORT, i16);
            // UV offset.
            add_attr!(4, gl::FLOAT, f32);
            // Color and cell flags.
            //
            // These are packed together because of an OpenGL driver issue on macOS, which caused a
            // `vec3(u8)` text color and a `u8` cell flags to increase the rendering time by a
            // huge margin.
            add_attr!(4, gl::UNSIGNED_BYTE, u8);
            // Background color.
            add_attr!(4, gl::UNSIGNED_BYTE, u8);
            // Cleanup.
            gl::BindVertexArray(0);
            gl::BindBuffer(gl::ARRAY_BUFFER, 0);
            gl::BindBuffer(gl::ELEMENT_ARRAY_BUFFER, 0);
        }
        Ok(Self {
            program,
            vao,
            ebo,
            vbo_instance,
            atlas: vec![Atlas::new(ATLAS_SIZE, false)],
            current_atlas: 0,
            active_tex: 0,
            batch: Batch::new(),
        })
    }
}
impl<'a> TextRenderer<'a> for Glsl3Renderer {
    type RenderApi = RenderApi<'a>;
    type RenderBatch = Batch;
    type Shader = TextShaderProgram;

    /// Run `func` with a scoped [`RenderApi`], binding all GL state needed
    /// for text rendering beforehand and unbinding it again afterwards.
    fn with_api<'b: 'a, F, T>(&'b mut self, size_info: &'b SizeInfo, func: F) -> T
    where
        F: FnOnce(Self::RenderApi) -> T,
    {
        unsafe {
            gl::UseProgram(self.program.id());
            self.program.set_term_uniforms(size_info);
            gl::BindVertexArray(self.vao);
            gl::BindBuffer(gl::ELEMENT_ARRAY_BUFFER, self.ebo);
            gl::BindBuffer(gl::ARRAY_BUFFER, self.vbo_instance);
            gl::ActiveTexture(gl::TEXTURE0);
        }
        // Dropping the `RenderApi` flushes any pending batch, so that happens
        // while the buffers above are still bound.
        let res = func(RenderApi {
            active_tex: &mut self.active_tex,
            batch: &mut self.batch,
            atlas: &mut self.atlas,
            current_atlas: &mut self.current_atlas,
            program: &mut self.program,
        });
        unsafe {
            gl::BindBuffer(gl::ELEMENT_ARRAY_BUFFER, 0);
            gl::BindBuffer(gl::ARRAY_BUFFER, 0);
            gl::BindVertexArray(0);
            gl::UseProgram(0);
        }
        res
    }

    fn program(&self) -> &Self::Shader {
        &self.program
    }

    fn loader_api(&mut self) -> LoaderApi<'_> {
        LoaderApi {
            active_tex: &mut self.active_tex,
            atlas: &mut self.atlas,
            current_atlas: &mut self.current_atlas,
        }
    }
}

impl Drop for Glsl3Renderer {
    fn drop(&mut self) {
        // Release the GL objects created in `new`; the GL context must still
        // be current for the deletes to take effect.
        unsafe {
            gl::DeleteBuffers(1, &self.vbo_instance);
            gl::DeleteBuffers(1, &self.ebo);
            gl::DeleteVertexArrays(1, &self.vao);
        }
    }
}
/// Scoped handle for issuing draw calls while the renderer's GL state is bound.
#[derive(Debug)]
pub struct RenderApi<'a> {
    active_tex: &'a mut GLuint,
    batch: &'a mut Batch,
    atlas: &'a mut Vec<Atlas>,
    current_atlas: &'a mut usize,
    program: &'a mut TextShaderProgram,
}

impl TextRenderApi<Batch> for RenderApi<'_> {
    fn batch(&mut self) -> &mut Batch {
        self.batch
    }

    /// Upload the pending instances and draw them in two passes
    /// (backgrounds, then glyphs), clearing the batch afterwards.
    fn render_batch(&mut self) {
        // Upload instance data into the already-bound instance VBO.
        unsafe {
            gl::BufferSubData(
                gl::ARRAY_BUFFER,
                0,
                self.batch.size() as isize,
                self.batch.instances.as_ptr() as *const _,
            );
        }
        // Bind texture if necessary.
        if *self.active_tex != self.batch.tex() {
            unsafe {
                gl::BindTexture(gl::TEXTURE_2D, self.batch.tex());
            }
            *self.active_tex = self.batch.tex();
        }
        unsafe {
            self.program.set_rendering_pass(RenderingPass::Background);
            gl::DrawElementsInstanced(
                gl::TRIANGLES,
                6,
                gl::UNSIGNED_INT,
                ptr::null(),
                self.batch.len() as GLsizei,
            );
            self.program.set_rendering_pass(RenderingPass::SubpixelPass1);
            gl::DrawElementsInstanced(
                gl::TRIANGLES,
                6,
                gl::UNSIGNED_INT,
                ptr::null(),
                self.batch.len() as GLsizei,
            );
        }
        self.batch.clear();
    }
}

impl LoadGlyph for RenderApi<'_> {
    fn load_glyph(&mut self, rasterized: &RasterizedGlyph) -> Glyph {
        Atlas::load_glyph(self.active_tex, self.atlas, self.current_atlas, rasterized)
    }

    fn clear(&mut self) {
        Atlas::clear_atlas(self.atlas, self.current_atlas)
    }
}

impl Drop for RenderApi<'_> {
    fn drop(&mut self) {
        // Flush anything still batched before the GL state is unbound.
        if !self.batch.is_empty() {
            self.render_batch();
        }
    }
}
/// Per-cell instance attributes uploaded to the instance VBO.
///
/// Field order and `#[repr(C)]` must match the attribute layout declared in
/// `Glsl3Renderer::new`.
#[derive(Debug)]
#[repr(C)]
struct InstanceData {
    // Coords.
    col: u16,
    row: u16,
    // Glyph offset.
    left: i16,
    top: i16,
    // Glyph size.
    width: i16,
    height: i16,
    // UV offset.
    uv_left: f32,
    uv_bot: f32,
    // uv scale.
    uv_width: f32,
    uv_height: f32,
    // Color.
    r: u8,
    g: u8,
    b: u8,
    // Cell flags like multicolor or fullwidth character.
    cell_flags: RenderingGlyphFlags,
    // Background color.
    bg_r: u8,
    bg_g: u8,
    bg_b: u8,
    bg_a: u8,
}

/// Instances accumulated for a single atlas texture, drawn in one flush.
#[derive(Debug, Default)]
pub struct Batch {
    tex: GLuint,
    instances: Vec<InstanceData>,
}
impl TextRenderBatch for Batch {
    #[inline]
    fn tex(&self) -> GLuint {
        self.tex
    }

    #[inline]
    fn full(&self) -> bool {
        self.capacity() == self.len()
    }

    #[inline]
    fn is_empty(&self) -> bool {
        self.len() == 0
    }

    /// Queue one cell for drawing; an empty batch adopts the glyph's texture.
    fn add_item(&mut self, cell: &RenderableCell, glyph: &Glyph, _: &SizeInfo) {
        if self.is_empty() {
            self.tex = glyph.tex_id;
        }
        let mut cell_flags = RenderingGlyphFlags::empty();
        cell_flags.set(RenderingGlyphFlags::COLORED, glyph.multicolor);
        cell_flags.set(RenderingGlyphFlags::WIDE_CHAR, cell.flags.contains(Flags::WIDE_CHAR));
        self.instances.push(InstanceData {
            col: cell.point.column.0 as u16,
            row: cell.point.line as u16,
            top: glyph.top,
            left: glyph.left,
            width: glyph.width,
            height: glyph.height,
            uv_bot: glyph.uv_bot,
            uv_left: glyph.uv_left,
            uv_width: glyph.uv_width,
            uv_height: glyph.uv_height,
            r: cell.fg.r,
            g: cell.fg.g,
            b: cell.fg.b,
            cell_flags,
            bg_r: cell.bg.r,
            bg_g: cell.bg.g,
            bg_b: cell.bg.b,
            bg_a: (cell.bg_alpha * 255.0) as u8,
        });
    }
}

impl Batch {
    #[inline]
    pub fn new() -> Self {
        Self { tex: 0, instances: Vec::with_capacity(BATCH_MAX) }
    }

    /// Number of queued instances.
    #[inline]
    pub fn len(&self) -> usize {
        self.instances.len()
    }

    #[inline]
    pub fn capacity(&self) -> usize {
        BATCH_MAX
    }

    /// Size of the queued instance data in bytes.
    #[inline]
    pub fn size(&self) -> usize {
        self.len() * size_of::<InstanceData>()
    }

    pub fn clear(&mut self) {
        self.tex = 0;
        self.instances.clear();
    }
}
/// Text drawing program.
///
/// Uniforms are prefixed with "u", and vertex attributes are prefixed with "a".
#[derive(Debug)]
pub struct TextShaderProgram {
    /// Shader program.
    program: ShaderProgram,
    /// Projection scale and offset uniform.
    u_projection: GLint,
    /// Cell dimensions (pixels).
    u_cell_dim: GLint,
    /// Background pass flag.
    ///
    /// Rendering is split into two passes; one for backgrounds, and one for text.
    u_rendering_pass: GLint,
}

impl TextShaderProgram {
    /// Compile the text shaders and resolve all uniform locations.
    pub fn new(shader_version: ShaderVersion) -> Result<TextShaderProgram, Error> {
        let program = ShaderProgram::new(shader_version, None, TEXT_SHADER_V, TEXT_SHADER_F)?;
        Ok(Self {
            u_projection: program.get_uniform_location(cstr!("projection"))?,
            u_cell_dim: program.get_uniform_location(cstr!("cellDim"))?,
            u_rendering_pass: program.get_uniform_location(cstr!("renderingPass"))?,
            program,
        })
    }

    /// Upload the cell dimensions; the program must currently be in use.
    fn set_term_uniforms(&self, props: &SizeInfo) {
        unsafe {
            gl::Uniform2f(self.u_cell_dim, props.cell_width(), props.cell_height());
        }
    }

    /// Select which pass the shader performs for subsequent draw calls.
    fn set_rendering_pass(&self, rendering_pass: RenderingPass) {
        let value = match rendering_pass {
            RenderingPass::Background | RenderingPass::SubpixelPass1 => rendering_pass as i32,
            _ => unreachable!("provided pass is not supported in GLSL3 renderer"),
        };
        unsafe {
            gl::Uniform1i(self.u_rendering_pass, value);
        }
    }
}

impl TextShader for TextShaderProgram {
    fn id(&self) -> GLuint {
        self.program.id()
    }

    fn projection_uniform(&self) -> GLint {
        self.u_projection
    }
}
| rust | {
"argument_definitions": [],
"end_line": 184,
"name": "with_api",
"signature": "fn with_api(&'b mut self, size_info: &'b SizeInfo, func: F) -> T",
"start_line": 153
} | {
"class_name": "impl<'a> TextRenderer<'a> for Glsl3Renderer {\n type RenderApi = RenderApi<'a>;\n type RenderBatch = Batch;\n type Shader = TextShaderProgram;\n\n fn with_api<'b: 'a, F, T>(&'b mut self, size_info: &'b SizeInfo, func: F) -> T\n where\n F: FnOnce(Self::RenderApi) -> T,\n {\n unsafe {\n gl::UseProgram(self.program.id());\n self.program.set_term_uniforms(size_info);\n\n gl::BindVertexArray(self.vao);\n gl::BindBuffer(gl::ELEMENT_ARRAY_BUFFER, self.ebo);\n gl::BindBuffer(gl::ARRAY_BUFFER, self.vbo_instance);\n gl::ActiveTexture(gl::TEXTURE0);\n }\n\n let res = func(RenderApi {\n active_tex: &mut self.active_tex,\n batch: &mut self.batch,\n atlas: &mut self.atlas,\n current_atlas: &mut self.current_atlas,\n program: &mut self.program,\n });\n\n unsafe {\n gl::BindBuffer(gl::ELEMENT_ARRAY_BUFFER, 0);\n gl::BindBuffer(gl::ARRAY_BUFFER, 0);\n gl::BindVertexArray(0);\n\n gl::UseProgram(0);\n }\n\n res\n }\n\n fn program(&self) -> &Self::Shader {\n &self.program\n }\n\n fn loader_api(&mut self) -> LoaderApi<'_> {\n LoaderApi {\n active_tex: &mut self.active_tex,\n atlas: &mut self.atlas,\n current_atlas: &mut self.current_atlas,\n }\n }\n}",
"class_signature": "impl<'a> TextRenderer<'a> for Glsl3Renderer"
} |
with_api | alacritty-master/alacritty/src/renderer/text/gles2.rs | fn with_api(&'b mut self, _: &'b SizeInfo, func: F) -> T {
unsafe {
gl::UseProgram(self.program.id());
gl::BindVertexArray(self.vao);
gl::BindBuffer(gl::ELEMENT_ARRAY_BUFFER, self.ebo);
gl::BindBuffer(gl::ARRAY_BUFFER, self.vbo);
gl::ActiveTexture(gl::TEXTURE0);
}
let res = func(RenderApi {
active_tex: &mut self.active_tex,
batch: &mut self.batch,
atlas: &mut self.atlas,
current_atlas: &mut self.current_atlas,
program: &mut self.program,
dual_source_blending: self.dual_source_blending,
});
unsafe {
gl::BindBuffer(gl::ELEMENT_ARRAY_BUFFER, 0);
gl::BindBuffer(gl::ARRAY_BUFFER, 0);
gl::BindVertexArray(0);
gl::UseProgram(0);
}
res
} | use std::mem::size_of;
use std::ptr;
use crossfont::RasterizedGlyph;
use log::info;
use alacritty_terminal::term::cell::Flags;
use crate::display::content::RenderableCell;
use crate::display::SizeInfo;
use crate::gl;
use crate::gl::types::*;
use crate::renderer::shader::{ShaderProgram, ShaderVersion};
use crate::renderer::{cstr, Error, GlExtensions};
use super::atlas::{Atlas, ATLAS_SIZE};
use super::{
glsl3, Glyph, LoadGlyph, LoaderApi, RenderingGlyphFlags, RenderingPass, TextRenderApi,
TextRenderBatch, TextRenderer, TextShader,
};
// Shader source.
const TEXT_SHADER_F: &str = include_str!("../../../res/gles2/text.f.glsl");
const TEXT_SHADER_V: &str = include_str!("../../../res/gles2/text.v.glsl");
/// Text renderer for the OpenGL ES 2.0 backend.
#[derive(Debug)]
pub struct Gles2Renderer {
    /// Shader program used for the background and all text passes.
    program: TextShaderProgram,
    /// Vertex array object with the per-vertex attribute layout.
    vao: GLuint,
    /// Vertex buffer holding four vertices per queued glyph.
    vbo: GLuint,
    /// Element buffer with precomputed quad indices.
    ebo: GLuint,
    /// Glyph atlases; a new one is appended when the current one fills up.
    atlas: Vec<Atlas>,
    /// Vertices queued for the next flush.
    batch: Batch,
    /// Index into `atlas` of the atlas currently being filled.
    current_atlas: usize,
    /// Texture currently bound to `TEXTURE0`, tracked to avoid redundant rebinds.
    active_tex: GLuint,
    /// Whether dual-source blending is available (enables a single text pass).
    dual_source_blending: bool,
}
impl Gles2Renderer {
    /// Create the GLES2 renderer, compiling the shaders and building the
    /// shared GL objects (VAO, VBO, EBO).
    ///
    /// Must be called with the target GL context bound on this thread.
    pub fn new(allow_dsb: bool, is_gles_context: bool) -> Result<Self, Error> {
        info!("Using OpenGL ES 2.0 renderer");
        let dual_source_blending = allow_dsb
            && (GlExtensions::contains("GL_EXT_blend_func_extended")
                || GlExtensions::contains("GL_ARB_blend_func_extended"));
        if is_gles_context {
            info!("Running on OpenGL ES context");
        }
        if dual_source_blending {
            info!("Using dual source blending");
        }
        let program = TextShaderProgram::new(ShaderVersion::Gles2, dual_source_blending)?;
        let mut vao: GLuint = 0;
        let mut vbo: GLuint = 0;
        let mut ebo: GLuint = 0;
        // Precompute the index pattern (two triangles per quad) for every
        // possible glyph slot in a batch.
        let mut vertex_indices = Vec::with_capacity(BATCH_MAX / 4 * 6);
        for index in 0..(BATCH_MAX / 4) as u16 {
            let index = index * 4;
            vertex_indices.push(index);
            vertex_indices.push(index + 1);
            vertex_indices.push(index + 3);
            vertex_indices.push(index + 1);
            vertex_indices.push(index + 2);
            vertex_indices.push(index + 3);
        }
        unsafe {
            gl::Enable(gl::BLEND);
            // Depth testing is never used by this renderer.
            gl::DepthMask(gl::FALSE);
            gl::GenVertexArrays(1, &mut vao);
            gl::GenBuffers(1, &mut ebo);
            gl::GenBuffers(1, &mut vbo);
            gl::BindVertexArray(vao);
            // Elements buffer.
            gl::BindBuffer(gl::ELEMENT_ARRAY_BUFFER, ebo);
            gl::BufferData(
                gl::ELEMENT_ARRAY_BUFFER,
                (vertex_indices.capacity() * size_of::<u16>()) as isize,
                vertex_indices.as_ptr() as *const _,
                gl::STATIC_DRAW,
            );
            // Vertex buffer.
            gl::BindBuffer(gl::ARRAY_BUFFER, vbo);
            gl::BufferData(
                gl::ARRAY_BUFFER,
                (BATCH_MAX * size_of::<TextVertex>()) as isize,
                ptr::null(),
                gl::STREAM_DRAW,
            );
            // Declare one vertex attribute per `TextVertex` field group;
            // `size` tracks the running byte offset, `index` the attribute slot.
            let mut index = 0;
            let mut size = 0;
            macro_rules! add_attr {
                ($count:expr, $gl_type:expr, $type:ty) => {
                    gl::VertexAttribPointer(
                        index,
                        $count,
                        $gl_type,
                        gl::FALSE,
                        size_of::<TextVertex>() as i32,
                        size as *const _,
                    );
                    gl::EnableVertexAttribArray(index);
                    #[allow(unused_assignments)]
                    {
                        size += $count * size_of::<$type>();
                        index += 1;
                    }
                };
            }
            // Cell coords.
            add_attr!(2, gl::SHORT, i16);
            // Glyph coords.
            add_attr!(2, gl::SHORT, i16);
            // UV.
            add_attr!(2, gl::FLOAT, u32);
            // Color and bitmap color.
            //
            // These are packed together because of an OpenGL driver issue on macOS, which caused a
            // `vec3(u8)` text color and a `u8` for glyph color to cause performance regressions.
            add_attr!(4, gl::UNSIGNED_BYTE, u8);
            // Background color.
            add_attr!(4, gl::UNSIGNED_BYTE, u8);
            // Cleanup.
            gl::BindVertexArray(0);
            gl::BindBuffer(gl::ELEMENT_ARRAY_BUFFER, 0);
            gl::BindBuffer(gl::ARRAY_BUFFER, 0);
        }
        Ok(Self {
            program,
            vao,
            vbo,
            ebo,
            atlas: vec![Atlas::new(ATLAS_SIZE, is_gles_context)],
            batch: Batch::new(),
            current_atlas: 0,
            active_tex: 0,
            dual_source_blending,
        })
    }
}

impl Drop for Gles2Renderer {
    fn drop(&mut self) {
        // Release the GL objects created in `new`; the GL context must still
        // be current for the deletes to take effect.
        unsafe {
            gl::DeleteBuffers(1, &self.vbo);
            gl::DeleteBuffers(1, &self.ebo);
            gl::DeleteVertexArrays(1, &self.vao);
        }
    }
}
impl<'a> TextRenderer<'a> for Gles2Renderer {
    type RenderApi = RenderApi<'a>;
    type RenderBatch = Batch;
    type Shader = TextShaderProgram;

    fn program(&self) -> &Self::Shader {
        &self.program
    }

    /// Run `func` with a scoped [`RenderApi`], binding all GL state needed
    /// for text rendering beforehand and unbinding it again afterwards.
    ///
    /// The size info is unused here because cell dimensions are baked into
    /// the vertices in `Batch::add_item` (the GLSL3 backend uploads them as a
    /// uniform instead).
    fn with_api<'b: 'a, F, T>(&'b mut self, _: &'b SizeInfo, func: F) -> T
    where
        F: FnOnce(Self::RenderApi) -> T,
    {
        unsafe {
            gl::UseProgram(self.program.id());
            gl::BindVertexArray(self.vao);
            gl::BindBuffer(gl::ELEMENT_ARRAY_BUFFER, self.ebo);
            gl::BindBuffer(gl::ARRAY_BUFFER, self.vbo);
            gl::ActiveTexture(gl::TEXTURE0);
        }
        // Dropping the `RenderApi` flushes any pending batch, so that happens
        // while the buffers above are still bound.
        let res = func(RenderApi {
            active_tex: &mut self.active_tex,
            batch: &mut self.batch,
            atlas: &mut self.atlas,
            current_atlas: &mut self.current_atlas,
            program: &mut self.program,
            dual_source_blending: self.dual_source_blending,
        });
        unsafe {
            gl::BindBuffer(gl::ELEMENT_ARRAY_BUFFER, 0);
            gl::BindBuffer(gl::ARRAY_BUFFER, 0);
            gl::BindVertexArray(0);
            gl::UseProgram(0);
        }
        res
    }

    fn loader_api(&mut self) -> LoaderApi<'_> {
        LoaderApi {
            active_tex: &mut self.active_tex,
            atlas: &mut self.atlas,
            current_atlas: &mut self.current_atlas,
        }
    }
}
/// Maximum items to be drawn in a batch.
///
/// We use the closest number to `u16::MAX` dividable by 4 (amount of vertices we push for a glyph),
/// since it's the maximum possible index in `glDrawElements` in GLES2.
const BATCH_MAX: usize = (u16::MAX - u16::MAX % 4) as usize;
/// Glyph vertices accumulated for a single atlas texture, drawn in one flush.
#[derive(Debug)]
pub struct Batch {
    tex: GLuint,
    vertices: Vec<TextVertex>,
}

impl Batch {
    fn new() -> Self {
        Self { tex: 0, vertices: Vec::with_capacity(BATCH_MAX) }
    }

    /// Number of queued vertices.
    #[inline]
    fn len(&self) -> usize {
        self.vertices.len()
    }

    #[inline]
    fn capacity(&self) -> usize {
        BATCH_MAX
    }

    /// Size of the queued vertex data in bytes.
    #[inline]
    fn size(&self) -> usize {
        self.len() * size_of::<TextVertex>()
    }

    #[inline]
    fn clear(&mut self) {
        self.vertices.clear();
    }
}
impl TextRenderBatch for Batch {
    #[inline]
    fn tex(&self) -> GLuint {
        self.tex
    }

    #[inline]
    fn full(&self) -> bool {
        self.capacity() == self.len()
    }

    #[inline]
    fn is_empty(&self) -> bool {
        self.len() == 0
    }

    /// Queue one cell as a four-vertex quad; an empty batch adopts the
    /// glyph's texture. The quad corner order must match the EBO index
    /// pattern built in `Gles2Renderer::new`.
    fn add_item(&mut self, cell: &RenderableCell, glyph: &Glyph, size_info: &SizeInfo) {
        if self.is_empty() {
            self.tex = glyph.tex_id;
        }
        // Calculate the cell position.
        let x = cell.point.column.0 as i16 * size_info.cell_width() as i16;
        let y = cell.point.line as i16 * size_info.cell_height() as i16;
        // Calculate the glyph position.
        let glyph_x = cell.point.column.0 as i16 * size_info.cell_width() as i16 + glyph.left;
        let glyph_y = (cell.point.line + 1) as i16 * size_info.cell_height() as i16 - glyph.top;
        let colored = if glyph.multicolor {
            RenderingGlyphFlags::COLORED
        } else {
            RenderingGlyphFlags::empty()
        };
        // Wide characters cover two cells.
        let is_wide = if cell.flags.contains(Flags::WIDE_CHAR) { 2 } else { 1 };
        // First corner: (x, y + cell_height).
        let mut vertex = TextVertex {
            x,
            y: y + size_info.cell_height() as i16,
            glyph_x,
            glyph_y: glyph_y + glyph.height,
            u: glyph.uv_left,
            v: glyph.uv_bot + glyph.uv_height,
            r: cell.fg.r,
            g: cell.fg.g,
            b: cell.fg.b,
            colored,
            bg_r: cell.bg.r,
            bg_g: cell.bg.g,
            bg_b: cell.bg.b,
            bg_a: (cell.bg_alpha * 255.0) as u8,
        };
        self.vertices.push(vertex);
        // Remaining corners reuse the vertex, changing only what differs.
        // Second corner: (x, y).
        vertex.y = y;
        vertex.glyph_y = glyph_y;
        vertex.u = glyph.uv_left;
        vertex.v = glyph.uv_bot;
        self.vertices.push(vertex);
        // Third corner: (x + width, y).
        vertex.x = x + is_wide * size_info.cell_width() as i16;
        vertex.glyph_x = glyph_x + glyph.width;
        vertex.u = glyph.uv_left + glyph.uv_width;
        vertex.v = glyph.uv_bot;
        self.vertices.push(vertex);
        // Fourth corner: (x + width, y + cell_height).
        vertex.x = x + is_wide * size_info.cell_width() as i16;
        vertex.y = y + size_info.cell_height() as i16;
        vertex.glyph_x = glyph_x + glyph.width;
        vertex.glyph_y = glyph_y + glyph.height;
        vertex.u = glyph.uv_left + glyph.uv_width;
        vertex.v = glyph.uv_bot + glyph.uv_height;
        self.vertices.push(vertex);
    }
}
#[derive(Debug)]
pub struct RenderApi<'a> {
active_tex: &'a mut GLuint,
batch: &'a mut Batch,
atlas: &'a mut Vec<Atlas>,
current_atlas: &'a mut usize,
program: &'a mut TextShaderProgram,
dual_source_blending: bool,
}
impl Drop for RenderApi<'_> {
fn drop(&mut self) {
if !self.batch.is_empty() {
self.render_batch();
}
}
}
impl LoadGlyph for RenderApi<'_> {
// Rasterized glyph upload; delegates to the shared atlas management.
fn load_glyph(&mut self, rasterized: &RasterizedGlyph) -> Glyph {
Atlas::load_glyph(self.active_tex, self.atlas, self.current_atlas, rasterized)
}
// Drop all cached glyphs (e.g. after a font change).
fn clear(&mut self) {
Atlas::clear_atlas(self.atlas, self.current_atlas)
}
}
impl TextRenderApi<Batch> for RenderApi<'_> {
fn batch(&mut self) -> &mut Batch {
self.batch
}
// Upload the queued quads and draw them, then reset the batch.
//
// Expects the VAO/VBO/EBO and shader program to already be bound by
// `with_api`; only mutates blend state, the rendering-pass uniform, and
// the bound texture.
fn render_batch(&mut self) {
// Upload vertex data into the already-bound array buffer.
unsafe {
gl::BufferSubData(
gl::ARRAY_BUFFER,
0,
self.batch.size() as isize,
self.batch.vertices.as_ptr() as *const _,
);
}
// Rebind the atlas texture only when it differs from the active one.
if *self.active_tex != self.batch.tex() {
unsafe {
gl::BindTexture(gl::TEXTURE_2D, self.batch.tex());
}
*self.active_tex = self.batch.tex();
}
unsafe {
// 6 indices (two triangles) per 4-vertex quad.
let num_indices = (self.batch.len() / 4 * 6) as i32;
// The rendering is inspired by
// https://github.com/servo/webrender/blob/master/webrender/doc/text-rendering.md.
// Draw background.
self.program.set_rendering_pass(RenderingPass::Background);
gl::BlendFunc(gl::ONE, gl::ZERO);
gl::DrawElements(gl::TRIANGLES, num_indices, gl::UNSIGNED_SHORT, ptr::null());
self.program.set_rendering_pass(RenderingPass::SubpixelPass1);
if self.dual_source_blending {
// Text rendering pass.
gl::BlendFunc(gl::SRC1_COLOR, gl::ONE_MINUS_SRC1_COLOR);
} else {
// First text rendering pass.
gl::BlendFuncSeparate(gl::ZERO, gl::ONE_MINUS_SRC_COLOR, gl::ZERO, gl::ONE);
gl::DrawElements(gl::TRIANGLES, num_indices, gl::UNSIGNED_SHORT, ptr::null());
// Second text rendering pass.
self.program.set_rendering_pass(RenderingPass::SubpixelPass2);
gl::BlendFuncSeparate(gl::ONE_MINUS_DST_ALPHA, gl::ONE, gl::ZERO, gl::ONE);
gl::DrawElements(gl::TRIANGLES, num_indices, gl::UNSIGNED_SHORT, ptr::null());
// Third text rendering pass.
self.program.set_rendering_pass(RenderingPass::SubpixelPass3);
gl::BlendFuncSeparate(gl::ONE, gl::ONE, gl::ONE, gl::ONE_MINUS_SRC_ALPHA);
}
// Shared final draw: executes the single dual-source text pass, or the
// third subpixel pass set up above.
gl::DrawElements(gl::TRIANGLES, num_indices, gl::UNSIGNED_SHORT, ptr::null());
}
self.batch.clear();
}
}
// One corner of a glyph quad as uploaded to the GPU.
//
// `#[repr(C)]` keeps the field layout stable so it matches the vertex
// attribute pointers set up for the VBO.
#[repr(C)]
#[derive(Debug, Copy, Clone)]
struct TextVertex {
// Cell coordinates.
x: i16,
y: i16,
// Glyph coordinates.
glyph_x: i16,
glyph_y: i16,
// Offsets into Atlas.
u: f32,
v: f32,
// Color.
r: u8,
g: u8,
b: u8,
// Whether the glyph is colored.
colored: RenderingGlyphFlags,
// Background color.
bg_r: u8,
bg_g: u8,
bg_b: u8,
bg_a: u8,
}
#[derive(Debug)]
pub struct TextShaderProgram {
/// Shader program.
program: ShaderProgram,
/// Projection scale and offset uniform.
u_projection: GLint,
/// Rendering pass.
///
/// For dual source blending, there are 2 passes; one for background, another for text,
/// similar to the GLSL3 renderer.
///
/// If GL_EXT_blend_func_extended is not available, the rendering is split into 4 passes.
/// One is used for the background and the rest to perform subpixel text rendering according to
/// <https://github.com/servo/webrender/blob/master/webrender/doc/text-rendering.md>.
///
/// In that fallback, the text itself is drawn in three subpixel passes on top of the
/// background pass.
u_rendering_pass: GLint,
}
impl TextShaderProgram {
// Compile and link the text shader, picking the dual-source fragment
// shader variant when the extension is available.
pub fn new(shader_version: ShaderVersion, dual_source_blending: bool) -> Result<Self, Error> {
let fragment_shader =
if dual_source_blending { &glsl3::TEXT_SHADER_F } else { &TEXT_SHADER_F };
let program = ShaderProgram::new(shader_version, None, TEXT_SHADER_V, fragment_shader)?;
Ok(Self {
u_projection: program.get_uniform_location(cstr!("projection"))?,
u_rendering_pass: program.get_uniform_location(cstr!("renderingPass"))?,
program,
})
}
// Select which pass the fragment shader runs; the program must be in use.
fn set_rendering_pass(&self, rendering_pass: RenderingPass) {
unsafe { gl::Uniform1i(self.u_rendering_pass, rendering_pass as i32) }
}
}
// Accessors required by the generic text-rendering machinery.
impl TextShader for TextShaderProgram {
fn id(&self) -> GLuint {
self.program.id()
}
fn projection_uniform(&self) -> GLint {
self.u_projection
}
}
| rust | {
"argument_definitions": [],
"end_line": 210,
"name": "with_api",
"signature": "fn with_api(&'b mut self, _: &'b SizeInfo, func: F) -> T",
"start_line": 180
} | {
"class_name": "impl<'a> TextRenderer<'a> for Gles2Renderer {\n type RenderApi = RenderApi<'a>;\n type RenderBatch = Batch;\n type Shader = TextShaderProgram;\n\n fn program(&self) -> &Self::Shader {\n &self.program\n }\n\n fn with_api<'b: 'a, F, T>(&'b mut self, _: &'b SizeInfo, func: F) -> T\n where\n F: FnOnce(Self::RenderApi) -> T,\n {\n unsafe {\n gl::UseProgram(self.program.id());\n gl::BindVertexArray(self.vao);\n gl::BindBuffer(gl::ELEMENT_ARRAY_BUFFER, self.ebo);\n gl::BindBuffer(gl::ARRAY_BUFFER, self.vbo);\n gl::ActiveTexture(gl::TEXTURE0);\n }\n\n let res = func(RenderApi {\n active_tex: &mut self.active_tex,\n batch: &mut self.batch,\n atlas: &mut self.atlas,\n current_atlas: &mut self.current_atlas,\n program: &mut self.program,\n dual_source_blending: self.dual_source_blending,\n });\n\n unsafe {\n gl::BindBuffer(gl::ELEMENT_ARRAY_BUFFER, 0);\n gl::BindBuffer(gl::ARRAY_BUFFER, 0);\n gl::BindVertexArray(0);\n\n gl::UseProgram(0);\n }\n\n res\n }\n\n fn loader_api(&mut self) -> LoaderApi<'_> {\n LoaderApi {\n active_tex: &mut self.active_tex,\n atlas: &mut self.atlas,\n current_atlas: &mut self.current_atlas,\n }\n }\n}",
"class_signature": "impl<'a> TextRenderer<'a> for Gles2Renderer"
} |
new | alacritty-master/alacritty/src/config/monitor.rs | pub fn new(mut paths: Vec<PathBuf>, event_proxy: EventLoopProxy<Event>) -> Option<Self> {
// Don't monitor config if there is no path to watch.
if paths.is_empty() {
return None;
}
// Calculate the hash for the unmodified list of paths.
let watched_hash = Self::hash_paths(&paths);
// Exclude char devices like `/dev/null`, sockets, and so on, by checking that file type is
// a regular file.
paths.retain(|path| {
// Call `metadata` to resolve symbolic links.
path.metadata().is_ok_and(|metadata| metadata.file_type().is_file())
});
// Canonicalize paths, keeping the base paths for symlinks.
for i in 0..paths.len() {
if let Ok(canonical_path) = paths[i].canonicalize() {
match paths[i].symlink_metadata() {
Ok(metadata) if metadata.file_type().is_symlink() => paths.push(canonical_path),
_ => paths[i] = canonical_path,
}
}
}
// The Duration argument is a debouncing period.
let (tx, rx) = mpsc::channel();
let mut watcher = match RecommendedWatcher::new(
tx.clone(),
Config::default().with_poll_interval(FALLBACK_POLLING_TIMEOUT),
) {
Ok(watcher) => watcher,
Err(err) => {
error!("Unable to watch config file: {}", err);
return None;
},
};
let join_handle = thread::spawn_named("config watcher", move || {
// Get all unique parent directories.
let mut parents = paths
.iter()
.map(|path| {
let mut path = path.clone();
path.pop();
path
})
.collect::<Vec<PathBuf>>();
parents.sort_unstable();
parents.dedup();
// Watch all configuration file directories.
for parent in &parents {
if let Err(err) = watcher.watch(parent, RecursiveMode::NonRecursive) {
debug!("Unable to watch config directory {:?}: {}", parent, err);
}
}
// The current debouncing time.
let mut debouncing_deadline: Option<Instant> = None;
// The events accumulated during the debounce period.
let mut received_events = Vec::new();
loop {
// We use `recv_timeout` to debounce the events coming from the watcher and reduce
// the amount of config reloads.
let event = match debouncing_deadline.as_ref() {
Some(debouncing_deadline) => rx.recv_timeout(
debouncing_deadline.saturating_duration_since(Instant::now()),
),
None => {
let event = rx.recv().map_err(Into::into);
// Set the debouncing deadline after receiving the event.
debouncing_deadline = Some(Instant::now() + DEBOUNCE_DELAY);
event
},
};
match event {
Ok(Ok(event)) => match event.kind {
EventKind::Other if event.info() == Some("shutdown") => break,
EventKind::Any
| EventKind::Create(_)
| EventKind::Modify(_)
| EventKind::Other => {
received_events.push(event);
},
_ => (),
},
Err(RecvTimeoutError::Timeout) => {
// Go back to polling the events.
debouncing_deadline = None;
if received_events
.drain(..)
.flat_map(|event| event.paths.into_iter())
.any(|path| paths.contains(&path))
{
// Always reload the primary configuration file.
let event = Event::new(EventType::ConfigReload(paths[0].clone()), None);
let _ = event_proxy.send_event(event);
}
},
Ok(Err(err)) => {
debug!("Config watcher errors: {:?}", err);
},
Err(err) => {
debug!("Config watcher channel dropped unexpectedly: {}", err);
break;
},
};
}
});
Some(Self { watched_hash, thread: join_handle, shutdown_tx: tx })
} | use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};
use std::path::PathBuf;
use std::sync::mpsc::{self, RecvTimeoutError, Sender};
use std::thread::JoinHandle;
use std::time::{Duration, Instant};
use log::{debug, error, warn};
use notify::{
Config, Error as NotifyError, Event as NotifyEvent, EventKind, RecommendedWatcher,
RecursiveMode, Watcher,
};
use winit::event_loop::EventLoopProxy;
use alacritty_terminal::thread;
use crate::event::{Event, EventType};
const DEBOUNCE_DELAY: Duration = Duration::from_millis(10);
/// The fallback for `RecommendedWatcher` polling.
const FALLBACK_POLLING_TIMEOUT: Duration = Duration::from_secs(1);
/// Config file update monitor.
pub struct ConfigMonitor {
// Background thread running the filesystem watcher loop.
thread: JoinHandle<()>,
// Channel used to inject a synthetic "shutdown" event into the watcher.
shutdown_tx: Sender<Result<NotifyEvent, NotifyError>>,
// Hash of the originally requested paths; `None` if there were too many
// to hash (see `hash_paths`).
watched_hash: Option<u64>,
}
impl ConfigMonitor {
/// Spawn a background watcher for the supplied configuration files.
///
/// Emits `EventType::ConfigReload` for the primary config file whenever
/// any of the watched paths changes. Returns `None` when there is nothing
/// to watch or the watcher could not be created.
pub fn new(mut paths: Vec<PathBuf>, event_proxy: EventLoopProxy<Event>) -> Option<Self> {
// Don't monitor config if there is no path to watch.
if paths.is_empty() {
return None;
}
// Calculate the hash for the unmodified list of paths.
let watched_hash = Self::hash_paths(&paths);
// Exclude char devices like `/dev/null`, sockets, and so on, by checking that file type is
// a regular file.
paths.retain(|path| {
// Call `metadata` to resolve symbolic links.
path.metadata().is_ok_and(|metadata| metadata.file_type().is_file())
});
// Canonicalize paths, keeping the base paths for symlinks.
// For symlinks both the link and its target are watched, since an event
// may be reported against either path.
for i in 0..paths.len() {
if let Ok(canonical_path) = paths[i].canonicalize() {
match paths[i].symlink_metadata() {
Ok(metadata) if metadata.file_type().is_symlink() => paths.push(canonical_path),
_ => paths[i] = canonical_path,
}
}
}
// The Duration argument is a debouncing period.
let (tx, rx) = mpsc::channel();
let mut watcher = match RecommendedWatcher::new(
tx.clone(),
Config::default().with_poll_interval(FALLBACK_POLLING_TIMEOUT),
) {
Ok(watcher) => watcher,
Err(err) => {
error!("Unable to watch config file: {}", err);
return None;
},
};
let join_handle = thread::spawn_named("config watcher", move || {
// Get all unique parent directories.
// Watching directories instead of files survives editors that replace
// the file on save.
let mut parents = paths
.iter()
.map(|path| {
let mut path = path.clone();
path.pop();
path
})
.collect::<Vec<PathBuf>>();
parents.sort_unstable();
parents.dedup();
// Watch all configuration file directories.
for parent in &parents {
if let Err(err) = watcher.watch(parent, RecursiveMode::NonRecursive) {
debug!("Unable to watch config directory {:?}: {}", parent, err);
}
}
// The current debouncing time.
let mut debouncing_deadline: Option<Instant> = None;
// The events accumulated during the debounce period.
let mut received_events = Vec::new();
loop {
// We use `recv_timeout` to debounce the events coming from the watcher and reduce
// the amount of config reloads.
let event = match debouncing_deadline.as_ref() {
Some(debouncing_deadline) => rx.recv_timeout(
debouncing_deadline.saturating_duration_since(Instant::now()),
),
None => {
let event = rx.recv().map_err(Into::into);
// Set the debouncing deadline after receiving the event.
debouncing_deadline = Some(Instant::now() + DEBOUNCE_DELAY);
event
},
};
match event {
Ok(Ok(event)) => match event.kind {
// Synthetic event injected by `shutdown`.
EventKind::Other if event.info() == Some("shutdown") => break,
EventKind::Any
| EventKind::Create(_)
| EventKind::Modify(_)
| EventKind::Other => {
received_events.push(event);
},
_ => (),
},
Err(RecvTimeoutError::Timeout) => {
// Go back to polling the events.
debouncing_deadline = None;
// Only reload if a watched file was actually touched.
if received_events
.drain(..)
.flat_map(|event| event.paths.into_iter())
.any(|path| paths.contains(&path))
{
// Always reload the primary configuration file.
let event = Event::new(EventType::ConfigReload(paths[0].clone()), None);
let _ = event_proxy.send_event(event);
}
},
Ok(Err(err)) => {
debug!("Config watcher errors: {:?}", err);
},
Err(err) => {
debug!("Config watcher channel dropped unexpectedly: {}", err);
break;
},
};
}
});
Some(Self { watched_hash, thread: join_handle, shutdown_tx: tx })
}
/// Synchronously shut down the monitor.
pub fn shutdown(self) {
// Request shutdown.
// The sentinel is an `Other` event carrying the "shutdown" info string,
// which the watcher loop recognizes and breaks on.
let mut event = NotifyEvent::new(EventKind::Other);
event = event.set_info("shutdown");
let _ = self.shutdown_tx.send(Ok(event));
// Wait for thread to terminate.
if let Err(err) = self.thread.join() {
warn!("config monitor shutdown failed: {err:?}");
}
}
/// Check if the config monitor needs to be restarted.
///
/// This checks the supplied list of files against the monitored files to determine if a
/// restart is necessary.
pub fn needs_restart(&self, files: &[PathBuf]) -> bool {
// A restart is required when the new set of config files hashes
// differently from the set currently being watched, or when hashing
// failed (too many files to hash without allocating).
//
// Bug fix: the comparison was inverted (`==`), which reported a restart
// exactly when the watched files were unchanged and suppressed it when
// they actually differed.
Self::hash_paths(files).map_or(true, |hash| Some(hash) != self.watched_hash)
}
/// Generate the hash for a list of paths.
fn hash_paths(files: &[PathBuf]) -> Option<u64> {
// Fixed capacity keeps this routine allocation-free; lists larger than
// this cannot be hashed and yield `None`.
const MAX_PATHS: usize = 1024;
if files.len() > MAX_PATHS {
    return None;
}

// Copy the borrowed paths into a fixed-size scratch array, then sort so
// that the order in which paths were supplied cannot change the hash.
let mut scratch = [None; MAX_PATHS];
for (slot, file) in scratch.iter_mut().zip(files) {
    *slot = Some(file);
}
scratch.sort_unstable();

// Hash the whole (sorted) slice, unused `None` slots included.
let mut hasher = DefaultHasher::new();
Hash::hash_slice(&scratch, &mut hasher);
Some(hasher.finish())
}
}
| rust | {
"argument_definitions": [
{
"definitions": [
"pub struct Vec<T, #[unstable(feature = \"allocator_api\", issue = \"32838\")] A: Allocator = Global> {\n buf: RawVec<T, A>,\n len: usize,\n}",
"pub struct PathBuf {\n inner: OsString,\n}"
],
"name": "paths",
"type": "Vec<PathBuf>"
},
{
"definitions": [
"pub struct EventLoopProxy<T: 'static> {\n event_loop_proxy: platform_impl::EventLoopProxy<T>,\n}",
"pub struct EventLoopProxy<T: 'static> {\n event_loop_proxy: platform_impl::EventLoopProxy<T>,\n}"
],
"name": "event_proxy",
"type": "EventLoopProxy<Event>"
}
],
"end_line": 149,
"name": "new",
"signature": "pub fn new(mut paths: Vec<PathBuf>, event_proxy: EventLoopProxy<Event>) -> Option<Self>",
"start_line": 32
} | {
"class_name": "impl ConfigMonitor {\n pub fn new(mut paths: Vec<PathBuf>, event_proxy: EventLoopProxy<Event>) -> Option<Self> {\n // Don't monitor config if there is no path to watch.\n if paths.is_empty() {\n return None;\n }\n\n // Calculate the hash for the unmodified list of paths.\n let watched_hash = Self::hash_paths(&paths);\n\n // Exclude char devices like `/dev/null`, sockets, and so on, by checking that file type is\n // a regular file.\n paths.retain(|path| {\n // Call `metadata` to resolve symbolic links.\n path.metadata().is_ok_and(|metadata| metadata.file_type().is_file())\n });\n\n // Canonicalize paths, keeping the base paths for symlinks.\n for i in 0..paths.len() {\n if let Ok(canonical_path) = paths[i].canonicalize() {\n match paths[i].symlink_metadata() {\n Ok(metadata) if metadata.file_type().is_symlink() => paths.push(canonical_path),\n _ => paths[i] = canonical_path,\n }\n }\n }\n\n // The Duration argument is a debouncing period.\n let (tx, rx) = mpsc::channel();\n let mut watcher = match RecommendedWatcher::new(\n tx.clone(),\n Config::default().with_poll_interval(FALLBACK_POLLING_TIMEOUT),\n ) {\n Ok(watcher) => watcher,\n Err(err) => {\n error!(\"Unable to watch config file: {}\", err);\n return None;\n },\n };\n\n let join_handle = thread::spawn_named(\"config watcher\", move || {\n // Get all unique parent directories.\n let mut parents = paths\n .iter()\n .map(|path| {\n let mut path = path.clone();\n path.pop();\n path\n })\n .collect::<Vec<PathBuf>>();\n parents.sort_unstable();\n parents.dedup();\n\n // Watch all configuration file directories.\n for parent in &parents {\n if let Err(err) = watcher.watch(parent, RecursiveMode::NonRecursive) {\n debug!(\"Unable to watch config directory {:?}: {}\", parent, err);\n }\n }\n\n // The current debouncing time.\n let mut debouncing_deadline: Option<Instant> = None;\n\n // The events accumulated during the debounce period.\n let mut received_events = Vec::new();\n\n loop {\n // We use 
`recv_timeout` to debounce the events coming from the watcher and reduce\n // the amount of config reloads.\n let event = match debouncing_deadline.as_ref() {\n Some(debouncing_deadline) => rx.recv_timeout(\n debouncing_deadline.saturating_duration_since(Instant::now()),\n ),\n None => {\n let event = rx.recv().map_err(Into::into);\n // Set the debouncing deadline after receiving the event.\n debouncing_deadline = Some(Instant::now() + DEBOUNCE_DELAY);\n event\n },\n };\n\n match event {\n Ok(Ok(event)) => match event.kind {\n EventKind::Other if event.info() == Some(\"shutdown\") => break,\n EventKind::Any\n | EventKind::Create(_)\n | EventKind::Modify(_)\n | EventKind::Other => {\n received_events.push(event);\n },\n _ => (),\n },\n Err(RecvTimeoutError::Timeout) => {\n // Go back to polling the events.\n debouncing_deadline = None;\n\n if received_events\n .drain(..)\n .flat_map(|event| event.paths.into_iter())\n .any(|path| paths.contains(&path))\n {\n // Always reload the primary configuration file.\n let event = Event::new(EventType::ConfigReload(paths[0].clone()), None);\n let _ = event_proxy.send_event(event);\n }\n },\n Ok(Err(err)) => {\n debug!(\"Config watcher errors: {:?}\", err);\n },\n Err(err) => {\n debug!(\"Config watcher channel dropped unexpectedly: {}\", err);\n break;\n },\n };\n }\n });\n\n Some(Self { watched_hash, thread: join_handle, shutdown_tx: tx })\n }\n\n /// Synchronously shut down the monitor.\n pub fn shutdown(self) {\n // Request shutdown.\n let mut event = NotifyEvent::new(EventKind::Other);\n event = event.set_info(\"shutdown\");\n let _ = self.shutdown_tx.send(Ok(event));\n\n // Wait for thread to terminate.\n if let Err(err) = self.thread.join() {\n warn!(\"config monitor shutdown failed: {err:?}\");\n }\n }\n\n /// Check if the config monitor needs to be restarted.\n ///\n /// This checks the supplied list of files against the monitored files to determine if a\n /// restart is necessary.\n pub fn needs_restart(&self, files: 
&[PathBuf]) -> bool {\n Self::hash_paths(files).map_or(true, |hash| Some(hash) == self.watched_hash)\n }\n\n /// Generate the hash for a list of paths.\n fn hash_paths(files: &[PathBuf]) -> Option<u64> {\n // Use file count limit to avoid allocations.\n const MAX_PATHS: usize = 1024;\n if files.len() > MAX_PATHS {\n return None;\n }\n\n // Sort files to avoid restart on order change.\n let mut sorted_files = [None; MAX_PATHS];\n for (i, file) in files.iter().enumerate() {\n sorted_files[i] = Some(file);\n }\n sorted_files.sort_unstable();\n\n // Calculate hash for the paths, regardless of order.\n let mut hasher = DefaultHasher::new();\n Hash::hash_slice(&sorted_files, &mut hasher);\n Some(hasher.finish())\n }\n}",
"class_signature": "impl ConfigMonitor"
} |
load_imports | alacritty-master/alacritty/src/config/mod.rs | fn load_imports(
config: &Value,
base_path: &Path,
config_paths: &mut Vec<PathBuf>,
recursion_limit: usize,
) -> Value {
// Get paths for all imports.
let import_paths = match imports(config, base_path, recursion_limit) {
Ok(import_paths) => import_paths,
Err(err) => {
error!(target: LOG_TARGET_CONFIG, "{err}");
return Value::Table(Table::new());
},
};
// Parse configs for all imports recursively.
let mut merged = Value::Table(Table::new());
for import_path in import_paths {
let path = match import_path {
Ok(path) => path,
Err(err) => {
error!(target: LOG_TARGET_CONFIG, "{err}");
continue;
},
};
match parse_config(&path, config_paths, recursion_limit - 1) {
Ok(config) => merged = serde_utils::merge(merged, config),
Err(Error::Io(io)) if io.kind() == io::ErrorKind::NotFound => {
info!(target: LOG_TARGET_CONFIG, "Config import not found:\n {:?}", path.display());
continue;
},
Err(err) => {
error!(target: LOG_TARGET_CONFIG, "Unable to import config {:?}: {}", path, err)
},
}
}
merged
} | use std::fmt::{self, Display, Formatter};
use std::path::{Path, PathBuf};
use std::result::Result as StdResult;
use std::{env, fs, io};
use log::{debug, error, info, warn};
use serde::Deserialize;
use serde_yaml::Error as YamlError;
use toml::de::Error as TomlError;
use toml::ser::Error as TomlSeError;
use toml::{Table, Value};
pub mod bell;
pub mod color;
pub mod cursor;
pub mod debug;
pub mod font;
pub mod general;
pub mod monitor;
pub mod scrolling;
pub mod selection;
pub mod serde_utils;
pub mod terminal;
pub mod ui_config;
pub mod window;
mod bindings;
mod mouse;
use crate::cli::Options;
#[cfg(test)]
pub use crate::config::bindings::Binding;
pub use crate::config::bindings::{
Action, BindingKey, BindingMode, KeyBinding, MouseAction, SearchAction, ViAction,
};
pub use crate::config::ui_config::UiConfig;
use crate::logging::LOG_TARGET_CONFIG;
/// Maximum number of depth for the configuration file imports.
pub const IMPORT_RECURSION_LIMIT: usize = 5;
/// Result from config loading.
pub type Result<T> = std::result::Result<T, Error>;
/// Errors occurring during config loading.
#[derive(Debug)]
pub enum Error {
/// Couldn't read $HOME environment variable.
ReadingEnvHome(env::VarError),
/// I/O error while reading a config file.
Io(io::Error),
/// Invalid TOML in a config file.
Toml(TomlError),
/// Failed TOML serialization during the YAML-to-TOML migration.
TomlSe(TomlSeError),
/// Invalid YAML in a legacy config file.
Yaml(YamlError),
}
impl std::error::Error for Error {
// Expose the underlying error's source for error-chain reporting.
fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
match self {
Error::ReadingEnvHome(err) => err.source(),
Error::Io(err) => err.source(),
Error::Toml(err) => err.source(),
Error::TomlSe(err) => err.source(),
Error::Yaml(err) => err.source(),
}
}
}
impl Display for Error {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
match self {
Error::ReadingEnvHome(err) => {
write!(f, "Unable to read $HOME environment variable: {err}")
},
Error::Io(err) => write!(f, "Error reading config file: {err}"),
Error::Toml(err) => write!(f, "Config error: {err}"),
// `TomlSe` only occurs while converting a legacy YAML config to
// TOML (see `deserialize_config`), hence the message.
Error::TomlSe(err) => write!(f, "Yaml conversion error: {err}"),
Error::Yaml(err) => write!(f, "Config error: {err}"),
}
}
}
// `From` conversions so `?` can lift each source error into its `Error`
// variant.
impl From<env::VarError> for Error {
fn from(val: env::VarError) -> Self {
Error::ReadingEnvHome(val)
}
}
impl From<io::Error> for Error {
fn from(val: io::Error) -> Self {
Error::Io(val)
}
}
impl From<TomlError> for Error {
fn from(val: TomlError) -> Self {
Error::Toml(val)
}
}
impl From<TomlSeError> for Error {
fn from(val: TomlSeError) -> Self {
Error::TomlSe(val)
}
}
impl From<YamlError> for Error {
fn from(val: YamlError) -> Self {
Error::Yaml(val)
}
}
/// Load the configuration file.
// Never fails: falls back to the default config when no file is found or
// the file fails to parse.
pub fn load(options: &mut Options) -> UiConfig {
// Prefer an explicit `--config-file`, then installed TOML, then legacy YAML.
let config_path = options
.config_file
.clone()
.or_else(|| installed_config("toml"))
.or_else(|| installed_config("yml"));
// Load the config using the following fallback behavior:
// - Config path + CLI overrides
// - CLI overrides
// - Default
let mut config = config_path
.as_ref()
.and_then(|config_path| load_from(config_path).ok())
.unwrap_or_else(|| {
let mut config = UiConfig::default();
match config_path {
// Remember the path so the monitor can watch it even though
// loading failed.
Some(config_path) => config.config_paths.push(config_path),
None => info!(target: LOG_TARGET_CONFIG, "No config file found; using default"),
}
config
});
after_loading(&mut config, options);
config
}
/// Attempt to reload the configuration file.
// Unlike `load`, this propagates errors so the caller can keep the old
// config on failure.
pub fn reload(config_path: &Path, options: &mut Options) -> Result<UiConfig> {
debug!("Reloading configuration file: {:?}", config_path);
// Load config, propagating errors.
let mut config = load_from(config_path)?;
after_loading(&mut config, options);
Ok(config)
}
/// Modifications after the `UiConfig` object is created.
fn after_loading(config: &mut UiConfig, options: &mut Options) {
// Override config with CLI options; CLI always wins over file contents.
options.override_config(config);
}
/// Load configuration file and log errors.
// Thin wrapper over `read_config` that logs every failure before
// propagating it.
fn load_from(path: &Path) -> Result<UiConfig> {
match read_config(path) {
Ok(config) => Ok(config),
Err(Error::Io(io)) if io.kind() == io::ErrorKind::NotFound => {
error!(target: LOG_TARGET_CONFIG, "Unable to load config {:?}: File not found", path);
Err(Error::Io(io))
},
Err(err) => {
error!(target: LOG_TARGET_CONFIG, "Unable to load config {:?}: {}", path, err);
Err(err)
},
}
}
/// Deserialize configuration file from path.
fn read_config(path: &Path) -> Result<UiConfig> {
// Collect every file touched during parsing (imports included) so the
// monitor can watch them all.
let mut config_paths = Vec::new();
let config_value = parse_config(path, &mut config_paths, IMPORT_RECURSION_LIMIT)?;
// Deserialize to concrete type.
let mut config = UiConfig::deserialize(config_value)?;
config.config_paths = config_paths;
Ok(config)
}
/// Deserialize all configuration files as generic Value.
// Parse one config file and, recursively, everything it imports, recording
// each visited path in `config_paths`.
fn parse_config(
path: &Path,
config_paths: &mut Vec<PathBuf>,
recursion_limit: usize,
) -> Result<Value> {
config_paths.push(path.to_owned());
// Deserialize the configuration file.
let config = deserialize_config(path, false)?;
// Merge config with imports; the importing file's values win over its
// imports (merge order: imports first, then `config` on top).
let imports = load_imports(&config, path, config_paths, recursion_limit);
Ok(serde_utils::merge(imports, config))
}
/// Deserialize a configuration file.
pub fn deserialize_config(path: &Path, warn_pruned: bool) -> Result<Value> {
let mut contents = fs::read_to_string(path)?;
// Remove UTF-8 BOM (U+FEFF encodes to 3 bytes, hence `split_off(3)`).
if contents.starts_with('\u{FEFF}') {
contents = contents.split_off(3);
}
// Convert YAML to TOML as a transitionary fallback mechanism.
let extension = path.extension().unwrap_or_default();
if (extension == "yaml" || extension == "yml") && !contents.trim().is_empty() {
warn!(
"YAML config {path:?} is deprecated, please migrate to TOML using `alacritty migrate`"
);
let mut value: serde_yaml::Value = serde_yaml::from_str(&contents)?;
// YAML nulls have no TOML equivalent and must be removed first.
prune_yaml_nulls(&mut value, warn_pruned);
contents = toml::to_string(&value)?;
}
// Load configuration file as Value.
let config: Value = toml::from_str(&contents)?;
Ok(config)
}
/// Load all referenced configuration files.
// Load and merge every file imported by `config`.
//
// Import errors are logged rather than propagated: a broken or missing
// import never prevents the rest of the configuration from loading.
fn load_imports(
config: &Value,
base_path: &Path,
config_paths: &mut Vec<PathBuf>,
recursion_limit: usize,
) -> Value {
// Get paths for all imports.
let import_paths = match imports(config, base_path, recursion_limit) {
Ok(import_paths) => import_paths,
Err(err) => {
error!(target: LOG_TARGET_CONFIG, "{err}");
return Value::Table(Table::new());
},
};
// Parse configs for all imports recursively.
// Later imports are merged on top, so they win over earlier ones.
let mut merged = Value::Table(Table::new());
for import_path in import_paths {
let path = match import_path {
Ok(path) => path,
Err(err) => {
error!(target: LOG_TARGET_CONFIG, "{err}");
continue;
},
};
match parse_config(&path, config_paths, recursion_limit - 1) {
Ok(config) => merged = serde_utils::merge(merged, config),
Err(Error::Io(io)) if io.kind() == io::ErrorKind::NotFound => {
info!(target: LOG_TARGET_CONFIG, "Config import not found:\n  {:?}", path.display());
continue;
},
Err(err) => {
error!(target: LOG_TARGET_CONFIG, "Unable to import config {:?}: {}", path, err)
},
}
}
merged
}
/// Get all import paths for a configuration.
// Extract and normalize the import list from a parsed config value.
//
// Accepts both the legacy top-level `import` key and the newer
// `general.import` location. Individual bad entries become inner `Err`s
// so one invalid element doesn't discard the rest.
pub fn imports(
config: &Value,
base_path: &Path,
recursion_limit: usize,
) -> StdResult<Vec<StdResult<PathBuf, String>>, String> {
let imports =
config.get("import").or_else(|| config.get("general").and_then(|g| g.get("import")));
let imports = match imports {
Some(Value::Array(imports)) => imports,
Some(_) => return Err("Invalid import type: expected a sequence".into()),
None => return Ok(Vec::new()),
};
// Limit recursion to prevent infinite loops.
if !imports.is_empty() && recursion_limit == 0 {
return Err("Exceeded maximum configuration import depth".into());
}
let mut import_paths = Vec::new();
for import in imports {
let path = match import {
Value::String(path) => PathBuf::from(path),
_ => {
import_paths.push(Err("Invalid import element type: expected path string".into()));
continue;
},
};
let normalized = normalize_import(base_path, path);
import_paths.push(Ok(normalized));
}
Ok(import_paths)
}
/// Normalize import paths.
// Expand `~/` and resolve relative imports against the importing file's
// directory.
pub fn normalize_import(base_config_path: &Path, import_path: impl Into<PathBuf>) -> PathBuf {
let mut import_path = import_path.into();
// Resolve paths relative to user's home directory.
if let (Ok(stripped), Some(home_dir)) = (import_path.strip_prefix("~/"), home::home_dir()) {
import_path = home_dir.join(stripped);
}
// Anything still relative is taken relative to the importing config file.
if import_path.is_relative() {
if let Some(base_config_dir) = base_config_path.parent() {
import_path = base_config_dir.join(import_path)
}
}
import_path
}
/// Prune the nulls from the YAML to ensure TOML compatibility.
fn prune_yaml_nulls(value: &mut serde_yaml::Value, warn_pruned: bool) {
// Returns `true` when `value` itself became prunable (null, or a
// container emptied by pruning), so the parent can drop it too.
fn walk(value: &mut serde_yaml::Value, warn_pruned: bool) -> bool {
match value {
serde_yaml::Value::Sequence(sequence) => {
sequence.retain_mut(|value| !walk(value, warn_pruned));
sequence.is_empty()
},
serde_yaml::Value::Mapping(mapping) => {
mapping.retain(|key, value| {
let retain = !walk(value, warn_pruned);
if let Some(key_name) = key.as_str().filter(|_| !retain && warn_pruned) {
eprintln!("Removing null key \"{key_name}\" from the end config");
}
retain
});
mapping.is_empty()
},
serde_yaml::Value::Null => true,
_ => false,
}
}
if walk(value, warn_pruned) {
// When the value itself is null return the mapping.
*value = serde_yaml::Value::Mapping(Default::default());
}
}
/// Get the location of the first found default config file paths
/// according to the following order:
///
/// 1. $XDG_CONFIG_HOME/alacritty/alacritty.toml
/// 2. $XDG_CONFIG_HOME/alacritty.toml
/// 3. $HOME/.config/alacritty/alacritty.toml
/// 4. $HOME/.alacritty.toml
#[cfg(not(windows))]
pub fn installed_config(suffix: &str) -> Option<PathBuf> {
let file_name = format!("alacritty.{suffix}");
// Try using XDG location by default.
xdg::BaseDirectories::with_prefix("alacritty")
.find_config_file(&file_name)
.or_else(|| xdg::BaseDirectories::new().find_config_file(&file_name))
.or_else(|| {
// Non-XDG fallbacks under $HOME, checked only when XDG lookup failed.
if let Ok(home) = env::var("HOME") {
// Fallback path: $HOME/.config/alacritty/alacritty.toml.
let fallback = PathBuf::from(&home).join(".config/alacritty").join(&file_name);
if fallback.exists() {
return Some(fallback);
}
// Fallback path: $HOME/.alacritty.toml.
let hidden_name = format!(".{file_name}");
let fallback = PathBuf::from(&home).join(hidden_name);
if fallback.exists() {
return Some(fallback);
}
}
None
})
}
#[cfg(windows)]
// Windows: only %APPDATA%\alacritty\alacritty.<suffix> is checked.
pub fn installed_config(suffix: &str) -> Option<PathBuf> {
let file_name = format!("alacritty.{suffix}");
dirs::config_dir().map(|path| path.join("alacritty").join(file_name)).filter(|new| new.exists())
}
#[cfg(test)]
mod tests {
use super::*;
// The default (empty) config must always deserialize.
#[test]
fn empty_config() {
toml::from_str::<UiConfig>("").unwrap();
}
// Helper mirroring the YAML-to-TOML migration in `deserialize_config`.
fn yaml_to_toml(contents: &str) -> String {
let mut value: serde_yaml::Value = serde_yaml::from_str(contents).unwrap();
prune_yaml_nulls(&mut value, false);
toml::to_string(&value).unwrap()
}
// Null YAML values must be pruned before TOML serialization.
#[test]
fn yaml_with_nulls() {
let contents = r#"
window:
blinking: Always
cursor:
not_blinking: Always
some_array:
- { window: }
- { window: "Hello" }
"#;
let toml = yaml_to_toml(contents);
assert_eq!(
toml.trim(),
r#"[window]
blinking = "Always"
not_blinking = "Always"
[[window.some_array]]
window = "Hello""#
);
}
// An effectively-empty YAML file converts to an empty TOML document.
#[test]
fn empty_yaml_to_toml() {
let contents = r#"
"#;
let toml = yaml_to_toml(contents);
assert!(toml.is_empty());
}
}
| rust | {
"argument_definitions": [
{
"definitions": [
"pub enum Value {\n /// Represents a TOML string\n String(String),\n /// Represents a TOML integer\n Integer(i64),\n /// Represents a TOML float\n Float(f64),\n /// Represents a TOML boolean\n Boolean(bool),\n /// Represents a TOML datetime\n Datetime(Datetime),\n /// Represents a TOML array\n Array(Array),\n /// Represents a TOML table\n Table(Table),\n}"
],
"name": "config",
"type": "&Value"
},
{
"definitions": [
"pub struct Path {\n inner: OsStr,\n}"
],
"name": "base_path",
"type": "&Path"
},
{
"definitions": [
"pub struct Vec<T, #[unstable(feature = \"allocator_api\", issue = \"32838\")] A: Allocator = Global> {\n buf: RawVec<T, A>,\n len: usize,\n}",
"pub struct PathBuf {\n inner: OsString,\n}"
],
"name": "config_paths",
"type": "&mut Vec<PathBuf>"
}
],
"end_line": 277,
"name": "load_imports",
"signature": "fn load_imports(\n config: &Value,\n base_path: &Path,\n config_paths: &mut Vec<PathBuf>,\n recursion_limit: usize,\n) -> Value",
"start_line": 238
} | {
"class_name": "",
"class_signature": ""
} |
keyboard_input | alacritty-master/alacritty/src/display/hint.rs | pub fn keyboard_input(&mut self, term: &Term<T>, c: char) -> Option<HintMatch> {
match c {
// Use backspace to remove the last character pressed.
'\x08' | '\x1f' => {
self.keys.pop();
},
// Cancel hint highlighting on ESC/Ctrl+c.
'\x1b' | '\x03' => self.stop(),
_ => (),
}
// Update the visible matches.
self.update_matches(term);
let hint = self.hint.as_ref()?;
// Find the last label starting with the input character.
let mut labels = self.labels.iter().enumerate().rev();
let (index, label) = labels.find(|(_, label)| !label.is_empty() && label[0] == c)?;
// Check if the selected label is fully matched.
if label.len() == 1 {
let bounds = self.matches[index].clone();
let hint = hint.clone();
// Exit hint mode unless it requires explicit dismissal.
if hint.persist {
self.keys.clear();
} else {
self.stop();
}
// Hyperlinks take precedence over regex matches.
let hyperlink = term.grid()[*bounds.start()].hyperlink();
Some(HintMatch { bounds, hyperlink, hint })
} else {
// Store character to preserve the selection.
self.keys.push(c);
None
}
} | use std::borrow::Cow;
use std::cmp::Reverse;
use std::collections::HashSet;
use std::iter;
use std::rc::Rc;
use ahash::RandomState;
use winit::keyboard::ModifiersState;
use alacritty_terminal::grid::{BidirectionalIterator, Dimensions};
use alacritty_terminal::index::{Boundary, Column, Direction, Line, Point};
use alacritty_terminal::term::cell::Hyperlink;
use alacritty_terminal::term::search::{Match, RegexIter, RegexSearch};
use alacritty_terminal::term::{Term, TermMode};
use crate::config::ui_config::{Hint, HintAction};
use crate::config::UiConfig;
/// Maximum number of linewraps followed outside of the viewport during search highlighting.
///
/// Caps how far the regex search range is extended past the visible viewport.
pub const MAX_SEARCH_LINES: usize = 100;

/// Percentage of characters in the hints alphabet used for the last character.
const HINT_SPLIT_PERCENTAGE: f32 = 0.5;
/// Keyboard regex hint state.
pub struct HintState {
    /// Hint currently in use; `None` while hint mode is inactive.
    hint: Option<Rc<Hint>>,

    /// Alphabet for hint labels.
    alphabet: String,

    /// Visible matches, index-aligned with `labels`.
    matches: Vec<Match>,

    /// Key label for each visible match; an empty label marks an unselectable match.
    labels: Vec<Vec<char>>,

    /// Keys pressed so far for hint selection.
    keys: Vec<char>,
}
impl HintState {
    /// Initialize an inactive hint state.
    pub fn new<S: Into<String>>(alphabet: S) -> Self {
        Self {
            alphabet: alphabet.into(),
            hint: Default::default(),
            matches: Default::default(),
            labels: Default::default(),
            keys: Default::default(),
        }
    }

    /// Check if a hint selection is in progress.
    pub fn active(&self) -> bool {
        self.hint.is_some()
    }

    /// Start the hint selection process.
    pub fn start(&mut self, hint: Rc<Hint>) {
        self.hint = Some(hint);
    }

    /// Cancel the hint highlighting process.
    ///
    /// Resets all matches, labels, and typed keys and deactivates the hint.
    fn stop(&mut self) {
        self.matches.clear();
        self.labels.clear();
        self.keys.clear();
        self.hint = None;
    }

    /// Update the visible hint matches and key labels.
    ///
    /// No-op while no hint is active; cancels the selection entirely when no
    /// match remains visible.
    pub fn update_matches<T>(&mut self, term: &Term<T>) {
        let hint = match self.hint.as_mut() {
            Some(hint) => hint,
            None => return,
        };

        // Clear current matches.
        self.matches.clear();

        // Add escape sequence hyperlinks.
        if hint.content.hyperlinks {
            self.matches.extend(visible_unique_hyperlinks_iter(term));
        }

        // Add visible regex matches.
        if let Some(regex) = hint.content.regex.as_ref() {
            regex.with_compiled(|regex| {
                let matches = visible_regex_match_iter(term, regex);

                // Apply post-processing and search for sub-matches if necessary.
                if hint.post_processing {
                    let mut matches = matches.collect::<Vec<_>>();
                    self.matches.extend(matches.drain(..).flat_map(|rm| {
                        HintPostProcessor::new(term, regex, rm).collect::<Vec<_>>()
                    }));
                } else {
                    self.matches.extend(matches);
                }
            });
        }

        // Cancel highlight with no visible matches.
        if self.matches.is_empty() {
            self.stop();
            return;
        }

        // Sort and dedup ranges. Currently overlapped but not exactly same ranges are kept.
        self.matches.sort_by_key(|bounds| (*bounds.start(), Reverse(*bounds.end())));
        self.matches.dedup_by_key(|bounds| *bounds.start());

        let mut generator = HintLabels::new(&self.alphabet, HINT_SPLIT_PERCENTAGE);
        let match_count = self.matches.len();
        let keys_len = self.keys.len();

        // Get the label for each match.
        //
        // Labels are generated shortest-first but assigned starting from the last
        // match. A label survives only when its prefix agrees with the keys typed so
        // far; the typed prefix is stripped, and mismatches get an empty
        // (unselectable) label.
        self.labels.resize(match_count, Vec::new());
        for i in (0..match_count).rev() {
            let mut label = generator.next();
            if label.len() >= keys_len && label[..keys_len] == self.keys[..] {
                self.labels[i] = label.split_off(keys_len);
            } else {
                self.labels[i] = Vec::new();
            }
        }
    }

    /// Handle keyboard input during hint selection.
    ///
    /// Returns the completed [`HintMatch`] once the typed label uniquely selects a
    /// match; otherwise records the key (or handles backspace/cancel) and returns
    /// `None`.
    pub fn keyboard_input<T>(&mut self, term: &Term<T>, c: char) -> Option<HintMatch> {
        match c {
            // Use backspace to remove the last character pressed.
            '\x08' | '\x1f' => {
                self.keys.pop();
            },
            // Cancel hint highlighting on ESC/Ctrl+c.
            '\x1b' | '\x03' => self.stop(),
            _ => (),
        }

        // Update the visible matches.
        self.update_matches(term);

        let hint = self.hint.as_ref()?;

        // Find the last label starting with the input character.
        let mut labels = self.labels.iter().enumerate().rev();
        let (index, label) = labels.find(|(_, label)| !label.is_empty() && label[0] == c)?;

        // Check if the selected label is fully matched.
        if label.len() == 1 {
            let bounds = self.matches[index].clone();
            let hint = hint.clone();

            // Exit hint mode unless it requires explicit dismissal.
            if hint.persist {
                self.keys.clear();
            } else {
                self.stop();
            }

            // Hyperlinks take precedence over regex matches.
            let hyperlink = term.grid()[*bounds.start()].hyperlink();
            Some(HintMatch { bounds, hyperlink, hint })
        } else {
            // Store character to preserve the selection.
            self.keys.push(c);

            None
        }
    }

    /// Hint key labels.
    pub fn labels(&self) -> &Vec<Vec<char>> {
        &self.labels
    }

    /// Visible hint regex matches.
    pub fn matches(&self) -> &[Match] {
        &self.matches
    }

    /// Update the alphabet used for hint labels.
    ///
    /// Typed keys are discarded since they index into the previous alphabet.
    pub fn update_alphabet(&mut self, alphabet: &str) {
        if self.alphabet != alphabet {
            alphabet.clone_into(&mut self.alphabet);
            self.keys.clear();
        }
    }
}
/// Hint match which was selected by the user.
#[derive(PartialEq, Eq, Debug, Clone)]
pub struct HintMatch {
    /// Terminal range matching the hint.
    bounds: Match,

    /// OSC 8 hyperlink; `Some` only for hyperlink-based matches.
    hyperlink: Option<Hyperlink>,

    /// Hint which triggered this match.
    hint: Rc<Hint>,
}
impl HintMatch {
    /// Whether this match should be highlighted for the pointed cell.
    ///
    /// Hyperlink matches require the hovered hyperlink to be identical; regex
    /// matches (no hyperlink) require the point to fall inside the bounds.
    #[inline]
    pub fn should_highlight(&self, point: Point, pointed_hyperlink: Option<&Hyperlink>) -> bool {
        self.hyperlink.as_ref() == pointed_hyperlink
            && (self.hyperlink.is_some() || self.bounds.contains(&point))
    }

    /// Action to execute when this hint is activated.
    #[inline]
    pub fn action(&self) -> &HintAction {
        &self.hint.action
    }

    /// Terminal range covered by this match.
    #[inline]
    pub fn bounds(&self) -> &Match {
        &self.bounds
    }

    /// OSC 8 hyperlink backing this match, if any.
    pub fn hyperlink(&self) -> Option<&Hyperlink> {
        self.hyperlink.as_ref()
    }

    /// Get the text content of the hint match.
    ///
    /// This will always revalidate the hint text, to account for terminal content
    /// changes since the [`HintMatch`] was constructed. The text of the hint might
    /// be different from its original value, but it will **always** be a valid
    /// match for this hint.
    pub fn text<T>(&self, term: &Term<T>) -> Option<Cow<'_, str>> {
        // Revalidate hyperlink match: the hyperlink and its bounds must be unchanged.
        if let Some(hyperlink) = &self.hyperlink {
            let (validated, bounds) = hyperlink_at(term, *self.bounds.start())?;
            return (&validated == hyperlink && bounds == self.bounds)
                .then(|| hyperlink.uri().into());
        }

        // Revalidate regex match by re-running the search at the stored start point.
        let regex = self.hint.content.regex.as_ref()?;
        let bounds = regex.with_compiled(|regex| {
            regex_match_at(term, *self.bounds.start(), regex, self.hint.post_processing)
        })??;
        (bounds == self.bounds)
            .then(|| term.bounds_to_string(*bounds.start(), *bounds.end()).into())
    }
}
/// Generator for creating new hint labels.
struct HintLabels {
    /// Full character set available.
    alphabet: Vec<char>,

    /// Alphabet indices for the next label, least-significant (tail) digit first.
    indices: Vec<usize>,

    /// Point separating the alphabet's head and tail characters.
    ///
    /// To make identification of the tail character easy, part of the alphabet cannot be used for
    /// any other position.
    ///
    /// All characters in the alphabet before this index will be used for the last character, while
    /// the rest will be used for everything else.
    split_point: usize,
}
impl HintLabels {
    /// Create a new label generator.
    ///
    /// The `split_ratio` should be a number between 0.0 and 1.0 representing the percentage of
    /// elements in the alphabet which are reserved for the tail of the hint label.
    ///
    /// NOTE(review): assumes a non-empty alphabet — `alphabet.len() - 1` would
    /// underflow for an empty one; presumably guaranteed by config validation,
    /// confirm upstream.
    fn new(alphabet: impl Into<String>, split_ratio: f32) -> Self {
        let alphabet: Vec<char> = alphabet.into().chars().collect();
        let split_point = ((alphabet.len() - 1) as f32 * split_ratio.min(1.)) as usize;
        Self { indices: vec![0], split_point, alphabet }
    }

    /// Get the characters for the next label.
    ///
    /// Indices are stored tail-first, so they are reversed for display order.
    fn next(&mut self) -> Vec<char> {
        let characters = self.indices.iter().rev().map(|index| self.alphabet[*index]).collect();
        self.increment();
        characters
    }

    /// Increment the character sequence.
    ///
    /// Counts like a mixed-radix number: the tail digit cycles through
    /// `[0, split_point]`, every other digit through `(split_point, alphabet_len)`.
    fn increment(&mut self) {
        // Increment the last character; if it's not at the split point we're done.
        let tail = &mut self.indices[0];
        if *tail < self.split_point {
            *tail += 1;
            return;
        }
        *tail = 0;

        // Increment all other characters in reverse order.
        let alphabet_len = self.alphabet.len();
        for index in self.indices.iter_mut().skip(1) {
            if *index + 1 == alphabet_len {
                // Reset character and move to the next if it's already at the limit.
                *index = self.split_point + 1;
            } else {
                // If the character can be incremented, we're done.
                *index += 1;
                return;
            }
        }

        // Extend the sequence with another character when nothing could be incremented.
        self.indices.push(self.split_point + 1);
    }
}
/// Iterate over all regex matches intersecting the visible viewport.
///
/// The search range is widened to whole wrapped lines around the viewport, but
/// never by more than [`MAX_SEARCH_LINES`] in either direction; matches lying
/// entirely outside the viewport are filtered out.
pub fn visible_regex_match_iter<'a, T>(
    term: &'a Term<T>,
    regex: &'a mut RegexSearch,
) -> impl Iterator<Item = Match> + 'a {
    // Viewport bounds in terminal coordinates.
    let top = Line(-(term.grid().display_offset() as i32));
    let bottom = top + term.bottommost_line();

    // Follow wrapped lines beyond the viewport, capped at MAX_SEARCH_LINES.
    let mut search_start = term.line_search_left(Point::new(top, Column(0)));
    search_start.line = search_start.line.max(top - MAX_SEARCH_LINES);
    let mut search_end = term.line_search_right(Point::new(bottom, Column(0)));
    search_end.line = search_end.line.min(bottom + MAX_SEARCH_LINES);

    // Drop matches ending above or starting below the visible region.
    RegexIter::new(search_start, search_end, Direction::Right, term, regex)
        .skip_while(move |rm| rm.end().line < top)
        .take_while(move |rm| rm.start().line <= bottom)
}
/// Iterate over all visible hyperlinks, yielding each unique hyperlink only once.
pub fn visible_unique_hyperlinks_iter<T>(term: &Term<T>) -> impl Iterator<Item = Match> + '_ {
    let mut display_iter = term.grid().display_iter().peekable();

    // Avoid creating hints for the same hyperlink found at different places.
    let mut unique_hyperlinks = HashSet::<Hyperlink, RandomState>::default();

    iter::from_fn(move || {
        // Find the start of the next unique hyperlink.
        let (cell, hyperlink) = display_iter.find_map(|cell| {
            let hyperlink = cell.hyperlink()?;
            (!unique_hyperlinks.contains(&hyperlink)).then(|| {
                unique_hyperlinks.insert(hyperlink.clone());
                (cell, hyperlink)
            })
        })?;

        let start = cell.point;
        let mut end = start;

        // Find the end bound of just found unique hyperlink.
        while let Some(next_cell) = display_iter.peek() {
            // Cell at display iter doesn't match, yield the hyperlink and start over with
            // `find_map`.
            if next_cell.hyperlink().as_ref() != Some(&hyperlink) {
                break;
            }

            // Advance to the next cell.
            end = next_cell.point;
            let _ = display_iter.next();
        }

        Some(start..=end)
    })
}
/// Retrieve the match, if the specified point is inside the content matching the regex.
fn regex_match_at<T>(
    term: &Term<T>,
    point: Point,
    regex: &mut RegexSearch,
    post_processing: bool,
) -> Option<Match> {
    // Locate the raw regex match covering the point.
    let raw_match = visible_regex_match_iter(term, regex).find(|rm| rm.contains(&point))?;

    if !post_processing {
        return Some(raw_match);
    }

    // Narrow the raw match down to the post-processed sub-match under the point.
    HintPostProcessor::new(term, regex, raw_match).find(|rm| rm.contains(&point))
}
/// Check if there is a hint highlighted at the specified point.
///
/// A hint is considered only when its configured mouse modifiers are held (plus
/// shift while the application has grabbed the mouse). Hyperlink matches take
/// precedence over regex matches.
pub fn highlighted_at<T>(
    term: &Term<T>,
    config: &UiConfig,
    point: Point,
    mouse_mods: ModifiersState,
) -> Option<HintMatch> {
    let mouse_mode = term.mode().intersects(TermMode::MOUSE_MODE);

    config.hints.enabled.iter().find_map(|hint| {
        // Check if all required modifiers are pressed.
        let mods_active = hint.mouse.is_some_and(|mouse| {
            mouse.enabled
                && mouse_mods.contains(mouse.mods.0)
                && (!mouse_mode || mouse_mods.contains(ModifiersState::SHIFT))
        });
        if !mods_active {
            return None;
        }

        // OSC 8 hyperlinks win over regex content.
        if hint.content.hyperlinks {
            if let Some((hyperlink, bounds)) = hyperlink_at(term, point) {
                return Some(HintMatch { bounds, hyperlink: Some(hyperlink), hint: hint.clone() });
            }
        }

        // Fall back to a regex match at the point.
        let regex_bounds = hint
            .content
            .regex
            .as_ref()
            .and_then(|regex| {
                regex.with_compiled(|regex| {
                    regex_match_at(term, point, regex, hint.post_processing)
                })
            })
            .flatten();

        regex_bounds.map(|bounds| HintMatch { bounds, hyperlink: None, hint: hint.clone() })
    })
}
/// Retrieve the hyperlink with its range, if there is one at the specified point.
///
/// This will only return contiguous cells, even if another hyperlink with the same ID exists.
fn hyperlink_at<T>(term: &Term<T>, point: Point) -> Option<(Hyperlink, Match)> {
    let hyperlink = term.grid()[point].hyperlink()?;

    let grid = term.grid();

    // Scan forward from `point` for the last contiguous cell with this hyperlink.
    let mut match_end = point;
    for cell in grid.iter_from(point) {
        if cell.hyperlink().is_some_and(|link| link == hyperlink) {
            match_end = cell.point;
        } else {
            break;
        }
    }

    // Scan backward from `point` for the first contiguous cell with this hyperlink.
    let mut match_start = point;
    let mut iter = grid.iter_from(point);
    while let Some(cell) = iter.prev() {
        if cell.hyperlink().is_some_and(|link| link == hyperlink) {
            match_start = cell.point;
        } else {
            break;
        }
    }

    Some((hyperlink, match_start..=match_end))
}
/// Iterator over all post-processed matches inside an existing hint match.
struct HintPostProcessor<'a, T> {
    /// Regex search DFAs.
    regex: &'a mut RegexSearch,

    /// Terminal reference.
    term: &'a Term<T>,

    /// Next hint match in the iterator; pre-computed so `next()` can yield immediately.
    next_match: Option<Match>,

    /// Start point for the next search.
    start: Point,

    /// End point for the hint match iterator.
    end: Point,
}
impl<'a, T> HintPostProcessor<'a, T> {
    /// Create a new iterator for an unprocessed match.
    fn new(term: &'a Term<T>, regex: &'a mut RegexSearch, regex_match: Match) -> Self {
        let mut post_processor = Self {
            next_match: None,
            start: *regex_match.start(),
            end: *regex_match.end(),
            term,
            regex,
        };

        // Post-process the first hint match so `next()` has something to yield.
        post_processor.next_processed_match(regex_match);

        post_processor
    }

    /// Apply some hint post processing heuristics.
    ///
    /// This will check the end of the hint and make it shorter if certain characters are determined
    /// to be unlikely to be intentionally part of the hint.
    ///
    /// This is most useful for identifying URLs appropriately.
    ///
    /// Returns `None` when truncation leaves nothing of the match.
    fn hint_post_processing(&self, regex_match: &Match) -> Option<Match> {
        let mut iter = self.term.grid().iter_from(*regex_match.start());

        let mut c = iter.cell().c;

        // Phase 1: walk forward and truncate at the first unbalanced closing
        // `)`/`]`, tracking open brackets seen so far.
        let end = *regex_match.end();
        let mut open_parents = 0;
        let mut open_brackets = 0;
        loop {
            match c {
                '(' => open_parents += 1,
                '[' => open_brackets += 1,
                ')' => {
                    if open_parents == 0 {
                        // Unmatched closer: step back so it is excluded.
                        iter.prev();
                        break;
                    } else {
                        open_parents -= 1;
                    }
                },
                ']' => {
                    if open_brackets == 0 {
                        // Unmatched closer: step back so it is excluded.
                        iter.prev();
                        break;
                    } else {
                        open_brackets -= 1;
                    }
                },
                _ => (),
            }

            if iter.point() == end {
                break;
            }

            match iter.next() {
                Some(indexed) => c = indexed.cell.c,
                None => break,
            }
        }

        // Phase 2: strip trailing characters which are likely to be delimiters.
        let start = *regex_match.start();
        while iter.point() != start {
            if !matches!(c, '.' | ',' | ':' | ';' | '?' | '!' | '(' | '[' | '\'') {
                break;
            }

            match iter.prev() {
                Some(indexed) => c = indexed.cell.c,
                None => break,
            }
        }

        // Everything was stripped away; report an empty result.
        if start > iter.point() {
            None
        } else {
            Some(start..=iter.point())
        }
    }

    /// Loop over submatches until a non-empty post-processed match is found.
    ///
    /// Leaves the result in `self.next_match` (or `None` when exhausted) and
    /// advances `self.start` past it for the subsequent search.
    fn next_processed_match(&mut self, mut regex_match: Match) {
        self.next_match = loop {
            if let Some(next_match) = self.hint_post_processing(&regex_match) {
                self.start = next_match.end().add(self.term, Boundary::Grid, 1);
                break Some(next_match);
            }

            // Post-processing discarded the whole match; retry from one cell later.
            self.start = regex_match.start().add(self.term, Boundary::Grid, 1);
            if self.start > self.end {
                return;
            }

            match self.term.regex_search_right(self.regex, self.start, self.end) {
                Some(rm) => regex_match = rm,
                None => return,
            }
        };
    }
}
impl<T> Iterator for HintPostProcessor<'_, T> {
    type Item = Match;

    /// Yield the pre-computed match, then eagerly post-process its successor.
    fn next(&mut self) -> Option<Self::Item> {
        let current = self.next_match.take()?;

        // No room left to search; this was the final match.
        if self.start > self.end {
            return Some(current);
        }

        // Queue up the following post-processed match, if any remains.
        if let Some(rm) = self.term.regex_search_right(self.regex, self.start, self.end) {
            self.next_processed_match(rm);
        }

        Some(current)
    }
}
#[cfg(test)]
mod tests {
    use alacritty_terminal::index::{Column, Line};
    use alacritty_terminal::term::test::mock_term;
    use alacritty_terminal::vte::ansi::Handler;

    use super::*;

    /// Labels are generated shortest-first; with a 0.5 split over "0123", tail
    /// characters come from `{0, 1}` and leading characters from `{2, 3}`.
    #[test]
    fn hint_label_generation() {
        let mut generator = HintLabels::new("0123", 0.5);

        assert_eq!(generator.next(), vec!['0']);
        assert_eq!(generator.next(), vec!['1']);

        assert_eq!(generator.next(), vec!['2', '0']);
        assert_eq!(generator.next(), vec!['2', '1']);
        assert_eq!(generator.next(), vec!['3', '0']);
        assert_eq!(generator.next(), vec!['3', '1']);

        assert_eq!(generator.next(), vec!['2', '2', '0']);
        assert_eq!(generator.next(), vec!['2', '2', '1']);
        assert_eq!(generator.next(), vec!['2', '3', '0']);
        assert_eq!(generator.next(), vec!['2', '3', '1']);
        assert_eq!(generator.next(), vec!['3', '2', '0']);
        assert_eq!(generator.next(), vec!['3', '2', '1']);
        assert_eq!(generator.next(), vec!['3', '3', '0']);
        assert_eq!(generator.next(), vec!['3', '3', '1']);

        assert_eq!(generator.next(), vec!['2', '2', '2', '0']);
        assert_eq!(generator.next(), vec!['2', '2', '2', '1']);
        assert_eq!(generator.next(), vec!['2', '2', '3', '0']);
        assert_eq!(generator.next(), vec!['2', '2', '3', '1']);
        assert_eq!(generator.next(), vec!['2', '3', '2', '0']);
        assert_eq!(generator.next(), vec!['2', '3', '2', '1']);
        assert_eq!(generator.next(), vec!['2', '3', '3', '0']);
        assert_eq!(generator.next(), vec!['2', '3', '3', '1']);
        assert_eq!(generator.next(), vec!['3', '2', '2', '0']);
        assert_eq!(generator.next(), vec!['3', '2', '2', '1']);
        assert_eq!(generator.next(), vec!['3', '2', '3', '0']);
        assert_eq!(generator.next(), vec!['3', '2', '3', '1']);
        assert_eq!(generator.next(), vec!['3', '3', '2', '0']);
        assert_eq!(generator.next(), vec!['3', '3', '2', '1']);
        assert_eq!(generator.next(), vec!['3', '3', '3', '0']);
        assert_eq!(generator.next(), vec!['3', '3', '3', '1']);
    }

    /// Regression test: a lone closing bracket must terminate the post-processor
    /// instead of looping forever.
    #[test]
    fn closed_bracket_does_not_result_in_infinite_iterator() {
        let term = mock_term(" ) ");

        let mut search = RegexSearch::new("[^/ ]").unwrap();

        let count = HintPostProcessor::new(
            &term,
            &mut search,
            Point::new(Line(0), Column(1))..=Point::new(Line(0), Column(1)),
        )
        .take(1)
        .count();

        assert_eq!(count, 0);
    }

    /// The same hyperlink appearing in multiple places is reported only once,
    /// bounded by its first contiguous run of cells.
    #[test]
    fn collect_unique_hyperlinks() {
        let mut term = mock_term("000\r\n111");
        term.goto(0, 0);

        let hyperlink_foo = Hyperlink::new(Some("1"), String::from("foo"));
        let hyperlink_bar = Hyperlink::new(Some("2"), String::from("bar"));

        // Create 2 hyperlinks on the first line.
        term.set_hyperlink(Some(hyperlink_foo.clone().into()));
        term.input('b');
        term.input('a');
        term.set_hyperlink(Some(hyperlink_bar.clone().into()));
        term.input('r');
        term.set_hyperlink(Some(hyperlink_foo.clone().into()));
        term.goto(1, 0);

        // Ditto for the second line.
        term.set_hyperlink(Some(hyperlink_foo.into()));
        term.input('b');
        term.input('a');
        term.set_hyperlink(Some(hyperlink_bar.into()));
        term.input('r');
        term.set_hyperlink(None);

        let mut unique_hyperlinks = visible_unique_hyperlinks_iter(&term);
        assert_eq!(
            Some(Match::new(Point::new(Line(0), Column(0)), Point::new(Line(0), Column(1)))),
            unique_hyperlinks.next()
        );
        assert_eq!(
            Some(Match::new(Point::new(Line(0), Column(2)), Point::new(Line(0), Column(2)))),
            unique_hyperlinks.next()
        );
        assert_eq!(None, unique_hyperlinks.next());
    }

    /// The regex iterator must find matches on every line of the viewport.
    #[test]
    fn visible_regex_match_covers_entire_viewport() {
        let content = "I'm a match!\r\n".repeat(4096);
        // The Term returned from this call will have a viewport starting at 0 and ending at 4096.
        // That's good enough for this test, since it only cares about visible content.
        let term = mock_term(&content);
        let mut regex = RegexSearch::new("match!").unwrap();

        // The iterator should match everything in the viewport.
        assert_eq!(visible_regex_match_iter(&term, &mut regex).count(), 4096);
    }
}
| rust | {
"argument_definitions": [
{
"definitions": [
"pub struct Term<T> {\n /// Terminal focus controlling the cursor shape.\n pub is_focused: bool,\n\n /// Cursor for keyboard selection.\n pub vi_mode_cursor: ViModeCursor,\n\n pub selection: Option<Selection>,\n\n /// Currently active grid.\n ///\n /// Tracks the screen buffer currently in use. While the alternate screen buffer is active,\n /// this will be the alternate grid. Otherwise it is the primary screen buffer.\n grid: Grid<Cell>,\n\n /// Currently inactive grid.\n ///\n /// Opposite of the active grid. While the alternate screen buffer is active, this will be the\n /// primary grid. Otherwise it is the alternate screen buffer.\n inactive_grid: Grid<Cell>,\n\n /// Index into `charsets`, pointing to what ASCII is currently being mapped to.\n active_charset: CharsetIndex,\n\n /// Tabstops.\n tabs: TabStops,\n\n /// Mode flags.\n mode: TermMode,\n\n /// Scroll region.\n ///\n /// Range going from top to bottom of the terminal, indexed from the top of the viewport.\n scroll_region: Range<Line>,\n\n /// Modified terminal colors.\n colors: Colors,\n\n /// Current style of the cursor.\n cursor_style: Option<CursorStyle>,\n\n /// Proxy for sending events to the event loop.\n event_proxy: T,\n\n /// Current title of the window.\n title: Option<String>,\n\n /// Stack of saved window titles. When a title is popped from this stack, the `title` for the\n /// term is set.\n title_stack: Vec<Option<String>>,\n\n /// The stack for the keyboard modes.\n keyboard_mode_stack: Vec<KeyboardModes>,\n\n /// Currently inactive keyboard mode stack.\n inactive_keyboard_mode_stack: Vec<KeyboardModes>,\n\n /// Information about damaged cells.\n damage: TermDamageState,\n\n /// Config directly for the terminal.\n config: Config,\n}"
],
"name": "term",
"type": "&Term<T>"
}
],
"end_line": 173,
"name": "keyboard_input",
"signature": "pub fn keyboard_input(&mut self, term: &Term<T>, c: char) -> Option<HintMatch>",
"start_line": 132
} | {
"class_name": "impl HintState {\n /// Initialize an inactive hint state.\n pub fn new<S: Into<String>>(alphabet: S) -> Self {\n Self {\n alphabet: alphabet.into(),\n hint: Default::default(),\n matches: Default::default(),\n labels: Default::default(),\n keys: Default::default(),\n }\n }\n\n /// Check if a hint selection is in progress.\n pub fn active(&self) -> bool {\n self.hint.is_some()\n }\n\n /// Start the hint selection process.\n pub fn start(&mut self, hint: Rc<Hint>) {\n self.hint = Some(hint);\n }\n\n /// Cancel the hint highlighting process.\n fn stop(&mut self) {\n self.matches.clear();\n self.labels.clear();\n self.keys.clear();\n self.hint = None;\n }\n\n /// Update the visible hint matches and key labels.\n pub fn update_matches<T>(&mut self, term: &Term<T>) {\n let hint = match self.hint.as_mut() {\n Some(hint) => hint,\n None => return,\n };\n\n // Clear current matches.\n self.matches.clear();\n\n // Add escape sequence hyperlinks.\n if hint.content.hyperlinks {\n self.matches.extend(visible_unique_hyperlinks_iter(term));\n }\n\n // Add visible regex matches.\n if let Some(regex) = hint.content.regex.as_ref() {\n regex.with_compiled(|regex| {\n let matches = visible_regex_match_iter(term, regex);\n\n // Apply post-processing and search for sub-matches if necessary.\n if hint.post_processing {\n let mut matches = matches.collect::<Vec<_>>();\n self.matches.extend(matches.drain(..).flat_map(|rm| {\n HintPostProcessor::new(term, regex, rm).collect::<Vec<_>>()\n }));\n } else {\n self.matches.extend(matches);\n }\n });\n }\n\n // Cancel highlight with no visible matches.\n if self.matches.is_empty() {\n self.stop();\n return;\n }\n\n // Sort and dedup ranges. 
Currently overlapped but not exactly same ranges are kept.\n self.matches.sort_by_key(|bounds| (*bounds.start(), Reverse(*bounds.end())));\n self.matches.dedup_by_key(|bounds| *bounds.start());\n\n let mut generator = HintLabels::new(&self.alphabet, HINT_SPLIT_PERCENTAGE);\n let match_count = self.matches.len();\n let keys_len = self.keys.len();\n\n // Get the label for each match.\n self.labels.resize(match_count, Vec::new());\n for i in (0..match_count).rev() {\n let mut label = generator.next();\n if label.len() >= keys_len && label[..keys_len] == self.keys[..] {\n self.labels[i] = label.split_off(keys_len);\n } else {\n self.labels[i] = Vec::new();\n }\n }\n }\n\n /// Handle keyboard input during hint selection.\n pub fn keyboard_input<T>(&mut self, term: &Term<T>, c: char) -> Option<HintMatch> {\n match c {\n // Use backspace to remove the last character pressed.\n '\\x08' | '\\x1f' => {\n self.keys.pop();\n },\n // Cancel hint highlighting on ESC/Ctrl+c.\n '\\x1b' | '\\x03' => self.stop(),\n _ => (),\n }\n\n // Update the visible matches.\n self.update_matches(term);\n\n let hint = self.hint.as_ref()?;\n\n // Find the last label starting with the input character.\n let mut labels = self.labels.iter().enumerate().rev();\n let (index, label) = labels.find(|(_, label)| !label.is_empty() && label[0] == c)?;\n\n // Check if the selected label is fully matched.\n if label.len() == 1 {\n let bounds = self.matches[index].clone();\n let hint = hint.clone();\n\n // Exit hint mode unless it requires explicit dismissal.\n if hint.persist {\n self.keys.clear();\n } else {\n self.stop();\n }\n\n // Hyperlinks take precedence over regex matches.\n let hyperlink = term.grid()[*bounds.start()].hyperlink();\n Some(HintMatch { bounds, hyperlink, hint })\n } else {\n // Store character to preserve the selection.\n self.keys.push(c);\n\n None\n }\n }\n\n /// Hint key labels.\n pub fn labels(&self) -> &Vec<Vec<char>> {\n &self.labels\n }\n\n /// Visible hint regex matches.\n pub 
fn matches(&self) -> &[Match] {\n &self.matches\n }\n\n /// Update the alphabet used for hint labels.\n pub fn update_alphabet(&mut self, alphabet: &str) {\n if self.alphabet != alphabet {\n alphabet.clone_into(&mut self.alphabet);\n self.keys.clear();\n }\n }\n}",
"class_signature": "impl HintState"
} |
highlighted_at | alacritty-master/alacritty/src/display/hint.rs | pub fn highlighted_at(
term: &Term<T>,
config: &UiConfig,
point: Point,
mouse_mods: ModifiersState,
) -> Option<HintMatch> {
let mouse_mode = term.mode().intersects(TermMode::MOUSE_MODE);
config.hints.enabled.iter().find_map(|hint| {
// Check if all required modifiers are pressed.
let highlight = hint.mouse.is_some_and(|mouse| {
mouse.enabled
&& mouse_mods.contains(mouse.mods.0)
&& (!mouse_mode || mouse_mods.contains(ModifiersState::SHIFT))
});
if !highlight {
return None;
}
if let Some((hyperlink, bounds)) =
hint.content.hyperlinks.then(|| hyperlink_at(term, point)).flatten()
{
return Some(HintMatch { bounds, hyperlink: Some(hyperlink), hint: hint.clone() });
}
let bounds = hint.content.regex.as_ref().and_then(|regex| {
regex.with_compiled(|regex| regex_match_at(term, point, regex, hint.post_processing))
});
if let Some(bounds) = bounds.flatten() {
return Some(HintMatch { bounds, hint: hint.clone(), hyperlink: None });
}
None
})
} | use std::borrow::Cow;
use std::cmp::Reverse;
use std::collections::HashSet;
use std::iter;
use std::rc::Rc;
use ahash::RandomState;
use winit::keyboard::ModifiersState;
use alacritty_terminal::grid::{BidirectionalIterator, Dimensions};
use alacritty_terminal::index::{Boundary, Column, Direction, Line, Point};
use alacritty_terminal::term::cell::Hyperlink;
use alacritty_terminal::term::search::{Match, RegexIter, RegexSearch};
use alacritty_terminal::term::{Term, TermMode};
use crate::config::ui_config::{Hint, HintAction};
use crate::config::UiConfig;
/// Maximum number of linewraps followed outside of the viewport during search highlighting.
pub const MAX_SEARCH_LINES: usize = 100;
/// Percentage of characters in the hints alphabet used for the last character.
const HINT_SPLIT_PERCENTAGE: f32 = 0.5;
/// Keyboard regex hint state.
pub struct HintState {
/// Hint currently in use.
hint: Option<Rc<Hint>>,
/// Alphabet for hint labels.
alphabet: String,
/// Visible matches.
matches: Vec<Match>,
/// Key label for each visible match.
labels: Vec<Vec<char>>,
/// Keys pressed for hint selection.
keys: Vec<char>,
}
impl HintState {
/// Initialize an inactive hint state.
pub fn new<S: Into<String>>(alphabet: S) -> Self {
Self {
alphabet: alphabet.into(),
hint: Default::default(),
matches: Default::default(),
labels: Default::default(),
keys: Default::default(),
}
}
/// Check if a hint selection is in progress.
pub fn active(&self) -> bool {
self.hint.is_some()
}
/// Start the hint selection process.
pub fn start(&mut self, hint: Rc<Hint>) {
self.hint = Some(hint);
}
/// Cancel the hint highlighting process.
fn stop(&mut self) {
self.matches.clear();
self.labels.clear();
self.keys.clear();
self.hint = None;
}
/// Update the visible hint matches and key labels.
pub fn update_matches<T>(&mut self, term: &Term<T>) {
let hint = match self.hint.as_mut() {
Some(hint) => hint,
None => return,
};
// Clear current matches.
self.matches.clear();
// Add escape sequence hyperlinks.
if hint.content.hyperlinks {
self.matches.extend(visible_unique_hyperlinks_iter(term));
}
// Add visible regex matches.
if let Some(regex) = hint.content.regex.as_ref() {
regex.with_compiled(|regex| {
let matches = visible_regex_match_iter(term, regex);
// Apply post-processing and search for sub-matches if necessary.
if hint.post_processing {
let mut matches = matches.collect::<Vec<_>>();
self.matches.extend(matches.drain(..).flat_map(|rm| {
HintPostProcessor::new(term, regex, rm).collect::<Vec<_>>()
}));
} else {
self.matches.extend(matches);
}
});
}
// Cancel highlight with no visible matches.
if self.matches.is_empty() {
self.stop();
return;
}
// Sort and dedup ranges. Currently overlapped but not exactly same ranges are kept.
self.matches.sort_by_key(|bounds| (*bounds.start(), Reverse(*bounds.end())));
self.matches.dedup_by_key(|bounds| *bounds.start());
let mut generator = HintLabels::new(&self.alphabet, HINT_SPLIT_PERCENTAGE);
let match_count = self.matches.len();
let keys_len = self.keys.len();
// Get the label for each match.
self.labels.resize(match_count, Vec::new());
for i in (0..match_count).rev() {
let mut label = generator.next();
if label.len() >= keys_len && label[..keys_len] == self.keys[..] {
self.labels[i] = label.split_off(keys_len);
} else {
self.labels[i] = Vec::new();
}
}
}
/// Handle keyboard input during hint selection.
pub fn keyboard_input<T>(&mut self, term: &Term<T>, c: char) -> Option<HintMatch> {
match c {
// Use backspace to remove the last character pressed.
'\x08' | '\x1f' => {
self.keys.pop();
},
// Cancel hint highlighting on ESC/Ctrl+c.
'\x1b' | '\x03' => self.stop(),
_ => (),
}
// Update the visible matches.
self.update_matches(term);
let hint = self.hint.as_ref()?;
// Find the last label starting with the input character.
let mut labels = self.labels.iter().enumerate().rev();
let (index, label) = labels.find(|(_, label)| !label.is_empty() && label[0] == c)?;
// Check if the selected label is fully matched.
if label.len() == 1 {
let bounds = self.matches[index].clone();
let hint = hint.clone();
// Exit hint mode unless it requires explicit dismissal.
if hint.persist {
self.keys.clear();
} else {
self.stop();
}
// Hyperlinks take precedence over regex matches.
let hyperlink = term.grid()[*bounds.start()].hyperlink();
Some(HintMatch { bounds, hyperlink, hint })
} else {
// Store character to preserve the selection.
self.keys.push(c);
None
}
}
/// Hint key labels.
pub fn labels(&self) -> &Vec<Vec<char>> {
&self.labels
}
/// Visible hint regex matches.
pub fn matches(&self) -> &[Match] {
&self.matches
}
/// Update the alphabet used for hint labels.
pub fn update_alphabet(&mut self, alphabet: &str) {
if self.alphabet != alphabet {
alphabet.clone_into(&mut self.alphabet);
self.keys.clear();
}
}
}
/// Hint match which was selected by the user.
#[derive(PartialEq, Eq, Debug, Clone)]
pub struct HintMatch {
    /// Terminal range matching the hint.
    bounds: Match,
    /// OSC 8 hyperlink.
    ///
    /// Set when the matched cells carry an escape sequence hyperlink;
    /// hyperlinks take precedence over the regex content.
    hyperlink: Option<Hyperlink>,
    /// Hint which triggered this match.
    hint: Rc<Hint>,
}
impl HintMatch {
    /// Whether this match should be highlighted when `point` is hovered.
    ///
    /// Hyperlink matches highlight whenever the pointed hyperlink equals the
    /// stored one; plain regex matches additionally require the point to lie
    /// inside the match bounds.
    #[inline]
    pub fn should_highlight(&self, point: Point, pointed_hyperlink: Option<&Hyperlink>) -> bool {
        self.hyperlink.as_ref() == pointed_hyperlink
            && (self.hyperlink.is_some() || self.bounds.contains(&point))
    }

    /// Action to perform when this hint is activated.
    #[inline]
    pub fn action(&self) -> &HintAction {
        &self.hint.action
    }

    /// Terminal range covered by this match.
    #[inline]
    pub fn bounds(&self) -> &Match {
        &self.bounds
    }

    /// OSC 8 hyperlink backing this match, if any.
    pub fn hyperlink(&self) -> Option<&Hyperlink> {
        self.hyperlink.as_ref()
    }

    /// Get the text content of the hint match.
    ///
    /// This will always revalidate the hint text, to account for terminal content
    /// changes since the [`HintMatch`] was constructed. The text of the hint might
    /// be different from its original value, but it will **always** be a valid
    /// match for this hint.
    pub fn text<T>(&self, term: &Term<T>) -> Option<Cow<'_, str>> {
        // Revalidate hyperlink match.
        if let Some(hyperlink) = &self.hyperlink {
            let (validated, bounds) = hyperlink_at(term, *self.bounds.start())?;
            // Only yield the URI when both the hyperlink and its bounds still hold.
            return (&validated == hyperlink && bounds == self.bounds)
                .then(|| hyperlink.uri().into());
        }

        // Revalidate regex match.
        let regex = self.hint.content.regex.as_ref()?;
        let bounds = regex.with_compiled(|regex| {
            regex_match_at(term, *self.bounds.start(), regex, self.hint.post_processing)
        })??;
        // Extract the text only if the regex still matches the exact same range.
        (bounds == self.bounds)
            .then(|| term.bounds_to_string(*bounds.start(), *bounds.end()).into())
    }
}
/// Generator for creating new hint labels.
struct HintLabels {
    /// Full character set available.
    alphabet: Vec<char>,
    /// Alphabet indices for the next label.
    indices: Vec<usize>,
    /// Point separating the alphabet's head and tail characters.
    ///
    /// To make identification of the tail character easy, part of the alphabet cannot be used for
    /// any other position.
    ///
    /// All characters in the alphabet before this index will be used for the last character, while
    /// the rest will be used for everything else.
    split_point: usize,
}

impl HintLabels {
    /// Create a new label generator.
    ///
    /// `split_ratio` (between 0.0 and 1.0) is the fraction of the alphabet
    /// reserved for the final character of every label.
    fn new(alphabet: impl Into<String>, split_ratio: f32) -> Self {
        let chars: Vec<char> = alphabet.into().chars().collect();
        let split_point = ((chars.len() - 1) as f32 * split_ratio.min(1.)) as usize;

        Self { indices: vec![0], split_point, alphabet: chars }
    }

    /// Produce the next label in the sequence.
    fn next(&mut self) -> Vec<char> {
        // Indices are stored least-significant digit first; reverse for display order.
        let label = self.indices.iter().rev().map(|&idx| self.alphabet[idx]).collect();
        self.increment();
        label
    }

    /// Advance the index sequence to the following label.
    fn increment(&mut self) {
        // The tail character cycles through the reserved `0..=split_point` range.
        if self.indices[0] < self.split_point {
            self.indices[0] += 1;
            return;
        }
        self.indices[0] = 0;

        // Carry into the leading positions, which use the rest of the alphabet.
        let last_index = self.alphabet.len() - 1;
        for digit in self.indices.iter_mut().skip(1) {
            if *digit < last_index {
                // This digit can absorb the carry; done.
                *digit += 1;
                return;
            }
            // Wrap this digit to the first non-tail character and keep carrying.
            *digit = self.split_point + 1;
        }

        // Every position overflowed; grow the label by one character.
        self.indices.push(self.split_point + 1);
    }
}
/// Iterate over all visible regex matches.
pub fn visible_regex_match_iter<'a, T>(
    term: &'a Term<T>,
    regex: &'a mut RegexSearch,
) -> impl Iterator<Item = Match> + 'a {
    // Viewport bounds in terminal coordinates.
    let first_line = Line(-(term.grid().display_offset() as i32));
    let last_line = first_line + term.bottommost_line();

    // Follow line wraps beyond the viewport, but cap how far outside we search.
    let mut search_start = term.line_search_left(Point::new(first_line, Column(0)));
    let mut search_end = term.line_search_right(Point::new(last_line, Column(0)));
    search_start.line = search_start.line.max(first_line - MAX_SEARCH_LINES);
    search_end.line = search_end.line.min(last_line + MAX_SEARCH_LINES);

    // Drop matches entirely above the viewport and stop once we're below it.
    RegexIter::new(search_start, search_end, Direction::Right, term, regex)
        .skip_while(move |rm| rm.end().line < first_line)
        .take_while(move |rm| rm.start().line <= last_line)
}
/// Iterate over all visible hyperlinks, yielding only unique ones.
pub fn visible_unique_hyperlinks_iter<T>(term: &Term<T>) -> impl Iterator<Item = Match> + '_ {
    let mut display_iter = term.grid().display_iter().peekable();
    // Track hyperlinks already yielded to avoid duplicate hints for the same link.
    let mut seen = HashSet::<Hyperlink, RandomState>::default();

    iter::from_fn(move || {
        // Advance to the first cell of a hyperlink we haven't seen yet.
        let (start_cell, hyperlink) = display_iter.find_map(|cell| {
            let hyperlink = cell.hyperlink()?;
            // `insert` returns `true` only for previously unseen hyperlinks.
            if seen.insert(hyperlink.clone()) {
                Some((cell, hyperlink))
            } else {
                None
            }
        })?;

        // Extend the range while consecutive cells carry the same hyperlink.
        let start = start_cell.point;
        let mut end = start;
        while display_iter
            .peek()
            .is_some_and(|cell| cell.hyperlink().as_ref() == Some(&hyperlink))
        {
            // Consume the cell and move the end bound onto it.
            end = display_iter.next().unwrap().point;
        }

        Some(start..=end)
    })
}
/// Retrieve the match, if the specified point is inside the content matching the regex.
fn regex_match_at<T>(
    term: &Term<T>,
    point: Point,
    regex: &mut RegexSearch,
    post_processing: bool,
) -> Option<Match> {
    // Locate the raw regex match containing the point.
    let raw_match = visible_regex_match_iter(term, regex).find(|rm| rm.contains(&point))?;

    if !post_processing {
        return Some(raw_match);
    }

    // Narrow the match down to the post-processed sub-match under the point.
    HintPostProcessor::new(term, regex, raw_match).find(|rm| rm.contains(&point))
}
/// Check if there is a hint highlighted at the specified point.
pub fn highlighted_at<T>(
    term: &Term<T>,
    config: &UiConfig,
    point: Point,
    mouse_mods: ModifiersState,
) -> Option<HintMatch> {
    let mouse_mode = term.mode().intersects(TermMode::MOUSE_MODE);

    config.hints.enabled.iter().find_map(|hint| {
        // The hint's mouse binding must be enabled with every modifier held;
        // while the app captures the mouse, SHIFT is additionally required.
        let mouse_active = hint.mouse.is_some_and(|mouse| {
            mouse.enabled
                && mouse_mods.contains(mouse.mods.0)
                && (!mouse_mode || mouse_mods.contains(ModifiersState::SHIFT))
        });
        if !mouse_active {
            return None;
        }

        // Escape sequence hyperlinks take priority over regex content.
        if hint.content.hyperlinks {
            if let Some((hyperlink, bounds)) = hyperlink_at(term, point) {
                return Some(HintMatch { bounds, hyperlink: Some(hyperlink), hint: hint.clone() });
            }
        }

        // Fall back to the hint's regex content.
        let bounds = hint.content.regex.as_ref().and_then(|regex| {
            regex.with_compiled(|regex| regex_match_at(term, point, regex, hint.post_processing))
        })?;
        bounds.map(|bounds| HintMatch { bounds, hint: hint.clone(), hyperlink: None })
    })
}
/// Retrieve the hyperlink with its range, if there is one at the specified point.
///
/// This will only return contiguous cells, even if another hyperlink with the same ID exists.
fn hyperlink_at<T>(term: &Term<T>, point: Point) -> Option<(Hyperlink, Match)> {
    let grid = term.grid();
    let hyperlink = grid[point].hyperlink()?;

    // Walk right from the point while cells still carry this hyperlink.
    let mut match_end = point;
    for cell in grid.iter_from(point) {
        match cell.hyperlink() {
            Some(link) if link == hyperlink => match_end = cell.point,
            _ => break,
        }
    }

    // Walk left from the point the same way.
    let mut match_start = point;
    let mut iter = grid.iter_from(point);
    while let Some(cell) = iter.prev() {
        match cell.hyperlink() {
            Some(link) if link == hyperlink => match_start = cell.point,
            _ => break,
        }
    }

    Some((hyperlink, match_start..=match_end))
}
/// Iterator over all post-processed matches inside an existing hint match.
struct HintPostProcessor<'a, T> {
    /// Regex search DFAs.
    regex: &'a mut RegexSearch,
    /// Terminal reference.
    term: &'a Term<T>,
    /// Next hint match in the iterator.
    ///
    /// `None` once the source match has been exhausted.
    next_match: Option<Match>,
    /// Start point for the next search.
    start: Point,
    /// End point for the hint match iterator.
    end: Point,
}
impl<'a, T> HintPostProcessor<'a, T> {
    /// Create a new iterator for an unprocessed match.
    fn new(term: &'a Term<T>, regex: &'a mut RegexSearch, regex_match: Match) -> Self {
        let mut post_processor = Self {
            next_match: None,
            start: *regex_match.start(),
            end: *regex_match.end(),
            term,
            regex,
        };

        // Post-process the first hint match.
        post_processor.next_processed_match(regex_match);

        post_processor
    }

    /// Apply some hint post processing heuristics.
    ///
    /// This will check the end of the hint and make it shorter if certain characters are
    /// determined to be unlikely to be intentionally part of the hint.
    ///
    /// This is most useful for identifying URLs appropriately.
    fn hint_post_processing(&self, regex_match: &Match) -> Option<Match> {
        let mut iter = self.term.grid().iter_from(*regex_match.start());

        let mut c = iter.cell().c;

        // Truncate uneven number of brackets.
        let end = *regex_match.end();
        let mut open_parents = 0;
        let mut open_brackets = 0;
        loop {
            match c {
                '(' => open_parents += 1,
                '[' => open_brackets += 1,
                ')' => {
                    if open_parents == 0 {
                        // Unmatched closing paren: end the hint just before it.
                        iter.prev();
                        break;
                    } else {
                        open_parents -= 1;
                    }
                },
                ']' => {
                    if open_brackets == 0 {
                        // Unmatched closing bracket: end the hint just before it.
                        iter.prev();
                        break;
                    } else {
                        open_brackets -= 1;
                    }
                },
                _ => (),
            }

            if iter.point() == end {
                break;
            }

            match iter.next() {
                Some(indexed) => c = indexed.cell.c,
                None => break,
            }
        }

        // Truncate trailing characters which are likely to be delimiters.
        let start = *regex_match.start();
        while iter.point() != start {
            if !matches!(c, '.' | ',' | ':' | ';' | '?' | '!' | '(' | '[' | '\'') {
                break;
            }

            match iter.prev() {
                Some(indexed) => c = indexed.cell.c,
                None => break,
            }
        }

        // Everything was truncated away; let the caller search for a sub-match.
        if start > iter.point() {
            None
        } else {
            Some(start..=iter.point())
        }
    }

    /// Loop over submatches until a non-empty post-processed match is found.
    fn next_processed_match(&mut self, mut regex_match: Match) {
        self.next_match = loop {
            // Fix for garbled source: this must borrow `regex_match`.
            if let Some(next_match) = self.hint_post_processing(&regex_match) {
                self.start = next_match.end().add(self.term, Boundary::Grid, 1);
                break Some(next_match);
            }

            self.start = regex_match.start().add(self.term, Boundary::Grid, 1);
            if self.start > self.end {
                return;
            }

            match self.term.regex_search_right(self.regex, self.start, self.end) {
                Some(rm) => regex_match = rm,
                None => return,
            }
        };
    }
}
impl<T> Iterator for HintPostProcessor<'_, T> {
    type Item = Match;

    fn next(&mut self) -> Option<Self::Item> {
        // Yield the match prepared on the previous iteration, if any.
        let current = self.next_match.take()?;

        // Prepare the following match before handing back the current one.
        if self.start <= self.end {
            let searched = self.term.regex_search_right(self.regex, self.start, self.end);
            if let Some(rm) = searched {
                self.next_processed_match(rm);
            }
        }

        Some(current)
    }
}
#[cfg(test)]
mod tests {
    use alacritty_terminal::index::{Column, Line};
    use alacritty_terminal::term::test::mock_term;
    use alacritty_terminal::vte::ansi::Handler;

    use super::*;

    /// Exhaustive check of label generation over a tiny alphabet.
    ///
    /// With a 0.5 split ratio on "0123", '0'/'1' are reserved for the final
    /// character, while '2'/'3' fill every leading position.
    #[test]
    fn hint_label_generation() {
        let mut generator = HintLabels::new("0123", 0.5);

        assert_eq!(generator.next(), vec!['0']);
        assert_eq!(generator.next(), vec!['1']);

        assert_eq!(generator.next(), vec!['2', '0']);
        assert_eq!(generator.next(), vec!['2', '1']);
        assert_eq!(generator.next(), vec!['3', '0']);
        assert_eq!(generator.next(), vec!['3', '1']);

        assert_eq!(generator.next(), vec!['2', '2', '0']);
        assert_eq!(generator.next(), vec!['2', '2', '1']);
        assert_eq!(generator.next(), vec!['2', '3', '0']);
        assert_eq!(generator.next(), vec!['2', '3', '1']);
        assert_eq!(generator.next(), vec!['3', '2', '0']);
        assert_eq!(generator.next(), vec!['3', '2', '1']);
        assert_eq!(generator.next(), vec!['3', '3', '0']);
        assert_eq!(generator.next(), vec!['3', '3', '1']);

        assert_eq!(generator.next(), vec!['2', '2', '2', '0']);
        assert_eq!(generator.next(), vec!['2', '2', '2', '1']);
        assert_eq!(generator.next(), vec!['2', '2', '3', '0']);
        assert_eq!(generator.next(), vec!['2', '2', '3', '1']);
        assert_eq!(generator.next(), vec!['2', '3', '2', '0']);
        assert_eq!(generator.next(), vec!['2', '3', '2', '1']);
        assert_eq!(generator.next(), vec!['2', '3', '3', '0']);
        assert_eq!(generator.next(), vec!['2', '3', '3', '1']);
        assert_eq!(generator.next(), vec!['3', '2', '2', '0']);
        assert_eq!(generator.next(), vec!['3', '2', '2', '1']);
        assert_eq!(generator.next(), vec!['3', '2', '3', '0']);
        assert_eq!(generator.next(), vec!['3', '2', '3', '1']);
        assert_eq!(generator.next(), vec!['3', '3', '2', '0']);
        assert_eq!(generator.next(), vec!['3', '3', '2', '1']);
        assert_eq!(generator.next(), vec!['3', '3', '3', '0']);
        assert_eq!(generator.next(), vec!['3', '3', '3', '1']);
    }

    /// Guards against the post processor iterating forever when the match is
    /// a lone closing bracket (see the unmatched-bracket truncation logic).
    #[test]
    fn closed_bracket_does_not_result_in_infinite_iterator() {
        let term = mock_term(" ) ");

        let mut search = RegexSearch::new("[^/ ]").unwrap();

        let count = HintPostProcessor::new(
            &term,
            &mut search,
            Point::new(Line(0), Column(1))..=Point::new(Line(0), Column(1)),
        )
        .take(1)
        .count();

        assert_eq!(count, 0);
    }

    /// Repeated hyperlinks across cells and lines should be yielded once,
    /// with a range covering only the first contiguous run.
    #[test]
    fn collect_unique_hyperlinks() {
        let mut term = mock_term("000\r\n111");
        term.goto(0, 0);

        let hyperlink_foo = Hyperlink::new(Some("1"), String::from("foo"));
        let hyperlink_bar = Hyperlink::new(Some("2"), String::from("bar"));

        // Create 2 hyperlinks on the first line.
        term.set_hyperlink(Some(hyperlink_foo.clone().into()));
        term.input('b');
        term.input('a');
        term.set_hyperlink(Some(hyperlink_bar.clone().into()));
        term.input('r');
        term.set_hyperlink(Some(hyperlink_foo.clone().into()));
        term.goto(1, 0);

        // Ditto for the second line.
        term.set_hyperlink(Some(hyperlink_foo.into()));
        term.input('b');
        term.input('a');
        term.set_hyperlink(Some(hyperlink_bar.into()));
        term.input('r');
        term.set_hyperlink(None);

        let mut unique_hyperlinks = visible_unique_hyperlinks_iter(&term);

        // First contiguous run of "foo" (columns 0-1 of line 0).
        assert_eq!(
            Some(Match::new(Point::new(Line(0), Column(0)), Point::new(Line(0), Column(1)))),
            unique_hyperlinks.next()
        );
        // First run of "bar" (column 2 of line 0); later repeats are skipped.
        assert_eq!(
            Some(Match::new(Point::new(Line(0), Column(2)), Point::new(Line(0), Column(2)))),
            unique_hyperlinks.next()
        );
        assert_eq!(None, unique_hyperlinks.next());
    }

    #[test]
    fn visible_regex_match_covers_entire_viewport() {
        let content = "I'm a match!\r\n".repeat(4096);
        // The Term returned from this call will have a viewport starting at 0 and ending at 4096.
        // That's good enough for this test, since it only cares about visible content.
        let term = mock_term(&content);
        let mut regex = RegexSearch::new("match!").unwrap();

        // The iterator should match everything in the viewport.
        assert_eq!(visible_regex_match_iter(&term, &mut regex).count(), 4096);
    }
}
| rust | {
"argument_definitions": [
{
"definitions": [
"pub struct Term<T> {\n /// Terminal focus controlling the cursor shape.\n pub is_focused: bool,\n\n /// Cursor for keyboard selection.\n pub vi_mode_cursor: ViModeCursor,\n\n pub selection: Option<Selection>,\n\n /// Currently active grid.\n ///\n /// Tracks the screen buffer currently in use. While the alternate screen buffer is active,\n /// this will be the alternate grid. Otherwise it is the primary screen buffer.\n grid: Grid<Cell>,\n\n /// Currently inactive grid.\n ///\n /// Opposite of the active grid. While the alternate screen buffer is active, this will be the\n /// primary grid. Otherwise it is the alternate screen buffer.\n inactive_grid: Grid<Cell>,\n\n /// Index into `charsets`, pointing to what ASCII is currently being mapped to.\n active_charset: CharsetIndex,\n\n /// Tabstops.\n tabs: TabStops,\n\n /// Mode flags.\n mode: TermMode,\n\n /// Scroll region.\n ///\n /// Range going from top to bottom of the terminal, indexed from the top of the viewport.\n scroll_region: Range<Line>,\n\n /// Modified terminal colors.\n colors: Colors,\n\n /// Current style of the cursor.\n cursor_style: Option<CursorStyle>,\n\n /// Proxy for sending events to the event loop.\n event_proxy: T,\n\n /// Current title of the window.\n title: Option<String>,\n\n /// Stack of saved window titles. When a title is popped from this stack, the `title` for the\n /// term is set.\n title_stack: Vec<Option<String>>,\n\n /// The stack for the keyboard modes.\n keyboard_mode_stack: Vec<KeyboardModes>,\n\n /// Currently inactive keyboard mode stack.\n inactive_keyboard_mode_stack: Vec<KeyboardModes>,\n\n /// Information about damaged cells.\n damage: TermDamageState,\n\n /// Config directly for the terminal.\n config: Config,\n}"
],
"name": "term",
"type": "&Term<T>"
},
{
"definitions": [
"pub struct UiConfig {\n /// Miscellaneous configuration options.\n pub general: General,\n\n /// Extra environment variables.\n pub env: HashMap<String, String>,\n\n /// How much scrolling history to keep.\n pub scrolling: Scrolling,\n\n /// Cursor configuration.\n pub cursor: Cursor,\n\n /// Selection configuration.\n pub selection: Selection,\n\n /// Font configuration.\n pub font: Font,\n\n /// Window configuration.\n pub window: WindowConfig,\n\n /// Mouse configuration.\n pub mouse: Mouse,\n\n /// Debug options.\n pub debug: Debug,\n\n /// Bell configuration.\n pub bell: BellConfig,\n\n /// RGB values for colors.\n pub colors: Colors,\n\n /// Path where config was loaded from.\n #[config(skip)]\n pub config_paths: Vec<PathBuf>,\n\n /// Regex hints for interacting with terminal content.\n pub hints: Hints,\n\n /// Config for the alacritty_terminal itself.\n pub terminal: Terminal,\n\n /// Keyboard configuration.\n keyboard: Keyboard,\n\n /// Path to a shell program to run on startup.\n #[config(deprecated = \"use terminal.shell instead\")]\n shell: Option<Program>,\n\n /// Configuration file imports.\n ///\n /// This is never read since the field is directly accessed through the config's\n /// [`toml::Value`], but still present to prevent unused field warnings.\n #[config(deprecated = \"use general.import instead\")]\n import: Option<Vec<String>>,\n\n /// Shell startup directory.\n #[config(deprecated = \"use general.working_directory instead\")]\n working_directory: Option<PathBuf>,\n\n /// Live config reload.\n #[config(deprecated = \"use general.live_config_reload instead\")]\n live_config_reload: Option<bool>,\n\n /// Offer IPC through a unix socket.\n #[cfg(unix)]\n #[config(deprecated = \"use general.ipc_socket instead\")]\n pub ipc_socket: Option<bool>,\n}"
],
"name": "config",
"type": "&UiConfig"
},
{
"definitions": [
"pub struct Point<L = Line, C = Column> {\n pub line: L,\n pub column: C,\n}"
],
"name": "point",
"type": "Point"
},
{
"definitions": [
" pub struct ModifiersState: u32 {\n /// The \"shift\" key.\n const SHIFT = 0b100;\n /// The \"control\" key.\n const CONTROL = 0b100 << 3;\n /// The \"alt\" key.\n const ALT = 0b100 << 6;\n /// This is the \"windows\" key on PC and \"command\" key on Mac.\n const SUPER = 0b100 << 9;\n }"
],
"name": "mouse_mods",
"type": "ModifiersState"
}
],
"end_line": 423,
"name": "highlighted_at",
"signature": "pub fn highlighted_at(\n term: &Term<T>,\n config: &UiConfig,\n point: Point,\n mouse_mods: ModifiersState,\n) -> Option<HintMatch>",
"start_line": 389
} | {
"class_name": "",
"class_signature": ""
} |
hyperlink_at | alacritty-master/alacritty/src/display/hint.rs | fn hyperlink_at(term: &Term<T>, point: Point) -> Option<(Hyperlink, Match)> {
let hyperlink = term.grid()[point].hyperlink()?;
let grid = term.grid();
let mut match_end = point;
for cell in grid.iter_from(point) {
if cell.hyperlink().is_some_and(|link| link == hyperlink) {
match_end = cell.point;
} else {
break;
}
}
let mut match_start = point;
let mut iter = grid.iter_from(point);
while let Some(cell) = iter.prev() {
if cell.hyperlink().is_some_and(|link| link == hyperlink) {
match_start = cell.point;
} else {
break;
}
}
Some((hyperlink, match_start..=match_end))
} | use std::borrow::Cow;
use std::cmp::Reverse;
use std::collections::HashSet;
use std::iter;
use std::rc::Rc;
use ahash::RandomState;
use winit::keyboard::ModifiersState;
use alacritty_terminal::grid::{BidirectionalIterator, Dimensions};
use alacritty_terminal::index::{Boundary, Column, Direction, Line, Point};
use alacritty_terminal::term::cell::Hyperlink;
use alacritty_terminal::term::search::{Match, RegexIter, RegexSearch};
use alacritty_terminal::term::{Term, TermMode};
use crate::config::ui_config::{Hint, HintAction};
use crate::config::UiConfig;
/// Maximum number of linewraps followed outside of the viewport during search highlighting.
pub const MAX_SEARCH_LINES: usize = 100;
/// Percentage of characters in the hints alphabet used for the last character.
const HINT_SPLIT_PERCENTAGE: f32 = 0.5;
/// Keyboard regex hint state.
pub struct HintState {
/// Hint currently in use.
hint: Option<Rc<Hint>>,
/// Alphabet for hint labels.
alphabet: String,
/// Visible matches.
matches: Vec<Match>,
/// Key label for each visible match.
labels: Vec<Vec<char>>,
/// Keys pressed for hint selection.
keys: Vec<char>,
}
impl HintState {
/// Initialize an inactive hint state.
pub fn new<S: Into<String>>(alphabet: S) -> Self {
Self {
alphabet: alphabet.into(),
hint: Default::default(),
matches: Default::default(),
labels: Default::default(),
keys: Default::default(),
}
}
/// Check if a hint selection is in progress.
pub fn active(&self) -> bool {
self.hint.is_some()
}
/// Start the hint selection process.
pub fn start(&mut self, hint: Rc<Hint>) {
self.hint = Some(hint);
}
/// Cancel the hint highlighting process.
fn stop(&mut self) {
self.matches.clear();
self.labels.clear();
self.keys.clear();
self.hint = None;
}
/// Update the visible hint matches and key labels.
pub fn update_matches<T>(&mut self, term: &Term<T>) {
let hint = match self.hint.as_mut() {
Some(hint) => hint,
None => return,
};
// Clear current matches.
self.matches.clear();
// Add escape sequence hyperlinks.
if hint.content.hyperlinks {
self.matches.extend(visible_unique_hyperlinks_iter(term));
}
// Add visible regex matches.
if let Some(regex) = hint.content.regex.as_ref() {
regex.with_compiled(|regex| {
let matches = visible_regex_match_iter(term, regex);
// Apply post-processing and search for sub-matches if necessary.
if hint.post_processing {
let mut matches = matches.collect::<Vec<_>>();
self.matches.extend(matches.drain(..).flat_map(|rm| {
HintPostProcessor::new(term, regex, rm).collect::<Vec<_>>()
}));
} else {
self.matches.extend(matches);
}
});
}
// Cancel highlight with no visible matches.
if self.matches.is_empty() {
self.stop();
return;
}
// Sort and dedup ranges. Currently overlapped but not exactly same ranges are kept.
self.matches.sort_by_key(|bounds| (*bounds.start(), Reverse(*bounds.end())));
self.matches.dedup_by_key(|bounds| *bounds.start());
let mut generator = HintLabels::new(&self.alphabet, HINT_SPLIT_PERCENTAGE);
let match_count = self.matches.len();
let keys_len = self.keys.len();
// Get the label for each match.
self.labels.resize(match_count, Vec::new());
for i in (0..match_count).rev() {
let mut label = generator.next();
if label.len() >= keys_len && label[..keys_len] == self.keys[..] {
self.labels[i] = label.split_off(keys_len);
} else {
self.labels[i] = Vec::new();
}
}
}
/// Handle keyboard input during hint selection.
pub fn keyboard_input<T>(&mut self, term: &Term<T>, c: char) -> Option<HintMatch> {
match c {
// Use backspace to remove the last character pressed.
'\x08' | '\x1f' => {
self.keys.pop();
},
// Cancel hint highlighting on ESC/Ctrl+c.
'\x1b' | '\x03' => self.stop(),
_ => (),
}
// Update the visible matches.
self.update_matches(term);
let hint = self.hint.as_ref()?;
// Find the last label starting with the input character.
let mut labels = self.labels.iter().enumerate().rev();
let (index, label) = labels.find(|(_, label)| !label.is_empty() && label[0] == c)?;
// Check if the selected label is fully matched.
if label.len() == 1 {
let bounds = self.matches[index].clone();
let hint = hint.clone();
// Exit hint mode unless it requires explicit dismissal.
if hint.persist {
self.keys.clear();
} else {
self.stop();
}
// Hyperlinks take precedence over regex matches.
let hyperlink = term.grid()[*bounds.start()].hyperlink();
Some(HintMatch { bounds, hyperlink, hint })
} else {
// Store character to preserve the selection.
self.keys.push(c);
None
}
}
/// Hint key labels.
pub fn labels(&self) -> &Vec<Vec<char>> {
&self.labels
}
/// Visible hint regex matches.
pub fn matches(&self) -> &[Match] {
&self.matches
}
/// Update the alphabet used for hint labels.
pub fn update_alphabet(&mut self, alphabet: &str) {
if self.alphabet != alphabet {
alphabet.clone_into(&mut self.alphabet);
self.keys.clear();
}
}
}
/// Hint match which was selected by the user.
#[derive(PartialEq, Eq, Debug, Clone)]
pub struct HintMatch {
/// Terminal range matching the hint.
bounds: Match,
/// OSC 8 hyperlink.
hyperlink: Option<Hyperlink>,
/// Hint which triggered this match.
hint: Rc<Hint>,
}
impl HintMatch {
#[inline]
pub fn should_highlight(&self, point: Point, pointed_hyperlink: Option<&Hyperlink>) -> bool {
self.hyperlink.as_ref() == pointed_hyperlink
&& (self.hyperlink.is_some() || self.bounds.contains(&point))
}
#[inline]
pub fn action(&self) -> &HintAction {
&self.hint.action
}
#[inline]
pub fn bounds(&self) -> &Match {
&self.bounds
}
pub fn hyperlink(&self) -> Option<&Hyperlink> {
self.hyperlink.as_ref()
}
/// Get the text content of the hint match.
///
/// This will always revalidate the hint text, to account for terminal content
/// changes since the [`HintMatch`] was constructed. The text of the hint might
/// be different from its original value, but it will **always** be a valid
/// match for this hint.
pub fn text<T>(&self, term: &Term<T>) -> Option<Cow<'_, str>> {
// Revalidate hyperlink match.
if let Some(hyperlink) = &self.hyperlink {
let (validated, bounds) = hyperlink_at(term, *self.bounds.start())?;
return (&validated == hyperlink && bounds == self.bounds)
.then(|| hyperlink.uri().into());
}
// Revalidate regex match.
let regex = self.hint.content.regex.as_ref()?;
let bounds = regex.with_compiled(|regex| {
regex_match_at(term, *self.bounds.start(), regex, self.hint.post_processing)
})??;
(bounds == self.bounds)
.then(|| term.bounds_to_string(*bounds.start(), *bounds.end()).into())
}
}
/// Generator for creating new hint labels.
struct HintLabels {
/// Full character set available.
alphabet: Vec<char>,
/// Alphabet indices for the next label.
indices: Vec<usize>,
/// Point separating the alphabet's head and tail characters.
///
/// To make identification of the tail character easy, part of the alphabet cannot be used for
/// any other position.
///
/// All characters in the alphabet before this index will be used for the last character, while
/// the rest will be used for everything else.
split_point: usize,
}
impl HintLabels {
/// Create a new label generator.
///
/// The `split_ratio` should be a number between 0.0 and 1.0 representing the percentage of
/// elements in the alphabet which are reserved for the tail of the hint label.
fn new(alphabet: impl Into<String>, split_ratio: f32) -> Self {
let alphabet: Vec<char> = alphabet.into().chars().collect();
let split_point = ((alphabet.len() - 1) as f32 * split_ratio.min(1.)) as usize;
Self { indices: vec![0], split_point, alphabet }
}
/// Get the characters for the next label.
fn next(&mut self) -> Vec<char> {
let characters = self.indices.iter().rev().map(|index| self.alphabet[*index]).collect();
self.increment();
characters
}
/// Increment the character sequence.
fn increment(&mut self) {
// Increment the last character; if it's not at the split point we're done.
let tail = &mut self.indices[0];
if *tail < self.split_point {
*tail += 1;
return;
}
*tail = 0;
// Increment all other characters in reverse order.
let alphabet_len = self.alphabet.len();
for index in self.indices.iter_mut().skip(1) {
if *index + 1 == alphabet_len {
// Reset character and move to the next if it's already at the limit.
*index = self.split_point + 1;
} else {
// If the character can be incremented, we're done.
*index += 1;
return;
}
}
// Extend the sequence with another character when nothing could be incremented.
self.indices.push(self.split_point + 1);
}
}
/// Iterate over all visible regex matches.
pub fn visible_regex_match_iter<'a, T>(
term: &'a Term<T>,
regex: &'a mut RegexSearch,
) -> impl Iterator<Item = Match> + 'a {
let viewport_start = Line(-(term.grid().display_offset() as i32));
let viewport_end = viewport_start + term.bottommost_line();
let mut start = term.line_search_left(Point::new(viewport_start, Column(0)));
let mut end = term.line_search_right(Point::new(viewport_end, Column(0)));
start.line = start.line.max(viewport_start - MAX_SEARCH_LINES);
end.line = end.line.min(viewport_end + MAX_SEARCH_LINES);
RegexIter::new(start, end, Direction::Right, term, regex)
.skip_while(move |rm| rm.end().line < viewport_start)
.take_while(move |rm| rm.start().line <= viewport_end)
}
/// Iterate over all visible hyperlinks, yanking only unique ones.
pub fn visible_unique_hyperlinks_iter<T>(term: &Term<T>) -> impl Iterator<Item = Match> + '_ {
let mut display_iter = term.grid().display_iter().peekable();
// Avoid creating hints for the same hyperlinks, but from a different places.
let mut unique_hyperlinks = HashSet::<Hyperlink, RandomState>::default();
iter::from_fn(move || {
// Find the start of the next unique hyperlink.
let (cell, hyperlink) = display_iter.find_map(|cell| {
let hyperlink = cell.hyperlink()?;
(!unique_hyperlinks.contains(&hyperlink)).then(|| {
unique_hyperlinks.insert(hyperlink.clone());
(cell, hyperlink)
})
})?;
let start = cell.point;
let mut end = start;
// Find the end bound of just found unique hyperlink.
while let Some(next_cell) = display_iter.peek() {
// Cell at display iter doesn't match, yield the hyperlink and start over with
// `find_map`.
if next_cell.hyperlink().as_ref() != Some(&hyperlink) {
break;
}
// Advance to the next cell.
end = next_cell.point;
let _ = display_iter.next();
}
Some(start..=end)
})
}
/// Retrieve the match, if the specified point is inside the content matching the regex.
fn regex_match_at<T>(
term: &Term<T>,
point: Point,
regex: &mut RegexSearch,
post_processing: bool,
) -> Option<Match> {
let regex_match = visible_regex_match_iter(term, regex).find(|rm| rm.contains(&point))?;
// Apply post-processing and search for sub-matches if necessary.
if post_processing {
HintPostProcessor::new(term, regex, regex_match).find(|rm| rm.contains(&point))
} else {
Some(regex_match)
}
}
/// Check if there is a hint highlighted at the specified point.
pub fn highlighted_at<T>(
term: &Term<T>,
config: &UiConfig,
point: Point,
mouse_mods: ModifiersState,
) -> Option<HintMatch> {
let mouse_mode = term.mode().intersects(TermMode::MOUSE_MODE);
config.hints.enabled.iter().find_map(|hint| {
// Check if all required modifiers are pressed.
let highlight = hint.mouse.is_some_and(|mouse| {
mouse.enabled
&& mouse_mods.contains(mouse.mods.0)
&& (!mouse_mode || mouse_mods.contains(ModifiersState::SHIFT))
});
if !highlight {
return None;
}
if let Some((hyperlink, bounds)) =
hint.content.hyperlinks.then(|| hyperlink_at(term, point)).flatten()
{
return Some(HintMatch { bounds, hyperlink: Some(hyperlink), hint: hint.clone() });
}
let bounds = hint.content.regex.as_ref().and_then(|regex| {
regex.with_compiled(|regex| regex_match_at(term, point, regex, hint.post_processing))
});
if let Some(bounds) = bounds.flatten() {
return Some(HintMatch { bounds, hint: hint.clone(), hyperlink: None });
}
None
})
}
/// Retrieve the hyperlink with its range, if there is one at the specified point.
///
/// This will only return contiguous cells, even if another hyperlink with the same ID exists.
fn hyperlink_at<T>(term: &Term<T>, point: Point) -> Option<(Hyperlink, Match)> {
let hyperlink = term.grid()[point].hyperlink()?;
let grid = term.grid();
let mut match_end = point;
for cell in grid.iter_from(point) {
if cell.hyperlink().is_some_and(|link| link == hyperlink) {
match_end = cell.point;
} else {
break;
}
}
let mut match_start = point;
let mut iter = grid.iter_from(point);
while let Some(cell) = iter.prev() {
if cell.hyperlink().is_some_and(|link| link == hyperlink) {
match_start = cell.point;
} else {
break;
}
}
Some((hyperlink, match_start..=match_end))
}
/// Iterator over all post-processed matches inside an existing hint match.
struct HintPostProcessor<'a, T> {
/// Regex search DFAs.
regex: &'a mut RegexSearch,
/// Terminal reference.
term: &'a Term<T>,
/// Next hint match in the iterator.
next_match: Option<Match>,
/// Start point for the next search.
start: Point,
/// End point for the hint match iterator.
end: Point,
}
impl<'a, T> HintPostProcessor<'a, T> {
/// Create a new iterator for an unprocessed match.
fn new(term: &'a Term<T>, regex: &'a mut RegexSearch, regex_match: Match) -> Self {
let mut post_processor = Self {
next_match: None,
start: *regex_match.start(),
end: *regex_match.end(),
term,
regex,
};
// Post-process the first hint match.
post_processor.next_processed_match(regex_match);
post_processor
}
/// Apply some hint post processing heuristics.
///
/// This will check the end of the hint and make it shorter if certain characters are determined
/// to be unlikely to be intentionally part of the hint.
///
/// This is most useful for identifying URLs appropriately.
fn hint_post_processing(&self, regex_match: &Match) -> Option<Match> {
let mut iter = self.term.grid().iter_from(*regex_match.start());
let mut c = iter.cell().c;
// Truncate uneven number of brackets.
let end = *regex_match.end();
let mut open_parents = 0;
let mut open_brackets = 0;
loop {
match c {
'(' => open_parents += 1,
'[' => open_brackets += 1,
')' => {
if open_parents == 0 {
iter.prev();
break;
} else {
open_parents -= 1;
}
},
']' => {
if open_brackets == 0 {
iter.prev();
break;
} else {
open_brackets -= 1;
}
},
_ => (),
}
if iter.point() == end {
break;
}
match iter.next() {
Some(indexed) => c = indexed.cell.c,
None => break,
}
}
// Truncate trailing characters which are likely to be delimiters.
let start = *regex_match.start();
while iter.point() != start {
if !matches!(c, '.' | ',' | ':' | ';' | '?' | '!' | '(' | '[' | '\'') {
break;
}
match iter.prev() {
Some(indexed) => c = indexed.cell.c,
None => break,
}
}
if start > iter.point() {
None
} else {
Some(start..=iter.point())
}
}
/// Loop over submatches until a non-empty post-processed match is found.
fn next_processed_match(&mut self, mut regex_match: Match) {
self.next_match = loop {
if let Some(next_match) = self.hint_post_processing(®ex_match) {
self.start = next_match.end().add(self.term, Boundary::Grid, 1);
break Some(next_match);
}
self.start = regex_match.start().add(self.term, Boundary::Grid, 1);
if self.start > self.end {
return;
}
match self.term.regex_search_right(self.regex, self.start, self.end) {
Some(rm) => regex_match = rm,
None => return,
}
};
}
}
impl<T> Iterator for HintPostProcessor<'_, T> {
type Item = Match;
fn next(&mut self) -> Option<Self::Item> {
let next_match = self.next_match.take()?;
if self.start <= self.end {
if let Some(rm) = self.term.regex_search_right(self.regex, self.start, self.end) {
self.next_processed_match(rm);
}
}
Some(next_match)
}
}
#[cfg(test)]
mod tests {
use alacritty_terminal::index::{Column, Line};
use alacritty_terminal::term::test::mock_term;
use alacritty_terminal::vte::ansi::Handler;
use super::*;
#[test]
fn hint_label_generation() {
let mut generator = HintLabels::new("0123", 0.5);
assert_eq!(generator.next(), vec!['0']);
assert_eq!(generator.next(), vec!['1']);
assert_eq!(generator.next(), vec!['2', '0']);
assert_eq!(generator.next(), vec!['2', '1']);
assert_eq!(generator.next(), vec!['3', '0']);
assert_eq!(generator.next(), vec!['3', '1']);
assert_eq!(generator.next(), vec!['2', '2', '0']);
assert_eq!(generator.next(), vec!['2', '2', '1']);
assert_eq!(generator.next(), vec!['2', '3', '0']);
assert_eq!(generator.next(), vec!['2', '3', '1']);
assert_eq!(generator.next(), vec!['3', '2', '0']);
assert_eq!(generator.next(), vec!['3', '2', '1']);
assert_eq!(generator.next(), vec!['3', '3', '0']);
assert_eq!(generator.next(), vec!['3', '3', '1']);
assert_eq!(generator.next(), vec!['2', '2', '2', '0']);
assert_eq!(generator.next(), vec!['2', '2', '2', '1']);
assert_eq!(generator.next(), vec!['2', '2', '3', '0']);
assert_eq!(generator.next(), vec!['2', '2', '3', '1']);
assert_eq!(generator.next(), vec!['2', '3', '2', '0']);
assert_eq!(generator.next(), vec!['2', '3', '2', '1']);
assert_eq!(generator.next(), vec!['2', '3', '3', '0']);
assert_eq!(generator.next(), vec!['2', '3', '3', '1']);
assert_eq!(generator.next(), vec!['3', '2', '2', '0']);
assert_eq!(generator.next(), vec!['3', '2', '2', '1']);
assert_eq!(generator.next(), vec!['3', '2', '3', '0']);
assert_eq!(generator.next(), vec!['3', '2', '3', '1']);
assert_eq!(generator.next(), vec!['3', '3', '2', '0']);
assert_eq!(generator.next(), vec!['3', '3', '2', '1']);
assert_eq!(generator.next(), vec!['3', '3', '3', '0']);
assert_eq!(generator.next(), vec!['3', '3', '3', '1']);
}
#[test]
fn closed_bracket_does_not_result_in_infinite_iterator() {
let term = mock_term(" ) ");
let mut search = RegexSearch::new("[^/ ]").unwrap();
let count = HintPostProcessor::new(
&term,
&mut search,
Point::new(Line(0), Column(1))..=Point::new(Line(0), Column(1)),
)
.take(1)
.count();
assert_eq!(count, 0);
}
#[test]
fn collect_unique_hyperlinks() {
let mut term = mock_term("000\r\n111");
term.goto(0, 0);
let hyperlink_foo = Hyperlink::new(Some("1"), String::from("foo"));
let hyperlink_bar = Hyperlink::new(Some("2"), String::from("bar"));
// Create 2 hyperlinks on the first line.
term.set_hyperlink(Some(hyperlink_foo.clone().into()));
term.input('b');
term.input('a');
term.set_hyperlink(Some(hyperlink_bar.clone().into()));
term.input('r');
term.set_hyperlink(Some(hyperlink_foo.clone().into()));
term.goto(1, 0);
// Ditto for the second line.
term.set_hyperlink(Some(hyperlink_foo.into()));
term.input('b');
term.input('a');
term.set_hyperlink(Some(hyperlink_bar.into()));
term.input('r');
term.set_hyperlink(None);
let mut unique_hyperlinks = visible_unique_hyperlinks_iter(&term);
assert_eq!(
Some(Match::new(Point::new(Line(0), Column(0)), Point::new(Line(0), Column(1)))),
unique_hyperlinks.next()
);
assert_eq!(
Some(Match::new(Point::new(Line(0), Column(2)), Point::new(Line(0), Column(2)))),
unique_hyperlinks.next()
);
assert_eq!(None, unique_hyperlinks.next());
}
#[test]
fn visible_regex_match_covers_entire_viewport() {
let content = "I'm a match!\r\n".repeat(4096);
// The Term returned from this call will have a viewport starting at 0 and ending at 4096.
// That's good enough for this test, since it only cares about visible content.
let term = mock_term(&content);
let mut regex = RegexSearch::new("match!").unwrap();
// The iterator should match everything in the viewport.
assert_eq!(visible_regex_match_iter(&term, &mut regex).count(), 4096);
}
}
| rust | {
"argument_definitions": [
{
"definitions": [
"pub struct Term<T> {\n /// Terminal focus controlling the cursor shape.\n pub is_focused: bool,\n\n /// Cursor for keyboard selection.\n pub vi_mode_cursor: ViModeCursor,\n\n pub selection: Option<Selection>,\n\n /// Currently active grid.\n ///\n /// Tracks the screen buffer currently in use. While the alternate screen buffer is active,\n /// this will be the alternate grid. Otherwise it is the primary screen buffer.\n grid: Grid<Cell>,\n\n /// Currently inactive grid.\n ///\n /// Opposite of the active grid. While the alternate screen buffer is active, this will be the\n /// primary grid. Otherwise it is the alternate screen buffer.\n inactive_grid: Grid<Cell>,\n\n /// Index into `charsets`, pointing to what ASCII is currently being mapped to.\n active_charset: CharsetIndex,\n\n /// Tabstops.\n tabs: TabStops,\n\n /// Mode flags.\n mode: TermMode,\n\n /// Scroll region.\n ///\n /// Range going from top to bottom of the terminal, indexed from the top of the viewport.\n scroll_region: Range<Line>,\n\n /// Modified terminal colors.\n colors: Colors,\n\n /// Current style of the cursor.\n cursor_style: Option<CursorStyle>,\n\n /// Proxy for sending events to the event loop.\n event_proxy: T,\n\n /// Current title of the window.\n title: Option<String>,\n\n /// Stack of saved window titles. When a title is popped from this stack, the `title` for the\n /// term is set.\n title_stack: Vec<Option<String>>,\n\n /// The stack for the keyboard modes.\n keyboard_mode_stack: Vec<KeyboardModes>,\n\n /// Currently inactive keyboard mode stack.\n inactive_keyboard_mode_stack: Vec<KeyboardModes>,\n\n /// Information about damaged cells.\n damage: TermDamageState,\n\n /// Config directly for the terminal.\n config: Config,\n}"
],
"name": "term",
"type": "&Term<T>"
},
{
"definitions": [
"pub struct Point<L = Line, C = Column> {\n pub line: L,\n pub column: C,\n}"
],
"name": "point",
"type": "Point"
}
],
"end_line": 453,
"name": "hyperlink_at",
"signature": "fn hyperlink_at(term: &Term<T>, point: Point) -> Option<(Hyperlink, Match)>",
"start_line": 428
} | {
"class_name": "",
"class_signature": ""
} |
intensity_at_instant | alacritty-master/alacritty/src/display/bell.rs | pub fn intensity_at_instant(&self, instant: Instant) -> f64 {
// If `duration` is zero, then the VisualBell is disabled; therefore,
// its `intensity` is zero.
if self.duration == Duration::from_secs(0) {
return 0.0;
}
match self.start_time {
// Similarly, if `start_time` is `None`, then the VisualBell has not
// been "rung"; therefore, its `intensity` is zero.
None => 0.0,
Some(earlier) => {
// Finally, if the `instant` at which we wish to compute the
// VisualBell's `intensity` occurred before the VisualBell was
// "rung", then its `intensity` is also zero.
if instant < earlier {
return 0.0;
}
let elapsed = instant.duration_since(earlier);
let elapsed_f =
elapsed.as_secs() as f64 + f64::from(elapsed.subsec_nanos()) / 1e9f64;
let duration_f = self.duration.as_secs() as f64
+ f64::from(self.duration.subsec_nanos()) / 1e9f64;
// Otherwise, we compute a value `time` from 0.0 to 1.0
// inclusive that represents the ratio of `elapsed` time to the
// `duration` of the VisualBell.
let time = (elapsed_f / duration_f).min(1.0);
// We use this to compute the inverse `intensity` of the
// VisualBell. When `time` is 0.0, `inverse_intensity` is 0.0,
// and when `time` is 1.0, `inverse_intensity` is 1.0.
let inverse_intensity = match self.animation {
BellAnimation::Ease | BellAnimation::EaseOut => {
cubic_bezier(0.25, 0.1, 0.25, 1.0, time)
},
BellAnimation::EaseOutSine => cubic_bezier(0.39, 0.575, 0.565, 1.0, time),
BellAnimation::EaseOutQuad => cubic_bezier(0.25, 0.46, 0.45, 0.94, time),
BellAnimation::EaseOutCubic => cubic_bezier(0.215, 0.61, 0.355, 1.0, time),
BellAnimation::EaseOutQuart => cubic_bezier(0.165, 0.84, 0.44, 1.0, time),
BellAnimation::EaseOutQuint => cubic_bezier(0.23, 1.0, 0.32, 1.0, time),
BellAnimation::EaseOutExpo => cubic_bezier(0.19, 1.0, 0.22, 1.0, time),
BellAnimation::EaseOutCirc => cubic_bezier(0.075, 0.82, 0.165, 1.0, time),
BellAnimation::Linear => time,
};
// Since we want the `intensity` of the VisualBell to decay over
// `time`, we subtract the `inverse_intensity` from 1.0.
1.0 - inverse_intensity
},
}
} | use std::time::{Duration, Instant};
use crate::config::bell::{BellAnimation, BellConfig};
pub struct VisualBell {
/// Visual bell animation.
animation: BellAnimation,
/// Visual bell duration.
duration: Duration,
/// The last time the visual bell rang, if at all.
start_time: Option<Instant>,
}
impl VisualBell {
/// Ring the visual bell, and return its intensity.
pub fn ring(&mut self) -> f64 {
let now = Instant::now();
self.start_time = Some(now);
self.intensity_at_instant(now)
}
/// Get the currently intensity of the visual bell. The bell's intensity
/// ramps down from 1.0 to 0.0 at a rate determined by the bell's duration.
pub fn intensity(&self) -> f64 {
self.intensity_at_instant(Instant::now())
}
/// Check whether or not the visual bell has completed "ringing".
pub fn completed(&mut self) -> bool {
match self.start_time {
Some(earlier) => {
if Instant::now().duration_since(earlier) >= self.duration {
self.start_time = None;
}
false
},
None => true,
}
}
/// Get the intensity of the visual bell at a particular instant. The bell's
/// intensity ramps down from 1.0 to 0.0 at a rate determined by the bell's
/// duration.
pub fn intensity_at_instant(&self, instant: Instant) -> f64 {
// If `duration` is zero, then the VisualBell is disabled; therefore,
// its `intensity` is zero.
if self.duration == Duration::from_secs(0) {
return 0.0;
}
match self.start_time {
// Similarly, if `start_time` is `None`, then the VisualBell has not
// been "rung"; therefore, its `intensity` is zero.
None => 0.0,
Some(earlier) => {
// Finally, if the `instant` at which we wish to compute the
// VisualBell's `intensity` occurred before the VisualBell was
// "rung", then its `intensity` is also zero.
if instant < earlier {
return 0.0;
}
let elapsed = instant.duration_since(earlier);
let elapsed_f =
elapsed.as_secs() as f64 + f64::from(elapsed.subsec_nanos()) / 1e9f64;
let duration_f = self.duration.as_secs() as f64
+ f64::from(self.duration.subsec_nanos()) / 1e9f64;
// Otherwise, we compute a value `time` from 0.0 to 1.0
// inclusive that represents the ratio of `elapsed` time to the
// `duration` of the VisualBell.
let time = (elapsed_f / duration_f).min(1.0);
// We use this to compute the inverse `intensity` of the
// VisualBell. When `time` is 0.0, `inverse_intensity` is 0.0,
// and when `time` is 1.0, `inverse_intensity` is 1.0.
let inverse_intensity = match self.animation {
BellAnimation::Ease | BellAnimation::EaseOut => {
cubic_bezier(0.25, 0.1, 0.25, 1.0, time)
},
BellAnimation::EaseOutSine => cubic_bezier(0.39, 0.575, 0.565, 1.0, time),
BellAnimation::EaseOutQuad => cubic_bezier(0.25, 0.46, 0.45, 0.94, time),
BellAnimation::EaseOutCubic => cubic_bezier(0.215, 0.61, 0.355, 1.0, time),
BellAnimation::EaseOutQuart => cubic_bezier(0.165, 0.84, 0.44, 1.0, time),
BellAnimation::EaseOutQuint => cubic_bezier(0.23, 1.0, 0.32, 1.0, time),
BellAnimation::EaseOutExpo => cubic_bezier(0.19, 1.0, 0.22, 1.0, time),
BellAnimation::EaseOutCirc => cubic_bezier(0.075, 0.82, 0.165, 1.0, time),
BellAnimation::Linear => time,
};
// Since we want the `intensity` of the VisualBell to decay over
// `time`, we subtract the `inverse_intensity` from 1.0.
1.0 - inverse_intensity
},
}
}
pub fn update_config(&mut self, bell_config: &BellConfig) {
self.animation = bell_config.animation;
self.duration = bell_config.duration();
}
}
impl From<&BellConfig> for VisualBell {
fn from(bell_config: &BellConfig) -> VisualBell {
VisualBell {
animation: bell_config.animation,
duration: bell_config.duration(),
start_time: None,
}
}
}
fn cubic_bezier(p0: f64, p1: f64, p2: f64, p3: f64, x: f64) -> f64 {
(1.0 - x).powi(3) * p0
+ 3.0 * (1.0 - x).powi(2) * x * p1
+ 3.0 * (1.0 - x) * x.powi(2) * p2
+ x.powi(3) * p3
}
| rust | {
"argument_definitions": [
{
"definitions": [
"/// use std::time::{Duration, SystemTime};"
],
"name": "instant",
"type": "Instant"
}
],
"end_line": 99,
"name": "intensity_at_instant",
"signature": "pub fn intensity_at_instant(&self, instant: Instant) -> f64",
"start_line": 46
} | {
"class_name": "impl VisualBell {\n /// Ring the visual bell, and return its intensity.\n pub fn ring(&mut self) -> f64 {\n let now = Instant::now();\n self.start_time = Some(now);\n self.intensity_at_instant(now)\n }\n\n /// Get the currently intensity of the visual bell. The bell's intensity\n /// ramps down from 1.0 to 0.0 at a rate determined by the bell's duration.\n pub fn intensity(&self) -> f64 {\n self.intensity_at_instant(Instant::now())\n }\n\n /// Check whether or not the visual bell has completed \"ringing\".\n pub fn completed(&mut self) -> bool {\n match self.start_time {\n Some(earlier) => {\n if Instant::now().duration_since(earlier) >= self.duration {\n self.start_time = None;\n }\n false\n },\n None => true,\n }\n }\n\n /// Get the intensity of the visual bell at a particular instant. The bell's\n /// intensity ramps down from 1.0 to 0.0 at a rate determined by the bell's\n /// duration.\n pub fn intensity_at_instant(&self, instant: Instant) -> f64 {\n // If `duration` is zero, then the VisualBell is disabled; therefore,\n // its `intensity` is zero.\n if self.duration == Duration::from_secs(0) {\n return 0.0;\n }\n\n match self.start_time {\n // Similarly, if `start_time` is `None`, then the VisualBell has not\n // been \"rung\"; therefore, its `intensity` is zero.\n None => 0.0,\n\n Some(earlier) => {\n // Finally, if the `instant` at which we wish to compute the\n // VisualBell's `intensity` occurred before the VisualBell was\n // \"rung\", then its `intensity` is also zero.\n if instant < earlier {\n return 0.0;\n }\n\n let elapsed = instant.duration_since(earlier);\n let elapsed_f =\n elapsed.as_secs() as f64 + f64::from(elapsed.subsec_nanos()) / 1e9f64;\n let duration_f = self.duration.as_secs() as f64\n + f64::from(self.duration.subsec_nanos()) / 1e9f64;\n\n // Otherwise, we compute a value `time` from 0.0 to 1.0\n // inclusive that represents the ratio of `elapsed` time to the\n // `duration` of the VisualBell.\n let time = (elapsed_f / 
duration_f).min(1.0);\n\n // We use this to compute the inverse `intensity` of the\n // VisualBell. When `time` is 0.0, `inverse_intensity` is 0.0,\n // and when `time` is 1.0, `inverse_intensity` is 1.0.\n let inverse_intensity = match self.animation {\n BellAnimation::Ease | BellAnimation::EaseOut => {\n cubic_bezier(0.25, 0.1, 0.25, 1.0, time)\n },\n BellAnimation::EaseOutSine => cubic_bezier(0.39, 0.575, 0.565, 1.0, time),\n BellAnimation::EaseOutQuad => cubic_bezier(0.25, 0.46, 0.45, 0.94, time),\n BellAnimation::EaseOutCubic => cubic_bezier(0.215, 0.61, 0.355, 1.0, time),\n BellAnimation::EaseOutQuart => cubic_bezier(0.165, 0.84, 0.44, 1.0, time),\n BellAnimation::EaseOutQuint => cubic_bezier(0.23, 1.0, 0.32, 1.0, time),\n BellAnimation::EaseOutExpo => cubic_bezier(0.19, 1.0, 0.22, 1.0, time),\n BellAnimation::EaseOutCirc => cubic_bezier(0.075, 0.82, 0.165, 1.0, time),\n BellAnimation::Linear => time,\n };\n\n // Since we want the `intensity` of the VisualBell to decay over\n // `time`, we subtract the `inverse_intensity` from 1.0.\n 1.0 - inverse_intensity\n },\n }\n }\n\n pub fn update_config(&mut self, bell_config: &BellConfig) {\n self.animation = bell_config.animation;\n self.duration = bell_config.duration();\n }\n}",
"class_signature": "impl VisualBell"
} |
new | alacritty-master/alacritty/src/display/mod.rs | pub fn new(
width: f32,
height: f32,
cell_width: f32,
cell_height: f32,
mut padding_x: f32,
mut padding_y: f32,
dynamic_padding: bool,
) -> SizeInfo {
if dynamic_padding {
padding_x = Self::dynamic_padding(padding_x.floor(), width, cell_width);
padding_y = Self::dynamic_padding(padding_y.floor(), height, cell_height);
}
let lines = (height - 2. * padding_y) / cell_height;
let screen_lines = cmp::max(lines as usize, MIN_SCREEN_LINES);
let columns = (width - 2. * padding_x) / cell_width;
let columns = cmp::max(columns as usize, MIN_COLUMNS);
SizeInfo {
width,
height,
cell_width,
cell_height,
padding_x: padding_x.floor(),
padding_y: padding_y.floor(),
screen_lines,
columns,
}
} | //! The display subsystem including window management, font rasterization, and
//! GPU drawing.
use std::cmp;
use std::fmt::{self, Formatter};
use std::mem::{self, ManuallyDrop};
use std::num::NonZeroU32;
use std::ops::Deref;
use std::time::{Duration, Instant};
use glutin::config::GetGlConfig;
use glutin::context::{NotCurrentContext, PossiblyCurrentContext};
use glutin::display::GetGlDisplay;
use glutin::error::ErrorKind;
use glutin::prelude::*;
use glutin::surface::{Surface, SwapInterval, WindowSurface};
use log::{debug, info};
use parking_lot::MutexGuard;
use serde::{Deserialize, Serialize};
use winit::dpi::PhysicalSize;
use winit::keyboard::ModifiersState;
use winit::raw_window_handle::RawWindowHandle;
use winit::window::CursorIcon;
use crossfont::{Rasterize, Rasterizer, Size as FontSize};
use unicode_width::UnicodeWidthChar;
use alacritty_terminal::event::{EventListener, OnResize, WindowSize};
use alacritty_terminal::grid::Dimensions as TermDimensions;
use alacritty_terminal::index::{Column, Direction, Line, Point};
use alacritty_terminal::selection::Selection;
use alacritty_terminal::term::cell::Flags;
use alacritty_terminal::term::{
self, LineDamageBounds, Term, TermDamage, TermMode, MIN_COLUMNS, MIN_SCREEN_LINES,
};
use alacritty_terminal::vte::ansi::{CursorShape, NamedColor};
use crate::config::debug::RendererPreference;
use crate::config::font::Font;
use crate::config::window::Dimensions;
#[cfg(not(windows))]
use crate::config::window::StartupMode;
use crate::config::UiConfig;
use crate::display::bell::VisualBell;
use crate::display::color::{List, Rgb};
use crate::display::content::{RenderableContent, RenderableCursor};
use crate::display::cursor::IntoRects;
use crate::display::damage::{damage_y_to_viewport_y, DamageTracker};
use crate::display::hint::{HintMatch, HintState};
use crate::display::meter::Meter;
use crate::display::window::Window;
use crate::event::{Event, EventType, Mouse, SearchState};
use crate::message_bar::{MessageBuffer, MessageType};
use crate::renderer::rects::{RenderLine, RenderLines, RenderRect};
use crate::renderer::{self, platform, GlyphCache, Renderer};
use crate::scheduler::{Scheduler, TimerId, Topic};
use crate::string::{ShortenDirection, StrShortener};
pub mod color;
pub mod content;
pub mod cursor;
pub mod hint;
pub mod window;
mod bell;
mod damage;
mod meter;
/// Label for the forward terminal search bar.
const FORWARD_SEARCH_LABEL: &str = "Search: ";
/// Label for the backward terminal search bar.
const BACKWARD_SEARCH_LABEL: &str = "Backward Search: ";
/// The character used to shorten the visible text like uri preview or search regex.
const SHORTENER: char = '…';
/// Color which is used to highlight damaged rects when debugging.
const DAMAGE_RECT_COLOR: Rgb = Rgb::new(255, 0, 255);
#[derive(Debug)]
pub enum Error {
/// Error with window management.
Window(window::Error),
/// Error dealing with fonts.
Font(crossfont::Error),
/// Error in renderer.
Render(renderer::Error),
/// Error during context operations.
Context(glutin::error::Error),
}
impl std::error::Error for Error {
fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
match self {
Error::Window(err) => err.source(),
Error::Font(err) => err.source(),
Error::Render(err) => err.source(),
Error::Context(err) => err.source(),
}
}
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
match self {
Error::Window(err) => err.fmt(f),
Error::Font(err) => err.fmt(f),
Error::Render(err) => err.fmt(f),
Error::Context(err) => err.fmt(f),
}
}
}
impl From<window::Error> for Error {
fn from(val: window::Error) -> Self {
Error::Window(val)
}
}
impl From<crossfont::Error> for Error {
fn from(val: crossfont::Error) -> Self {
Error::Font(val)
}
}
impl From<renderer::Error> for Error {
fn from(val: renderer::Error) -> Self {
Error::Render(val)
}
}
impl From<glutin::error::Error> for Error {
fn from(val: glutin::error::Error) -> Self {
Error::Context(val)
}
}
/// Terminal size info.
#[derive(Serialize, Deserialize, Debug, Copy, Clone, PartialEq, Eq)]
pub struct SizeInfo<T = f32> {
/// Terminal window width.
width: T,
/// Terminal window height.
height: T,
/// Width of individual cell.
cell_width: T,
/// Height of individual cell.
cell_height: T,
/// Horizontal window padding.
padding_x: T,
/// Vertical window padding.
padding_y: T,
/// Number of lines in the viewport.
screen_lines: usize,
/// Number of columns in the viewport.
columns: usize,
}
impl From<SizeInfo<f32>> for SizeInfo<u32> {
fn from(size_info: SizeInfo<f32>) -> Self {
Self {
width: size_info.width as u32,
height: size_info.height as u32,
cell_width: size_info.cell_width as u32,
cell_height: size_info.cell_height as u32,
padding_x: size_info.padding_x as u32,
padding_y: size_info.padding_y as u32,
screen_lines: size_info.screen_lines,
columns: size_info.screen_lines,
}
}
}
impl From<SizeInfo<f32>> for WindowSize {
fn from(size_info: SizeInfo<f32>) -> Self {
Self {
num_cols: size_info.columns() as u16,
num_lines: size_info.screen_lines() as u16,
cell_width: size_info.cell_width() as u16,
cell_height: size_info.cell_height() as u16,
}
}
}
impl<T: Clone + Copy> SizeInfo<T> {
#[inline]
pub fn width(&self) -> T {
self.width
}
#[inline]
pub fn height(&self) -> T {
self.height
}
#[inline]
pub fn cell_width(&self) -> T {
self.cell_width
}
#[inline]
pub fn cell_height(&self) -> T {
self.cell_height
}
#[inline]
pub fn padding_x(&self) -> T {
self.padding_x
}
#[inline]
pub fn padding_y(&self) -> T {
self.padding_y
}
}
impl SizeInfo<f32> {
#[allow(clippy::too_many_arguments)]
pub fn new(
width: f32,
height: f32,
cell_width: f32,
cell_height: f32,
mut padding_x: f32,
mut padding_y: f32,
dynamic_padding: bool,
) -> SizeInfo {
if dynamic_padding {
padding_x = Self::dynamic_padding(padding_x.floor(), width, cell_width);
padding_y = Self::dynamic_padding(padding_y.floor(), height, cell_height);
}
let lines = (height - 2. * padding_y) / cell_height;
let screen_lines = cmp::max(lines as usize, MIN_SCREEN_LINES);
let columns = (width - 2. * padding_x) / cell_width;
let columns = cmp::max(columns as usize, MIN_COLUMNS);
SizeInfo {
width,
height,
cell_width,
cell_height,
padding_x: padding_x.floor(),
padding_y: padding_y.floor(),
screen_lines,
columns,
}
}
#[inline]
pub fn reserve_lines(&mut self, count: usize) {
self.screen_lines = cmp::max(self.screen_lines.saturating_sub(count), MIN_SCREEN_LINES);
}
/// Check if coordinates are inside the terminal grid.
///
/// The padding, message bar or search are not counted as part of the grid.
#[inline]
pub fn contains_point(&self, x: usize, y: usize) -> bool {
x <= (self.padding_x + self.columns as f32 * self.cell_width) as usize
&& x > self.padding_x as usize
&& y <= (self.padding_y + self.screen_lines as f32 * self.cell_height) as usize
&& y > self.padding_y as usize
}
/// Calculate padding to spread it evenly around the terminal content.
#[inline]
fn dynamic_padding(padding: f32, dimension: f32, cell_dimension: f32) -> f32 {
padding + ((dimension - 2. * padding) % cell_dimension) / 2.
}
}
impl TermDimensions for SizeInfo {
#[inline]
fn columns(&self) -> usize {
self.columns
}
#[inline]
fn screen_lines(&self) -> usize {
self.screen_lines
}
#[inline]
fn total_lines(&self) -> usize {
self.screen_lines()
}
}
#[derive(Default, Clone, Debug, PartialEq, Eq)]
pub struct DisplayUpdate {
pub dirty: bool,
dimensions: Option<PhysicalSize<u32>>,
cursor_dirty: bool,
font: Option<Font>,
}
impl DisplayUpdate {
pub fn dimensions(&self) -> Option<PhysicalSize<u32>> {
self.dimensions
}
pub fn font(&self) -> Option<&Font> {
self.font.as_ref()
}
pub fn cursor_dirty(&self) -> bool {
self.cursor_dirty
}
pub fn set_dimensions(&mut self, dimensions: PhysicalSize<u32>) {
self.dimensions = Some(dimensions);
self.dirty = true;
}
pub fn set_font(&mut self, font: Font) {
self.font = Some(font);
self.dirty = true;
}
pub fn set_cursor_dirty(&mut self) {
self.cursor_dirty = true;
self.dirty = true;
}
}
/// The display wraps a window, font rasterizer, and GPU renderer.
pub struct Display {
pub window: Window,
pub size_info: SizeInfo,
/// Hint highlighted by the mouse.
pub highlighted_hint: Option<HintMatch>,
/// Frames since hint highlight was created.
highlighted_hint_age: usize,
/// Hint highlighted by the vi mode cursor.
pub vi_highlighted_hint: Option<HintMatch>,
/// Frames since hint highlight was created.
vi_highlighted_hint_age: usize,
pub raw_window_handle: RawWindowHandle,
/// UI cursor visibility for blinking.
pub cursor_hidden: bool,
pub visual_bell: VisualBell,
/// Mapped RGB values for each terminal color.
pub colors: List,
/// State of the keyboard hints.
pub hint_state: HintState,
/// Unprocessed display updates.
pub pending_update: DisplayUpdate,
/// The renderer update that takes place only once before the actual rendering.
pub pending_renderer_update: Option<RendererUpdate>,
/// The ime on the given display.
pub ime: Ime,
/// The state of the timer for frame scheduling.
pub frame_timer: FrameTimer,
/// Damage tracker for the given display.
pub damage_tracker: DamageTracker,
/// Font size used by the window.
pub font_size: FontSize,
// Mouse point position when highlighting hints.
hint_mouse_point: Option<Point>,
renderer: ManuallyDrop<Renderer>,
renderer_preference: Option<RendererPreference>,
surface: ManuallyDrop<Surface<WindowSurface>>,
context: ManuallyDrop<PossiblyCurrentContext>,
glyph_cache: GlyphCache,
meter: Meter,
}
impl Display {
pub fn new(
window: Window,
gl_context: NotCurrentContext,
config: &UiConfig,
_tabbed: bool,
) -> Result<Display, Error> {
let raw_window_handle = window.raw_window_handle();
let scale_factor = window.scale_factor as f32;
let rasterizer = Rasterizer::new()?;
let font_size = config.font.size().scale(scale_factor);
debug!("Loading \"{}\" font", &config.font.normal().family);
let font = config.font.clone().with_size(font_size);
let mut glyph_cache = GlyphCache::new(rasterizer, &font)?;
let metrics = glyph_cache.font_metrics();
let (cell_width, cell_height) = compute_cell_size(config, &metrics);
// Resize the window to account for the user configured size.
if let Some(dimensions) = config.window.dimensions() {
let size = window_size(config, dimensions, cell_width, cell_height, scale_factor);
window.request_inner_size(size);
}
// Create the GL surface to draw into.
let surface = platform::create_gl_surface(
&gl_context,
window.inner_size(),
window.raw_window_handle(),
)?;
// Make the context current.
let context = gl_context.make_current(&surface)?;
// Create renderer.
let mut renderer = Renderer::new(&context, config.debug.renderer)?;
// Load font common glyphs to accelerate rendering.
debug!("Filling glyph cache with common glyphs");
renderer.with_loader(|mut api| {
glyph_cache.reset_glyph_cache(&mut api);
});
let padding = config.window.padding(window.scale_factor as f32);
let viewport_size = window.inner_size();
// Create new size with at least one column and row.
let size_info = SizeInfo::new(
viewport_size.width as f32,
viewport_size.height as f32,
cell_width,
cell_height,
padding.0,
padding.1,
config.window.dynamic_padding && config.window.dimensions().is_none(),
);
info!("Cell size: {} x {}", cell_width, cell_height);
info!("Padding: {} x {}", size_info.padding_x(), size_info.padding_y());
info!("Width: {}, Height: {}", size_info.width(), size_info.height());
// Update OpenGL projection.
renderer.resize(&size_info);
// Clear screen.
let background_color = config.colors.primary.background;
renderer.clear(background_color, config.window_opacity());
// Disable shadows for transparent windows on macOS.
#[cfg(target_os = "macos")]
window.set_has_shadow(config.window_opacity() >= 1.0);
let is_wayland = matches!(raw_window_handle, RawWindowHandle::Wayland(_));
// On Wayland we can safely ignore this call, since the window isn't visible until you
// actually draw something into it and commit those changes.
if !is_wayland {
surface.swap_buffers(&context).expect("failed to swap buffers.");
renderer.finish();
}
// Set resize increments for the newly created window.
if config.window.resize_increments {
window.set_resize_increments(PhysicalSize::new(cell_width, cell_height));
}
window.set_visible(true);
// Always focus new windows, even if no Alacritty window is currently focused.
#[cfg(target_os = "macos")]
window.focus_window();
#[allow(clippy::single_match)]
#[cfg(not(windows))]
if !_tabbed {
match config.window.startup_mode {
#[cfg(target_os = "macos")]
StartupMode::SimpleFullscreen => window.set_simple_fullscreen(true),
StartupMode::Maximized if !is_wayland => window.set_maximized(true),
_ => (),
}
}
let hint_state = HintState::new(config.hints.alphabet());
let mut damage_tracker = DamageTracker::new(size_info.screen_lines(), size_info.columns());
damage_tracker.debug = config.debug.highlight_damage;
// Disable vsync.
if let Err(err) = surface.set_swap_interval(&context, SwapInterval::DontWait) {
info!("Failed to disable vsync: {}", err);
}
Ok(Self {
context: ManuallyDrop::new(context),
visual_bell: VisualBell::from(&config.bell),
renderer: ManuallyDrop::new(renderer),
renderer_preference: config.debug.renderer,
surface: ManuallyDrop::new(surface),
colors: List::from(&config.colors),
frame_timer: FrameTimer::new(),
raw_window_handle,
damage_tracker,
glyph_cache,
hint_state,
size_info,
font_size,
window,
pending_renderer_update: Default::default(),
vi_highlighted_hint_age: Default::default(),
highlighted_hint_age: Default::default(),
vi_highlighted_hint: Default::default(),
highlighted_hint: Default::default(),
hint_mouse_point: Default::default(),
pending_update: Default::default(),
cursor_hidden: Default::default(),
meter: Default::default(),
ime: Default::default(),
})
}
#[inline]
pub fn gl_context(&self) -> &PossiblyCurrentContext {
&self.context
}
pub fn make_not_current(&mut self) {
if self.context.is_current() {
self.context.make_not_current_in_place().expect("failed to disable context");
}
}
/// Activate this window's OpenGL context, recovering from GPU resets.
///
/// If the driver reports a lost context — either through
/// `was_context_reset` on an already-current context, or a `ContextLost`
/// error while making it current — the context and renderer are recreated
/// from the same display/config and the renderer state is rebuilt.
pub fn make_current(&mut self) {
    let is_current = self.context.is_current();

    // Attempt to make the context current if it's not.
    let context_loss = if is_current {
        self.renderer.was_context_reset()
    } else {
        match self.context.make_current(&self.surface) {
            Err(err) if err.error_kind() == ErrorKind::ContextLost => {
                info!("Context lost for window {:?}", self.window.id());
                true
            },
            _ => false,
        }
    };

    if !context_loss {
        return;
    }

    let gl_display = self.context.display();
    let gl_config = self.context.config();
    let raw_window_handle = Some(self.window.raw_window_handle());
    let context = platform::create_gl_context(&gl_display, &gl_config, raw_window_handle)
        .expect("failed to recreate context.");

    // Drop the old context and renderer.
    unsafe {
        ManuallyDrop::drop(&mut self.renderer);
        ManuallyDrop::drop(&mut self.context);
    }

    // Activate new context.
    let context = context.treat_as_possibly_current();
    self.context = ManuallyDrop::new(context);
    // FIX: corrected typo in the panic message ("reativate" -> "reactivate").
    self.context.make_current(&self.surface).expect("failed to reactivate context after reset.");

    // Recreate renderer.
    let renderer = Renderer::new(&self.context, self.renderer_preference)
        .expect("failed to recreate renderer after reset");
    self.renderer = ManuallyDrop::new(renderer);

    // Resize the renderer and rebuild cached state, since everything GPU-side was lost.
    self.renderer.resize(&self.size_info);
    self.reset_glyph_cache();
    self.damage_tracker.frame().mark_fully_damaged();

    debug!("Recovered window {:?} from gpu reset", self.window.id());
}
/// Swap the front and back buffers of this window's surface.
///
/// On an EGL surface backed by a Wayland window handle (and when damage
/// highlighting is off), the tracked frame damage is passed along via
/// `swap_buffers_with_damage`; every other combination performs a plain
/// swap. Swap errors are logged at debug level, not propagated.
fn swap_buffers(&self) {
    #[allow(clippy::single_match)]
    let res = match (self.surface.deref(), &self.context.deref()) {
        #[cfg(not(any(target_os = "macos", windows)))]
        (Surface::Egl(surface), PossiblyCurrentContext::Egl(context))
            if matches!(self.raw_window_handle, RawWindowHandle::Wayland(_))
                && !self.damage_tracker.debug =>
        {
            // Damage-based swap is skipped in debug mode, since the damage
            // highlights must repaint the entire surface.
            let damage = self.damage_tracker.shape_frame_damage(self.size_info.into());
            surface.swap_buffers_with_damage(context, &damage)
        },
        (surface, context) => surface.swap_buffers(context),
    };
    if let Err(err) = res {
        debug!("error calling swap_buffers: {}", err);
    }
}
/// Update font size and cell dimensions.
///
/// This will return a tuple of the cell width and height.
fn update_font_size(
    glyph_cache: &mut GlyphCache,
    config: &UiConfig,
    font: &Font,
) -> (f32, f32) {
    // A failed font update is intentionally ignored; the metrics below reflect
    // whatever font the cache ended up with.
    let _ = glyph_cache.update_font_size(font);

    // Derive the new cell size from the (possibly unchanged) font metrics.
    let metrics = glyph_cache.font_metrics();
    compute_cell_size(config, &metrics)
}
/// Reset the glyph cache through the renderer's loader.
fn reset_glyph_cache(&mut self) {
    let glyph_cache = &mut self.glyph_cache;
    self.renderer.with_loader(|mut loader| {
        glyph_cache.reset_glyph_cache(&mut loader);
    });
}
// XXX: this function must not call to any `OpenGL` related tasks. Renderer updates are
// performed in [`Self::process_renderer_update`] right before drawing.
//
/// Process update events.
///
/// Applies a pending font/dimension update: recomputes cell and window sizes,
/// resizes the PTY, terminal and damage tracker when the grid changed, and
/// queues a renderer resize for [`Self::process_renderer_update`].
pub fn handle_update<T>(
    &mut self,
    terminal: &mut Term<T>,
    pty_resize_handle: &mut dyn OnResize,
    message_buffer: &MessageBuffer,
    search_state: &mut SearchState,
    config: &UiConfig,
) where
    T: EventListener,
{
    // Consume the pending update, leaving an empty one behind.
    let pending_update = mem::take(&mut self.pending_update);

    let (mut cell_width, mut cell_height) =
        (self.size_info.cell_width(), self.size_info.cell_height());

    // A changed font or dirty cursor invalidates the cached glyphs.
    if pending_update.font().is_some() || pending_update.cursor_dirty() {
        let renderer_update = self.pending_renderer_update.get_or_insert(Default::default());
        renderer_update.clear_font_cache = true
    }

    // Update font size and cell dimensions.
    if let Some(font) = pending_update.font() {
        let cell_dimensions = Self::update_font_size(&mut self.glyph_cache, config, font);
        cell_width = cell_dimensions.0;
        cell_height = cell_dimensions.1;

        info!("Cell size: {} x {}", cell_width, cell_height);

        // Mark entire terminal as damaged since glyph size could change without cell size
        // changes.
        self.damage_tracker.frame().mark_fully_damaged();
    }

    let (mut width, mut height) = (self.size_info.width(), self.size_info.height());
    if let Some(dimensions) = pending_update.dimensions() {
        width = dimensions.width as f32;
        height = dimensions.height as f32;
    }

    let padding = config.window.padding(self.window.scale_factor as f32);

    let mut new_size = SizeInfo::new(
        width,
        height,
        cell_width,
        cell_height,
        padding.0,
        padding.1,
        config.window.dynamic_padding,
    );

    // Update number of column/lines in the viewport.
    let search_active = search_state.history_index.is_some();
    let message_bar_lines = message_buffer.message().map_or(0, |m| m.text(&new_size).len());
    let search_lines = usize::from(search_active);
    new_size.reserve_lines(message_bar_lines + search_lines);

    // Update resize increments.
    if config.window.resize_increments {
        self.window.set_resize_increments(PhysicalSize::new(cell_width, cell_height));
    }

    // Resize terminal when its dimensions have changed.
    if self.size_info.screen_lines() != new_size.screen_lines
        || self.size_info.columns() != new_size.columns()
    {
        // Resize PTY.
        pty_resize_handle.on_resize(new_size.into());

        // Resize terminal.
        terminal.resize(new_size);

        // Resize damage tracking.
        self.damage_tracker.resize(new_size.screen_lines(), new_size.columns());
    }

    // Check if dimensions have changed.
    if new_size != self.size_info {
        // Queue renderer update.
        let renderer_update = self.pending_renderer_update.get_or_insert(Default::default());
        renderer_update.resize = true;

        // Clear focused search match.
        search_state.clear_focused_match();
    }
    self.size_info = new_size;
}
// NOTE: Renderer updates are split off, since platforms like Wayland require resize and other
// OpenGL operations to be performed right before rendering. Otherwise they could lock the
// back buffer and render with the previous state. This also solves flickering during resizes.
//
/// Apply the pending renderer update, if any.
pub fn process_renderer_update(&mut self) {
    let Some(renderer_update) = self.pending_renderer_update.take() else { return };

    // Resize the surface first, before any other GL work.
    if renderer_update.resize {
        let width = NonZeroU32::new(self.size_info.width() as u32).unwrap();
        let height = NonZeroU32::new(self.size_info.height() as u32).unwrap();
        self.surface.resize(&self.context, width, height);
    }

    // Make sure we operate on this window's OpenGL context.
    self.make_current();

    if renderer_update.clear_font_cache {
        self.reset_glyph_cache();
    }

    self.renderer.resize(&self.size_info);

    info!("Padding: {} x {}", self.size_info.padding_x(), self.size_info.padding_y());
    info!("Width: {}, Height: {}", self.size_info.width(), self.size_info.height());
}
/// Draw the screen.
///
/// A reference to Term whose state is being drawn must be provided.
///
/// This call may block if vsync is enabled.
pub fn draw<T: EventListener>(
    &mut self,
    mut terminal: MutexGuard<'_, Term<T>>,
    scheduler: &mut Scheduler,
    message_buffer: &MessageBuffer,
    config: &UiConfig,
    search_state: &mut SearchState,
) {
    // Collect renderable content before the terminal is dropped.
    let mut content = RenderableContent::new(config, self, &terminal, search_state);
    let mut grid_cells = Vec::new();
    for cell in &mut content {
        grid_cells.push(cell);
    }
    let selection_range = content.selection_range();
    let foreground_color = content.color(NamedColor::Foreground as usize);
    let background_color = content.color(NamedColor::Background as usize);
    let display_offset = content.display_offset();
    let cursor = content.cursor();

    // Snapshot everything still needed from the terminal before releasing the lock.
    let cursor_point = terminal.grid().cursor.point;
    let total_lines = terminal.grid().total_lines();
    let metrics = self.glyph_cache.font_metrics();
    let size_info = self.size_info;

    let vi_mode = terminal.mode().contains(TermMode::VI);
    let vi_cursor_point = if vi_mode { Some(terminal.vi_mode_cursor.point) } else { None };

    // Add damage from the terminal.
    match terminal.damage() {
        TermDamage::Full => self.damage_tracker.frame().mark_fully_damaged(),
        TermDamage::Partial(damaged_lines) => {
            for damage in damaged_lines {
                self.damage_tracker.frame().damage_line(damage);
            }
        },
    }
    terminal.reset_damage();

    // Drop terminal as early as possible to free lock.
    drop(terminal);

    // Invalidate highlighted hints if grid has changed.
    self.validate_hint_highlights(display_offset);

    // Add damage from alacritty's UI elements overlapping terminal.
    let requires_full_damage = self.visual_bell.intensity() != 0.
        || self.hint_state.active()
        || search_state.regex().is_some();
    if requires_full_damage {
        self.damage_tracker.frame().mark_fully_damaged();
        self.damage_tracker.next_frame().mark_fully_damaged();
    }

    let vi_cursor_viewport_point =
        vi_cursor_point.and_then(|cursor| term::point_to_viewport(display_offset, cursor));
    self.damage_tracker.damage_vi_cursor(vi_cursor_viewport_point);
    self.damage_tracker.damage_selection(selection_range, display_offset);

    // Make sure this window's OpenGL context is active.
    self.make_current();

    self.renderer.clear(background_color, config.window_opacity());
    let mut lines = RenderLines::new();

    // Optimize loop hint comparator.
    let has_highlighted_hint =
        self.highlighted_hint.is_some() || self.vi_highlighted_hint.is_some();

    // Draw grid.
    {
        let _sampler = self.meter.sampler();

        // Ensure macOS hasn't reset our viewport.
        #[cfg(target_os = "macos")]
        self.renderer.set_viewport(&size_info);

        let glyph_cache = &mut self.glyph_cache;
        let highlighted_hint = &self.highlighted_hint;
        let vi_highlighted_hint = &self.vi_highlighted_hint;
        let damage_tracker = &mut self.damage_tracker;

        let cells = grid_cells.into_iter().map(|mut cell| {
            // Underline hints hovered by mouse or vi mode cursor.
            if has_highlighted_hint {
                let point = term::viewport_to_point(display_offset, cell.point);
                let hyperlink = cell.extra.as_ref().and_then(|extra| extra.hyperlink.as_ref());

                let should_highlight = |hint: &Option<HintMatch>| {
                    hint.as_ref().is_some_and(|hint| hint.should_highlight(point, hyperlink))
                };
                if should_highlight(highlighted_hint) || should_highlight(vi_highlighted_hint) {
                    damage_tracker.frame().damage_point(cell.point);
                    cell.flags.insert(Flags::UNDERLINE);
                }
            }

            // Update underline/strikeout.
            lines.update(&cell);

            cell
        });

        self.renderer.draw_cells(&size_info, glyph_cache, cells);
    }

    let mut rects = lines.rects(&metrics, &size_info);

    if let Some(vi_cursor_point) = vi_cursor_point {
        // Indicate vi mode by showing the cursor's position in the top right corner.
        let line = (-vi_cursor_point.line.0 + size_info.bottommost_line().0) as usize;
        let obstructed_column = Some(vi_cursor_point)
            .filter(|point| point.line == -(display_offset as i32))
            .map(|point| point.column);
        self.draw_line_indicator(config, total_lines, obstructed_column, line);
    } else if search_state.regex().is_some() {
        // Show current display offset in vi-less search to indicate match position.
        self.draw_line_indicator(config, total_lines, None, display_offset);
    };

    // Draw cursor.
    rects.extend(cursor.rects(&size_info, config.cursor.thickness()));

    // Push visual bell after url/underline/strikeout rects.
    let visual_bell_intensity = self.visual_bell.intensity();
    if visual_bell_intensity != 0. {
        let visual_bell_rect = RenderRect::new(
            0.,
            0.,
            size_info.width(),
            size_info.height(),
            config.bell.color,
            visual_bell_intensity as f32,
        );
        rects.push(visual_bell_rect);
    }

    // Handle IME positioning and search bar rendering.
    let ime_position = match search_state.regex() {
        Some(regex) => {
            let search_label = match search_state.direction() {
                Direction::Right => FORWARD_SEARCH_LABEL,
                Direction::Left => BACKWARD_SEARCH_LABEL,
            };

            let search_text = Self::format_search(regex, search_label, size_info.columns());

            // Render the search bar.
            self.draw_search(config, &search_text);

            // Draw search bar cursor.
            let line = size_info.screen_lines();
            let column = Column(search_text.chars().count() - 1);

            // Add cursor to search bar if IME is not active.
            if self.ime.preedit().is_none() {
                let fg = config.colors.footer_bar_foreground();
                let shape = CursorShape::Underline;
                let cursor_width = NonZeroU32::new(1).unwrap();
                let cursor =
                    RenderableCursor::new(Point::new(line, column), shape, fg, cursor_width);
                rects.extend(cursor.rects(&size_info, config.cursor.thickness()));
            }

            Some(Point::new(line, column))
        },
        None => {
            // Without a search bar, the IME is anchored to the terminal cursor
            // (vi cursor when visible, otherwise the regular cursor inside the viewport).
            let num_lines = self.size_info.screen_lines();
            match vi_cursor_viewport_point {
                None => term::point_to_viewport(display_offset, cursor_point)
                    .filter(|point| point.line < num_lines),
                point => point,
            }
        },
    };

    // Handle IME.
    if self.ime.is_enabled() {
        if let Some(point) = ime_position {
            let (fg, bg) = if search_state.regex().is_some() {
                (config.colors.footer_bar_foreground(), config.colors.footer_bar_background())
            } else {
                (foreground_color, background_color)
            };

            self.draw_ime_preview(point, fg, bg, &mut rects, config);
        }
    }

    if let Some(message) = message_buffer.message() {
        let search_offset = usize::from(search_state.regex().is_some());
        let text = message.text(&size_info);

        // Create a new rectangle for the background.
        let start_line = size_info.screen_lines() + search_offset;
        let y = size_info.cell_height().mul_add(start_line as f32, size_info.padding_y());

        let bg = match message.ty() {
            MessageType::Error => config.colors.normal.red,
            MessageType::Warning => config.colors.normal.yellow,
        };

        let x = 0;
        let width = size_info.width() as i32;
        let height = (size_info.height() - y) as i32;
        let message_bar_rect =
            RenderRect::new(x as f32, y, width as f32, height as f32, bg, 1.);

        // Push message_bar in the end, so it'll be above all other content.
        rects.push(message_bar_rect);

        // Always damage message bar, since it could have messages of the same size in it.
        self.damage_tracker.frame().add_viewport_rect(&size_info, x, y as i32, width, height);

        // Draw rectangles.
        self.renderer.draw_rects(&size_info, &metrics, rects);

        // Relay messages to the user.
        let glyph_cache = &mut self.glyph_cache;
        let fg = config.colors.primary.background;
        for (i, message_text) in text.iter().enumerate() {
            let point = Point::new(start_line + i, Column(0));
            self.renderer.draw_string(
                point,
                fg,
                bg,
                message_text.chars(),
                &size_info,
                glyph_cache,
            );
        }
    } else {
        // Draw rectangles.
        self.renderer.draw_rects(&size_info, &metrics, rects);
    }

    self.draw_render_timer(config);

    // Draw hyperlink uri preview.
    if has_highlighted_hint {
        let cursor_point = vi_cursor_point.or(Some(cursor_point));
        self.draw_hyperlink_preview(config, cursor_point, display_offset);
    }

    // Notify winit that we're about to present.
    self.window.pre_present_notify();

    // Highlight damage for debugging.
    if self.damage_tracker.debug {
        let damage = self.damage_tracker.shape_frame_damage(self.size_info.into());
        let mut rects = Vec::with_capacity(damage.len());
        self.highlight_damage(&mut rects);
        self.renderer.draw_rects(&self.size_info, &metrics, rects);
    }

    // Clearing debug highlights from the previous frame requires full redraw.
    self.swap_buffers();

    if matches!(self.raw_window_handle, RawWindowHandle::Xcb(_) | RawWindowHandle::Xlib(_)) {
        // On X11 `swap_buffers` does not block for vsync. However the next OpenGl command
        // will block to synchronize (this is `glClear` in Alacritty), which causes a
        // permanent one frame delay.
        self.renderer.finish();
    }

    // XXX: Request the new frame after swapping buffers, so the
    // time to finish OpenGL operations is accounted for in the timeout.
    if !matches!(self.raw_window_handle, RawWindowHandle::Wayland(_)) {
        self.request_frame(scheduler);
    }

    self.damage_tracker.swap_damage();
}
/// Apply a new UI configuration to this display.
pub fn update_config(&mut self, config: &UiConfig) {
    self.visual_bell.update_config(&config.bell);
    self.colors = List::from(&config.colors);
    self.damage_tracker.debug = config.debug.highlight_damage;
}
/// Update the mouse/vi mode cursor hint highlighting.
///
/// This will return whether the highlighted hints changed.
pub fn update_highlighted_hints<T>(
    &mut self,
    term: &Term<T>,
    config: &UiConfig,
    mouse: &Mouse,
    modifiers: ModifiersState,
) -> bool {
    // Update vi mode cursor hint.
    let vi_highlighted_hint = if term.mode().contains(TermMode::VI) {
        // Vi mode hint lookup ignores the actually held modifiers.
        let mods = ModifiersState::all();
        let point = term.vi_mode_cursor.point;
        hint::highlighted_at(term, config, point, mods)
    } else {
        None
    };
    let mut dirty = vi_highlighted_hint != self.vi_highlighted_hint;
    self.vi_highlighted_hint = vi_highlighted_hint;
    self.vi_highlighted_hint_age = 0;

    // Force full redraw if the vi mode highlight was cleared.
    if dirty {
        self.damage_tracker.frame().mark_fully_damaged();
    }

    // Abort if mouse highlighting conditions are not met.
    if !mouse.inside_text_area || !term.selection.as_ref().map_or(true, Selection::is_empty) {
        if self.highlighted_hint.take().is_some() {
            self.damage_tracker.frame().mark_fully_damaged();
            dirty = true;
        }
        return dirty;
    }

    // Find highlighted hint at mouse position.
    let point = mouse.point(&self.size_info, term.grid().display_offset());
    let highlighted_hint = hint::highlighted_at(term, config, point, modifiers);

    // Update cursor shape.
    if highlighted_hint.is_some() {
        // If mouse changed the line, we should update the hyperlink preview, since the
        // highlighted hint could be disrupted by the old preview.
        // NOTE(review): this assignment overwrites `dirty` instead of OR-ing into it,
        // potentially discarding vi-hint dirtiness computed above — confirm intentional.
        dirty = self.hint_mouse_point.is_some_and(|p| p.line != point.line);
        self.hint_mouse_point = Some(point);
        self.window.set_mouse_cursor(CursorIcon::Pointer);
    } else if self.highlighted_hint.is_some() {
        self.hint_mouse_point = None;
        // Restore the mouse cursor shape appropriate for the current terminal mode.
        if term.mode().intersects(TermMode::MOUSE_MODE) && !term.mode().contains(TermMode::VI) {
            self.window.set_mouse_cursor(CursorIcon::Default);
        } else {
            self.window.set_mouse_cursor(CursorIcon::Text);
        }
    }

    let mouse_highlight_dirty = self.highlighted_hint != highlighted_hint;
    dirty |= mouse_highlight_dirty;
    self.highlighted_hint = highlighted_hint;
    self.highlighted_hint_age = 0;

    // Force full redraw if the mouse cursor highlight was changed.
    if mouse_highlight_dirty {
        self.damage_tracker.frame().mark_fully_damaged();
    }

    dirty
}
/// Draw the IME preedit overlay anchored at `point`.
///
/// Renders the (possibly shortened) preedit text, underlines it, optionally
/// draws a preedit cursor, and repositions the IME popup window.
#[inline(never)]
fn draw_ime_preview(
    &mut self,
    point: Point<usize>,
    fg: Rgb,
    bg: Rgb,
    rects: &mut Vec<RenderRect>,
    config: &UiConfig,
) {
    let preedit = match self.ime.preedit() {
        Some(preedit) => preedit,
        None => {
            // In case we don't have preedit, just set the popup point.
            self.window.update_ime_position(point, &self.size_info);
            return;
        },
    };

    let num_cols = self.size_info.columns();

    // Get the visible preedit.
    let visible_text: String = match (preedit.cursor_byte_offset, preedit.cursor_end_offset) {
        // When the cursor region itself is wider than the viewport, keep the
        // text from the cursor onward and shorten on the right.
        (Some(byte_offset), Some(end_offset)) if end_offset.0 > num_cols => StrShortener::new(
            &preedit.text[byte_offset.0..],
            num_cols,
            ShortenDirection::Right,
            Some(SHORTENER),
        ),
        _ => {
            StrShortener::new(&preedit.text, num_cols, ShortenDirection::Left, Some(SHORTENER))
        },
    }
    .collect();

    let visible_len = visible_text.chars().count();

    // Right-align the preedit against the viewport edge when it would overflow.
    let end = cmp::min(point.column.0 + visible_len, num_cols);
    let start = end.saturating_sub(visible_len);

    let start = Point::new(point.line, Column(start));
    let end = Point::new(point.line, Column(end - 1));

    let glyph_cache = &mut self.glyph_cache;
    let metrics = glyph_cache.font_metrics();

    self.renderer.draw_string(
        start,
        fg,
        bg,
        visible_text.chars(),
        &self.size_info,
        glyph_cache,
    );

    // Damage preedit inside the terminal viewport.
    if point.line < self.size_info.screen_lines() {
        let damage = LineDamageBounds::new(start.line, 0, num_cols);
        self.damage_tracker.frame().damage_line(damage);
        self.damage_tracker.next_frame().damage_line(damage);
    }

    // Add underline for preedit text.
    let underline = RenderLine { start, end, color: fg };
    rects.extend(underline.rects(Flags::UNDERLINE, &metrics, &self.size_info));

    let ime_popup_point = match preedit.cursor_end_offset {
        Some(cursor_end_offset) => {
            // Use hollow block when multiple characters are changed at once.
            let (shape, width) = if let Some(width) =
                NonZeroU32::new((cursor_end_offset.0 - cursor_end_offset.1) as u32)
            {
                (CursorShape::HollowBlock, width)
            } else {
                (CursorShape::Beam, NonZeroU32::new(1).unwrap())
            };

            // Cursor column is measured back from the end of the visible text,
            // clamped to the viewport's left edge.
            let cursor_column = Column(
                (end.column.0 as isize - cursor_end_offset.0 as isize + 1).max(0) as usize,
            );
            let cursor_point = Point::new(point.line, cursor_column);
            let cursor = RenderableCursor::new(cursor_point, shape, fg, width);
            rects.extend(cursor.rects(&self.size_info, config.cursor.thickness()));
            cursor_point
        },
        _ => end,
    };

    self.window.update_ime_position(ime_popup_point, &self.size_info);
}
/// Format search regex to account for the cursor and fullwidth characters.
fn format_search(search_regex: &str, search_label: &str, max_width: usize) -> String {
    let label_len = search_label.len();

    // If even the label overflows, truncate it and show nothing else.
    // NOTE(review): assumes the label is ASCII — a multi-byte label could
    // panic on a non-char-boundary slice.
    if label_len > max_width {
        return search_label[..max_width].to_owned();
    }

    // Shorten the regex to the space left after the label and the cursor cell.
    let regex_width = max_width.wrapping_sub(label_len + 1);
    let shortened_regex =
        StrShortener::new(search_regex, regex_width, ShortenDirection::Left, Some(SHORTENER));

    // Assemble `label` + `regex` + one trailing cell for the cursor.
    let mut bar_text = String::from(search_label);
    bar_text.extend(shortened_regex);
    bar_text.push(' ');

    bar_text
}
/// Draw preview for the currently highlighted `Hyperlink`.
///
/// Picks viewport lines from the bottom up, skipping lines occupied by the
/// hint or cursor (when there's room to spare), and renders each hyperlink
/// URI on its own line.
#[inline(never)]
fn draw_hyperlink_preview(
    &mut self,
    config: &UiConfig,
    cursor_point: Option<Point>,
    display_offset: usize,
) {
    let num_cols = self.size_info.columns();
    // Collect URIs from both the mouse and the vi mode hint, shortened to fit.
    let uris: Vec<_> = self
        .highlighted_hint
        .iter()
        .chain(&self.vi_highlighted_hint)
        .filter_map(|hint| hint.hyperlink().map(|hyperlink| hyperlink.uri()))
        .map(|uri| StrShortener::new(uri, num_cols, ShortenDirection::Right, Some(SHORTENER)))
        .collect();

    if uris.is_empty() {
        return;
    }

    // The maximum amount of protected lines including the ones we'll show preview on.
    let max_protected_lines = uris.len() * 2;

    // Lines we shouldn't show preview on, because it'll obscure the highlighted hint.
    let mut protected_lines = Vec::with_capacity(max_protected_lines);
    if self.size_info.screen_lines() > max_protected_lines {
        // Prefer to show preview even when it'll likely obscure the highlighted hint, when
        // there's no place left for it.
        protected_lines.push(self.hint_mouse_point.map(|point| point.line));
        protected_lines.push(cursor_point.map(|point| point.line));
    }

    // Find the line in viewport we can draw preview on without obscuring protected lines.
    let viewport_bottom = self.size_info.bottommost_line() - Line(display_offset as i32);
    let viewport_top = viewport_bottom - (self.size_info.screen_lines() - 1);
    let uri_lines = (viewport_top.0..=viewport_bottom.0)
        .rev()
        .map(|line| Some(Line(line)))
        .filter_map(|line| {
            if protected_lines.contains(&line) {
                None
            } else {
                // Claim this line so later URIs don't reuse it.
                protected_lines.push(line);
                line
            }
        })
        .take(uris.len())
        .flat_map(|line| term::point_to_viewport(display_offset, Point::new(line, Column(0))));

    let fg = config.colors.footer_bar_foreground();
    let bg = config.colors.footer_bar_background();
    for (uri, point) in uris.into_iter().zip(uri_lines) {
        // Damage the uri preview.
        let damage = LineDamageBounds::new(point.line, point.column.0, num_cols);
        self.damage_tracker.frame().damage_line(damage);

        // Damage the uri preview for the next frame as well.
        self.damage_tracker.next_frame().damage_line(damage);

        self.renderer.draw_string(point, fg, bg, uri, &self.size_info, &mut self.glyph_cache);
    }
}
/// Draw the current search regex into the footer bar.
#[inline(never)]
fn draw_search(&mut self, config: &UiConfig, text: &str) {
    let num_cols = self.size_info.columns();

    // Pad the text so the footer line is filled edge to edge.
    let padded_text = format!("{text:<num_cols$}");

    let start = Point::new(self.size_info.screen_lines(), Column(0));
    let fg = config.colors.footer_bar_foreground();
    let bg = config.colors.footer_bar_background();

    self.renderer.draw_string(
        start,
        fg,
        bg,
        padded_text.chars(),
        &self.size_info,
        &mut self.glyph_cache,
    );
}
/// Draw the render timer overlay when enabled in the debug config.
#[inline(never)]
fn draw_render_timer(&mut self, config: &UiConfig) {
    if !config.debug.render_timer {
        return;
    }

    let timing_text = format!("{:.3} usec", self.meter.average());
    let point = Point::new(self.size_info.screen_lines().saturating_sub(2), Column(0));
    let fg = config.colors.primary.background;
    let bg = config.colors.normal.red;

    // The timer changes every frame, so damage it for this frame and the next.
    let damage = LineDamageBounds::new(point.line, point.column.0, timing_text.len());
    self.damage_tracker.frame().damage_line(damage);
    self.damage_tracker.next_frame().damage_line(damage);

    let glyph_cache = &mut self.glyph_cache;
    self.renderer.draw_string(point, fg, bg, timing_text.chars(), &self.size_info, glyph_cache);
}
/// Draw an indicator for the position of a line in history.
#[inline(never)]
fn draw_line_indicator(
    &mut self,
    config: &UiConfig,
    total_lines: usize,
    obstructed_column: Option<Column>,
    line: usize,
) {
    let num_cols = self.size_info.columns();

    // Right-align the `[line/total]` text in the top row.
    let text = format!("[{}/{}]", line, total_lines - 1);
    let start_column = Column(num_cols.saturating_sub(text.len()));
    let point = Point::new(0, start_column);

    // Damage the indicator line for this frame and the next one.
    let damage = LineDamageBounds::new(point.line, point.column.0, num_cols - 1);
    self.damage_tracker.frame().damage_line(damage);
    self.damage_tracker.next_frame().damage_line(damage);

    let colors = &config.colors;
    let fg = colors.line_indicator.foreground.unwrap_or(colors.primary.background);
    let bg = colors.line_indicator.background.unwrap_or(colors.primary.foreground);

    // Skip rendering entirely when it would obscure the vi mode cursor.
    let unobstructed = obstructed_column.map_or(true, |col| col < start_column);
    if unobstructed {
        let glyph_cache = &mut self.glyph_cache;
        self.renderer.draw_string(point, fg, bg, text.chars(), &self.size_info, glyph_cache);
    }
}
/// Highlight damaged rects.
///
/// This function is for debug purposes only.
fn highlight_damage(&self, render_rects: &mut Vec<RenderRect>) {
    let frame_damage = self.damage_tracker.shape_frame_damage(self.size_info.into());
    render_rects.extend(frame_damage.iter().map(|damage_rect| {
        // Translate damage coordinates into viewport space.
        let y = damage_y_to_viewport_y(&self.size_info, damage_rect) as f32;
        RenderRect::new(
            damage_rect.x as f32,
            y,
            damage_rect.width as f32,
            damage_rect.height as f32,
            DAMAGE_RECT_COLOR,
            0.5,
        )
    }));
}
/// Check whether a hint highlight needs to be cleared.
///
/// Any hint older than one frame whose bounds intersect the current frame's
/// damage is dropped, since the underlying grid content may have changed.
fn validate_hint_highlights(&mut self, display_offset: usize) {
    let frame = self.damage_tracker.frame();
    // Mouse hint also resets the cursor icon when cleared; the vi hint does not.
    let hints = [
        (&mut self.highlighted_hint, &mut self.highlighted_hint_age, true),
        (&mut self.vi_highlighted_hint, &mut self.vi_highlighted_hint_age, false),
    ];

    let num_lines = self.size_info.screen_lines();
    for (hint, hint_age, reset_mouse) in hints {
        let (start, end) = match hint {
            Some(hint) => (*hint.bounds().start(), *hint.bounds().end()),
            None => continue,
        };

        // Ignore hints that were created this frame.
        *hint_age += 1;
        if *hint_age == 1 {
            continue;
        }

        // Convert hint bounds to viewport coordinates, clamping out-of-view
        // endpoints to the viewport edges.
        let start = term::point_to_viewport(display_offset, start)
            .filter(|point| point.line < num_lines)
            .unwrap_or_default();
        let end = term::point_to_viewport(display_offset, end)
            .filter(|point| point.line < num_lines)
            .unwrap_or_else(|| Point::new(num_lines - 1, self.size_info.last_column()));

        // Clear invalidated hints.
        if frame.intersects(start, end) {
            if reset_mouse {
                self.window.set_mouse_cursor(CursorIcon::Default);
            }

            frame.mark_fully_damaged();
            *hint = None;
        }
    }
}
/// Request a new frame for a window on Wayland.
fn request_frame(&mut self, scheduler: &mut Scheduler) {
    // Mark that we've used a frame.
    self.window.has_frame = false;

    // Derive the display's vblank interval from the monitor refresh rate,
    // falling back to 60Hz when it's unknown.
    let refresh_millihertz = self
        .window
        .current_monitor()
        .and_then(|monitor| monitor.refresh_rate_millihertz())
        .unwrap_or(60_000) as f64;
    let monitor_vblank_interval = 1_000_000. / refresh_millihertz;

    // Now convert it to micro seconds.
    let monitor_vblank_interval = Duration::from_micros((1000. * monitor_vblank_interval) as u64);

    let swap_timeout = self.frame_timer.compute_timeout(monitor_vblank_interval);

    // Schedule the synthetic frame event.
    let window_id = self.window.id();
    scheduler.schedule(
        Event::new(EventType::Frame, window_id),
        swap_timeout,
        false,
        TimerId::new(Topic::Frame, window_id),
    );
}
}
impl Drop for Display {
    fn drop(&mut self) {
        // Switch OpenGL context before dropping, otherwise objects (like programs) from other
        // contexts might be deleted when dropping renderer.
        self.make_current();
        // SAFETY: each `ManuallyDrop` field is dropped exactly once, here. The
        // renderer is dropped before the context it was created from (see
        // `Renderer::new(&self.context, ..)` in `make_current`), and the
        // context before the surface.
        unsafe {
            ManuallyDrop::drop(&mut self.renderer);
            ManuallyDrop::drop(&mut self.context);
            ManuallyDrop::drop(&mut self.surface);
        }
    }
}
/// Input method state.
#[derive(Debug, Default)]
pub struct Ime {
    /// Whether the IME is enabled.
    enabled: bool,

    /// Current IME preedit.
    preedit: Option<Preedit>,
}

impl Ime {
    /// Enable or disable the IME, dropping all preedit state on disable.
    #[inline]
    pub fn set_enabled(&mut self, is_enabled: bool) {
        if is_enabled {
            self.enabled = true;
        } else {
            // Clear state when disabling IME.
            *self = Default::default();
        }
    }

    /// Whether the IME is currently enabled.
    #[inline]
    pub fn is_enabled(&self) -> bool {
        self.enabled
    }

    /// Replace the current preedit.
    #[inline]
    pub fn set_preedit(&mut self, preedit: Option<Preedit>) {
        self.preedit = preedit;
    }

    /// Borrow the current preedit, if any.
    #[inline]
    pub fn preedit(&self) -> Option<&Preedit> {
        self.preedit.as_ref()
    }
}
#[derive(Debug, Default, PartialEq, Eq)]
pub struct Preedit {
/// The preedit text.
text: String,
/// Byte offset for cursor start into the preedit text.
///
/// `None` means that the cursor is invisible.
cursor_byte_offset: Option<(usize, usize)>,
/// The cursor offset from the end of the start of the preedit in char width.
cursor_end_offset: Option<(usize, usize)>,
}
impl Preedit {
pub fn new(text: String, cursor_byte_offset: Option<(usize, usize)>) -> Self {
let cursor_end_offset = if let Some(byte_offset) = cursor_byte_offset {
// Convert byte offset into char offset.
let start_to_end_offset =
text[byte_offset.0..].chars().fold(0, |acc, ch| acc + ch.width().unwrap_or(1));
let end_to_end_offset =
text[byte_offset.1..].chars().fold(0, |acc, ch| acc + ch.width().unwrap_or(1));
Some((start_to_end_offset, end_to_end_offset))
} else {
None
};
Self { text, cursor_byte_offset, cursor_end_offset }
}
}
/// Pending renderer updates.
///
/// All renderer updates are cached to be applied just before rendering, to avoid platform-specific
/// rendering issues.
#[derive(Debug, Default, Copy, Clone)]
pub struct RendererUpdate {
    /// Should resize the window.
    resize: bool,

    /// Clear font caches.
    clear_font_cache: bool,
}
/// The frame timer state.
pub struct FrameTimer {
    /// Base timestamp used to compute sync points.
    base: Instant,

    /// The last timestamp we synced to.
    last_synced_timestamp: Instant,

    /// The refresh rate we've used to compute sync timestamps.
    refresh_interval: Duration,
}

impl FrameTimer {
    pub fn new() -> Self {
        let now = Instant::now();
        Self { base: now, last_synced_timestamp: now, refresh_interval: Duration::ZERO }
    }

    /// Compute the delay that we should use to achieve the target frame rate.
    pub fn compute_timeout(&mut self, refresh_interval: Duration) -> Duration {
        let now = Instant::now();

        // A refresh-rate change restarts the tick grid from `now`.
        if self.refresh_interval != refresh_interval {
            self.base = now;
            self.last_synced_timestamp = now;
            self.refresh_interval = refresh_interval;
            return refresh_interval;
        }

        let next_frame = self.last_synced_timestamp + self.refresh_interval;

        if next_frame >= now {
            // Redraw on the next `refresh_interval` clock tick.
            self.last_synced_timestamp = next_frame;
            next_frame - now
        } else {
            // We've missed at least one tick: re-align onto the tick grid
            // derived from `base` and redraw immediately.
            let elapsed_micros = (now - self.base).as_micros() as u64;
            let refresh_micros = self.refresh_interval.as_micros() as u64;
            self.last_synced_timestamp =
                now - Duration::from_micros(elapsed_micros % refresh_micros);
            Duration::ZERO
        }
    }
}
/// Calculate the cell dimensions based on font metrics.
///
/// This will return a tuple of the cell width and height.
#[inline]
fn compute_cell_size(config: &UiConfig, metrics: &crossfont::Metrics) -> (f32, f32) {
    let cell_width = metrics.average_advance + f64::from(config.font.offset.x);
    let cell_height = metrics.line_height + f64::from(config.font.offset.y);

    // Cells are floored to whole pixels and never smaller than one pixel.
    (cell_width.floor().max(1.) as f32, cell_height.floor().max(1.) as f32)
}
/// Calculate the size of the window given padding, terminal dimensions and cell size.
fn window_size(
    config: &UiConfig,
    dimensions: Dimensions,
    cell_width: f32,
    cell_height: f32,
    scale_factor: f32,
) -> PhysicalSize<u32> {
    let (padding_x, padding_y) = config.window.padding(scale_factor);

    // Clamp the grid to the minimum supported terminal dimensions.
    let grid_width = cell_width * dimensions.columns.max(MIN_COLUMNS) as f32;
    let grid_height = cell_height * dimensions.lines.max(MIN_SCREEN_LINES) as f32;

    // Padding surrounds the grid on every side.
    let width = padding_x.mul_add(2., grid_width).floor();
    let height = padding_y.mul_add(2., grid_height).floor();

    PhysicalSize::new(width as u32, height as u32)
}
| rust | {
"argument_definitions": [],
"end_line": 261,
"name": "new",
"signature": "pub fn new(\n width: f32,\n height: f32,\n cell_width: f32,\n cell_height: f32,\n mut padding_x: f32,\n mut padding_y: f32,\n dynamic_padding: bool,\n ) -> SizeInfo",
"start_line": 231
} | {
"class_name": "impl SizeInfo<f32> {\n #[allow(clippy::too_many_arguments)]\n pub fn new(\n width: f32,\n height: f32,\n cell_width: f32,\n cell_height: f32,\n mut padding_x: f32,\n mut padding_y: f32,\n dynamic_padding: bool,\n ) -> SizeInfo {\n if dynamic_padding {\n padding_x = Self::dynamic_padding(padding_x.floor(), width, cell_width);\n padding_y = Self::dynamic_padding(padding_y.floor(), height, cell_height);\n }\n\n let lines = (height - 2. * padding_y) / cell_height;\n let screen_lines = cmp::max(lines as usize, MIN_SCREEN_LINES);\n\n let columns = (width - 2. * padding_x) / cell_width;\n let columns = cmp::max(columns as usize, MIN_COLUMNS);\n\n SizeInfo {\n width,\n height,\n cell_width,\n cell_height,\n padding_x: padding_x.floor(),\n padding_y: padding_y.floor(),\n screen_lines,\n columns,\n }\n }\n\n #[inline]\n pub fn reserve_lines(&mut self, count: usize) {\n self.screen_lines = cmp::max(self.screen_lines.saturating_sub(count), MIN_SCREEN_LINES);\n }\n\n /// Check if coordinates are inside the terminal grid.\n ///\n /// The padding, message bar or search are not counted as part of the grid.\n #[inline]\n pub fn contains_point(&self, x: usize, y: usize) -> bool {\n x <= (self.padding_x + self.columns as f32 * self.cell_width) as usize\n && x > self.padding_x as usize\n && y <= (self.padding_y + self.screen_lines as f32 * self.cell_height) as usize\n && y > self.padding_y as usize\n }\n\n /// Calculate padding to spread it evenly around the terminal content.\n #[inline]\n fn dynamic_padding(padding: f32, dimension: f32, cell_dimension: f32) -> f32 {\n padding + ((dimension - 2. * padding) % cell_dimension) / 2.\n }\n}",
"class_signature": "impl SizeInfo<f32>"
} |
update_highlighted_hints | alacritty-master/alacritty/src/display/mod.rs | pub fn update_highlighted_hints(
&mut self,
term: &Term<T>,
config: &UiConfig,
mouse: &Mouse,
modifiers: ModifiersState,
) -> bool {
// Update vi mode cursor hint.
let vi_highlighted_hint = if term.mode().contains(TermMode::VI) {
let mods = ModifiersState::all();
let point = term.vi_mode_cursor.point;
hint::highlighted_at(term, config, point, mods)
} else {
None
};
let mut dirty = vi_highlighted_hint != self.vi_highlighted_hint;
self.vi_highlighted_hint = vi_highlighted_hint;
self.vi_highlighted_hint_age = 0;
// Force full redraw if the vi mode highlight was cleared.
if dirty {
self.damage_tracker.frame().mark_fully_damaged();
}
// Abort if mouse highlighting conditions are not met.
if !mouse.inside_text_area || !term.selection.as_ref().map_or(true, Selection::is_empty) {
if self.highlighted_hint.take().is_some() {
self.damage_tracker.frame().mark_fully_damaged();
dirty = true;
}
return dirty;
}
// Find highlighted hint at mouse position.
let point = mouse.point(&self.size_info, term.grid().display_offset());
let highlighted_hint = hint::highlighted_at(term, config, point, modifiers);
// Update cursor shape.
if highlighted_hint.is_some() {
// If mouse changed the line, we should update the hyperlink preview, since the
// highlighted hint could be disrupted by the old preview.
dirty = self.hint_mouse_point.is_some_and(|p| p.line != point.line);
self.hint_mouse_point = Some(point);
self.window.set_mouse_cursor(CursorIcon::Pointer);
} else if self.highlighted_hint.is_some() {
self.hint_mouse_point = None;
if term.mode().intersects(TermMode::MOUSE_MODE) && !term.mode().contains(TermMode::VI) {
self.window.set_mouse_cursor(CursorIcon::Default);
} else {
self.window.set_mouse_cursor(CursorIcon::Text);
}
}
let mouse_highlight_dirty = self.highlighted_hint != highlighted_hint;
dirty |= mouse_highlight_dirty;
self.highlighted_hint = highlighted_hint;
self.highlighted_hint_age = 0;
// Force full redraw if the mouse cursor highlight was changed.
if mouse_highlight_dirty {
self.damage_tracker.frame().mark_fully_damaged();
}
dirty
} | //! The display subsystem including window management, font rasterization, and
//! GPU drawing.
use std::cmp;
use std::fmt::{self, Formatter};
use std::mem::{self, ManuallyDrop};
use std::num::NonZeroU32;
use std::ops::Deref;
use std::time::{Duration, Instant};
use glutin::config::GetGlConfig;
use glutin::context::{NotCurrentContext, PossiblyCurrentContext};
use glutin::display::GetGlDisplay;
use glutin::error::ErrorKind;
use glutin::prelude::*;
use glutin::surface::{Surface, SwapInterval, WindowSurface};
use log::{debug, info};
use parking_lot::MutexGuard;
use serde::{Deserialize, Serialize};
use winit::dpi::PhysicalSize;
use winit::keyboard::ModifiersState;
use winit::raw_window_handle::RawWindowHandle;
use winit::window::CursorIcon;
use crossfont::{Rasterize, Rasterizer, Size as FontSize};
use unicode_width::UnicodeWidthChar;
use alacritty_terminal::event::{EventListener, OnResize, WindowSize};
use alacritty_terminal::grid::Dimensions as TermDimensions;
use alacritty_terminal::index::{Column, Direction, Line, Point};
use alacritty_terminal::selection::Selection;
use alacritty_terminal::term::cell::Flags;
use alacritty_terminal::term::{
self, LineDamageBounds, Term, TermDamage, TermMode, MIN_COLUMNS, MIN_SCREEN_LINES,
};
use alacritty_terminal::vte::ansi::{CursorShape, NamedColor};
use crate::config::debug::RendererPreference;
use crate::config::font::Font;
use crate::config::window::Dimensions;
#[cfg(not(windows))]
use crate::config::window::StartupMode;
use crate::config::UiConfig;
use crate::display::bell::VisualBell;
use crate::display::color::{List, Rgb};
use crate::display::content::{RenderableContent, RenderableCursor};
use crate::display::cursor::IntoRects;
use crate::display::damage::{damage_y_to_viewport_y, DamageTracker};
use crate::display::hint::{HintMatch, HintState};
use crate::display::meter::Meter;
use crate::display::window::Window;
use crate::event::{Event, EventType, Mouse, SearchState};
use crate::message_bar::{MessageBuffer, MessageType};
use crate::renderer::rects::{RenderLine, RenderLines, RenderRect};
use crate::renderer::{self, platform, GlyphCache, Renderer};
use crate::scheduler::{Scheduler, TimerId, Topic};
use crate::string::{ShortenDirection, StrShortener};
// Submodules of the display subsystem.
pub mod color;
pub mod content;
pub mod cursor;
pub mod hint;
pub mod window;
mod bell;
mod damage;
mod meter;
/// Label prepended to the forward terminal search bar.
const FORWARD_SEARCH_LABEL: &str = "Search: ";
/// Label prepended to the backward terminal search bar.
const BACKWARD_SEARCH_LABEL: &str = "Backward Search: ";
/// The character used to shorten visible text, like the URI preview or search regex.
const SHORTENER: char = '…';
/// Color which is used to highlight damaged rects when debugging.
const DAMAGE_RECT_COLOR: Rgb = Rgb::new(255, 0, 255);
/// Errors which can occur in the display subsystem.
#[derive(Debug)]
pub enum Error {
    /// Error with window management.
    Window(window::Error),
    /// Error dealing with fonts.
    Font(crossfont::Error),
    /// Error in renderer.
    Render(renderer::Error),
    /// Error during context operations.
    Context(glutin::error::Error),
}
impl std::error::Error for Error {
fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
match self {
Error::Window(err) => err.source(),
Error::Font(err) => err.source(),
Error::Render(err) => err.source(),
Error::Context(err) => err.source(),
}
}
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
match self {
Error::Window(err) => err.fmt(f),
Error::Font(err) => err.fmt(f),
Error::Render(err) => err.fmt(f),
Error::Context(err) => err.fmt(f),
}
}
}
impl From<window::Error> for Error {
fn from(val: window::Error) -> Self {
Error::Window(val)
}
}
impl From<crossfont::Error> for Error {
fn from(val: crossfont::Error) -> Self {
Error::Font(val)
}
}
impl From<renderer::Error> for Error {
fn from(val: renderer::Error) -> Self {
Error::Render(val)
}
}
impl From<glutin::error::Error> for Error {
fn from(val: glutin::error::Error) -> Self {
Error::Context(val)
}
}
/// Terminal size info.
///
/// Holds the window dimensions, cell metrics, padding and the resulting grid
/// dimensions. `T` is the numeric type of the pixel measurements (`f32` by
/// default; a `u32` variant is derived via `From`).
#[derive(Serialize, Deserialize, Debug, Copy, Clone, PartialEq, Eq)]
pub struct SizeInfo<T = f32> {
    /// Terminal window width.
    width: T,
    /// Terminal window height.
    height: T,
    /// Width of individual cell.
    cell_width: T,
    /// Height of individual cell.
    cell_height: T,
    /// Horizontal window padding.
    padding_x: T,
    /// Vertical window padding.
    padding_y: T,
    /// Number of lines in the viewport.
    screen_lines: usize,
    /// Number of columns in the viewport.
    columns: usize,
}
/// Truncating conversion of the pixel measurements to `u32`.
impl From<SizeInfo<f32>> for SizeInfo<u32> {
    fn from(size_info: SizeInfo<f32>) -> Self {
        Self {
            width: size_info.width as u32,
            height: size_info.height as u32,
            cell_width: size_info.cell_width as u32,
            cell_height: size_info.cell_height as u32,
            padding_x: size_info.padding_x as u32,
            padding_y: size_info.padding_y as u32,
            screen_lines: size_info.screen_lines,
            // BUG FIX: this previously copied `screen_lines` into `columns`,
            // producing a converted size with the wrong column count.
            columns: size_info.columns,
        }
    }
}
impl From<SizeInfo<f32>> for WindowSize {
    /// Convert to the terminal-side window size representation.
    fn from(size: SizeInfo<f32>) -> Self {
        Self {
            num_cols: size.columns() as u16,
            num_lines: size.screen_lines() as u16,
            cell_width: size.cell_width() as u16,
            cell_height: size.cell_height() as u16,
        }
    }
}
impl<T: Clone + Copy> SizeInfo<T> {
    /// Terminal window width.
    #[inline]
    pub fn width(&self) -> T {
        self.width
    }
    /// Terminal window height.
    #[inline]
    pub fn height(&self) -> T {
        self.height
    }
    /// Width of an individual cell.
    #[inline]
    pub fn cell_width(&self) -> T {
        self.cell_width
    }
    /// Height of an individual cell.
    #[inline]
    pub fn cell_height(&self) -> T {
        self.cell_height
    }
    /// Horizontal window padding.
    #[inline]
    pub fn padding_x(&self) -> T {
        self.padding_x
    }
    /// Vertical window padding.
    #[inline]
    pub fn padding_y(&self) -> T {
        self.padding_y
    }
}
impl SizeInfo<f32> {
    /// Create a new terminal size descriptor.
    ///
    /// When `dynamic_padding` is set, the configured padding is spread evenly
    /// around the terminal content.
    #[allow(clippy::too_many_arguments)]
    pub fn new(
        width: f32,
        height: f32,
        cell_width: f32,
        cell_height: f32,
        mut padding_x: f32,
        mut padding_y: f32,
        dynamic_padding: bool,
    ) -> SizeInfo {
        if dynamic_padding {
            padding_x = Self::dynamic_padding(padding_x.floor(), width, cell_width);
            padding_y = Self::dynamic_padding(padding_y.floor(), height, cell_height);
        }

        // Grid dimensions, clamped to the minimum supported terminal size.
        let screen_lines =
            cmp::max(((height - 2. * padding_y) / cell_height) as usize, MIN_SCREEN_LINES);
        let columns = cmp::max(((width - 2. * padding_x) / cell_width) as usize, MIN_COLUMNS);

        SizeInfo {
            width,
            height,
            cell_width,
            cell_height,
            padding_x: padding_x.floor(),
            padding_y: padding_y.floor(),
            screen_lines,
            columns,
        }
    }

    /// Shrink the viewport by `count` lines, never going below the minimum.
    #[inline]
    pub fn reserve_lines(&mut self, count: usize) {
        self.screen_lines = cmp::max(self.screen_lines.saturating_sub(count), MIN_SCREEN_LINES);
    }

    /// Check if coordinates are inside the terminal grid.
    ///
    /// The padding, message bar or search are not counted as part of the grid.
    #[inline]
    pub fn contains_point(&self, x: usize, y: usize) -> bool {
        let grid_right = (self.padding_x + self.columns as f32 * self.cell_width) as usize;
        let grid_bottom = (self.padding_y + self.screen_lines as f32 * self.cell_height) as usize;
        x > self.padding_x as usize
            && x <= grid_right
            && y > self.padding_y as usize
            && y <= grid_bottom
    }

    /// Calculate padding to spread it evenly around the terminal content.
    #[inline]
    fn dynamic_padding(padding: f32, dimension: f32, cell_dimension: f32) -> f32 {
        padding + ((dimension - 2. * padding) % cell_dimension) / 2.
    }
}
impl TermDimensions for SizeInfo {
    #[inline]
    fn columns(&self) -> usize {
        self.columns
    }

    #[inline]
    fn screen_lines(&self) -> usize {
        self.screen_lines
    }

    #[inline]
    fn total_lines(&self) -> usize {
        // The total size reported here matches the viewport height.
        self.screen_lines
    }
}
/// Accumulated display changes, consumed by `Display::handle_update`.
#[derive(Default, Clone, Debug, PartialEq, Eq)]
pub struct DisplayUpdate {
    /// Whether any update is pending.
    pub dirty: bool,
    /// Pending window dimensions change, if any.
    dimensions: Option<PhysicalSize<u32>>,
    /// Cursor was marked dirty; triggers a font cache clear in `handle_update`.
    cursor_dirty: bool,
    /// Pending font change, if any.
    font: Option<Font>,
}
impl DisplayUpdate {
    /// Pending window dimensions, if any.
    pub fn dimensions(&self) -> Option<PhysicalSize<u32>> {
        self.dimensions
    }

    /// Pending font change, if any.
    pub fn font(&self) -> Option<&Font> {
        self.font.as_ref()
    }

    /// Whether the cursor was marked as dirty.
    pub fn cursor_dirty(&self) -> bool {
        self.cursor_dirty
    }

    /// Queue a window resize.
    pub fn set_dimensions(&mut self, dimensions: PhysicalSize<u32>) {
        self.dirty = true;
        self.dimensions = Some(dimensions);
    }

    /// Queue a font change.
    pub fn set_font(&mut self, font: Font) {
        self.dirty = true;
        self.font = Some(font);
    }

    /// Mark the cursor as dirty.
    pub fn set_cursor_dirty(&mut self) {
        self.dirty = true;
        self.cursor_dirty = true;
    }
}
/// The display wraps a window, font rasterizer, and GPU renderer.
pub struct Display {
    /// The window this display renders into.
    pub window: Window,
    /// Terminal size, cell metrics, and padding for this window.
    pub size_info: SizeInfo,
    /// Hint highlighted by the mouse.
    pub highlighted_hint: Option<HintMatch>,
    /// Frames since hint highlight was created.
    highlighted_hint_age: usize,
    /// Hint highlighted by the vi mode cursor.
    pub vi_highlighted_hint: Option<HintMatch>,
    /// Frames since hint highlight was created.
    vi_highlighted_hint_age: usize,
    /// Raw handle of the underlying window.
    pub raw_window_handle: RawWindowHandle,
    /// UI cursor visibility for blinking.
    pub cursor_hidden: bool,
    /// Visual bell state.
    pub visual_bell: VisualBell,
    /// Mapped RGB values for each terminal color.
    pub colors: List,
    /// State of the keyboard hints.
    pub hint_state: HintState,
    /// Unprocessed display updates.
    pub pending_update: DisplayUpdate,
    /// The renderer update that takes place only once before the actual rendering.
    pub pending_renderer_update: Option<RendererUpdate>,
    /// The ime on the given display.
    pub ime: Ime,
    /// The state of the timer for frame scheduling.
    pub frame_timer: FrameTimer,
    /// Damage tracker for the given display.
    pub damage_tracker: DamageTracker,
    /// Font size used by the window.
    pub font_size: FontSize,
    // Mouse point position when highlighting hints.
    hint_mouse_point: Option<Point>,
    // Renderer and GL objects are wrapped in `ManuallyDrop` so they can be
    // dropped and rebuilt in place when the GPU context is lost (see
    // `make_current`).
    renderer: ManuallyDrop<Renderer>,
    // Renderer preference used when the renderer has to be recreated.
    renderer_preference: Option<RendererPreference>,
    surface: ManuallyDrop<Surface<WindowSurface>>,
    context: ManuallyDrop<PossiblyCurrentContext>,
    // Cache of rasterized glyphs.
    glyph_cache: GlyphCache,
    // Samples timings around grid drawing (see `draw`).
    meter: Meter,
}
impl Display {
    /// Create a display bound to the given window and GL context.
    ///
    /// Sets up the font rasterizer, glyph cache, GL surface and renderer,
    /// sizes the window from the configuration, clears it to the background
    /// color and makes it visible. `_tabbed` suppresses the startup-mode
    /// handling on non-Windows platforms.
    pub fn new(
        window: Window,
        gl_context: NotCurrentContext,
        config: &UiConfig,
        _tabbed: bool,
    ) -> Result<Display, Error> {
        let raw_window_handle = window.raw_window_handle();
        let scale_factor = window.scale_factor as f32;
        // Load the configured font at the scale-adjusted size.
        let rasterizer = Rasterizer::new()?;
        let font_size = config.font.size().scale(scale_factor);
        debug!("Loading \"{}\" font", &config.font.normal().family);
        let font = config.font.clone().with_size(font_size);
        let mut glyph_cache = GlyphCache::new(rasterizer, &font)?;
        let metrics = glyph_cache.font_metrics();
        let (cell_width, cell_height) = compute_cell_size(config, &metrics);
        // Resize the window to account for the user configured size.
        if let Some(dimensions) = config.window.dimensions() {
            let size = window_size(config, dimensions, cell_width, cell_height, scale_factor);
            window.request_inner_size(size);
        }
        // Create the GL surface to draw into.
        let surface = platform::create_gl_surface(
            &gl_context,
            window.inner_size(),
            window.raw_window_handle(),
        )?;
        // Make the context current.
        let context = gl_context.make_current(&surface)?;
        // Create renderer.
        let mut renderer = Renderer::new(&context, config.debug.renderer)?;
        // Load font common glyphs to accelerate rendering.
        debug!("Filling glyph cache with common glyphs");
        renderer.with_loader(|mut api| {
            glyph_cache.reset_glyph_cache(&mut api);
        });
        let padding = config.window.padding(window.scale_factor as f32);
        let viewport_size = window.inner_size();
        // Create new size with at least one column and row.
        let size_info = SizeInfo::new(
            viewport_size.width as f32,
            viewport_size.height as f32,
            cell_width,
            cell_height,
            padding.0,
            padding.1,
            config.window.dynamic_padding && config.window.dimensions().is_none(),
        );
        info!("Cell size: {} x {}", cell_width, cell_height);
        info!("Padding: {} x {}", size_info.padding_x(), size_info.padding_y());
        info!("Width: {}, Height: {}", size_info.width(), size_info.height());
        // Update OpenGL projection.
        renderer.resize(&size_info);
        // Clear screen.
        let background_color = config.colors.primary.background;
        renderer.clear(background_color, config.window_opacity());
        // Disable shadows for transparent windows on macOS.
        #[cfg(target_os = "macos")]
        window.set_has_shadow(config.window_opacity() >= 1.0);
        let is_wayland = matches!(raw_window_handle, RawWindowHandle::Wayland(_));
        // On Wayland we can safely ignore this call, since the window isn't visible until you
        // actually draw something into it and commit those changes.
        if !is_wayland {
            surface.swap_buffers(&context).expect("failed to swap buffers.");
            renderer.finish();
        }
        // Set resize increments for the newly created window.
        if config.window.resize_increments {
            window.set_resize_increments(PhysicalSize::new(cell_width, cell_height));
        }
        window.set_visible(true);
        // Always focus new windows, even if no Alacritty window is currently focused.
        #[cfg(target_os = "macos")]
        window.focus_window();
        #[allow(clippy::single_match)]
        #[cfg(not(windows))]
        if !_tabbed {
            match config.window.startup_mode {
                #[cfg(target_os = "macos")]
                StartupMode::SimpleFullscreen => window.set_simple_fullscreen(true),
                StartupMode::Maximized if !is_wayland => window.set_maximized(true),
                _ => (),
            }
        }
        let hint_state = HintState::new(config.hints.alphabet());
        let mut damage_tracker = DamageTracker::new(size_info.screen_lines(), size_info.columns());
        damage_tracker.debug = config.debug.highlight_damage;
        // Disable vsync.
        if let Err(err) = surface.set_swap_interval(&context, SwapInterval::DontWait) {
            info!("Failed to disable vsync: {}", err);
        }
        Ok(Self {
            context: ManuallyDrop::new(context),
            visual_bell: VisualBell::from(&config.bell),
            renderer: ManuallyDrop::new(renderer),
            renderer_preference: config.debug.renderer,
            surface: ManuallyDrop::new(surface),
            colors: List::from(&config.colors),
            frame_timer: FrameTimer::new(),
            raw_window_handle,
            damage_tracker,
            glyph_cache,
            hint_state,
            size_info,
            font_size,
            window,
            pending_renderer_update: Default::default(),
            vi_highlighted_hint_age: Default::default(),
            highlighted_hint_age: Default::default(),
            vi_highlighted_hint: Default::default(),
            highlighted_hint: Default::default(),
            hint_mouse_point: Default::default(),
            pending_update: Default::default(),
            cursor_hidden: Default::default(),
            meter: Default::default(),
            ime: Default::default(),
        })
    }
    /// Shared accessor to this window's GL context.
    #[inline]
    pub fn gl_context(&self) -> &PossiblyCurrentContext {
        &self.context
    }
    /// Deactivate this window's GL context if it is currently active.
    pub fn make_not_current(&mut self) {
        if self.context.is_current() {
            self.context.make_not_current_in_place().expect("failed to disable context");
        }
    }
pub fn make_current(&mut self) {
let is_current = self.context.is_current();
// Attempt to make the context current if it's not.
let context_loss = if is_current {
self.renderer.was_context_reset()
} else {
match self.context.make_current(&self.surface) {
Err(err) if err.error_kind() == ErrorKind::ContextLost => {
info!("Context lost for window {:?}", self.window.id());
true
},
_ => false,
}
};
if !context_loss {
return;
}
let gl_display = self.context.display();
let gl_config = self.context.config();
let raw_window_handle = Some(self.window.raw_window_handle());
let context = platform::create_gl_context(&gl_display, &gl_config, raw_window_handle)
.expect("failed to recreate context.");
// Drop the old context and renderer.
unsafe {
ManuallyDrop::drop(&mut self.renderer);
ManuallyDrop::drop(&mut self.context);
}
// Activate new context.
let context = context.treat_as_possibly_current();
self.context = ManuallyDrop::new(context);
self.context.make_current(&self.surface).expect("failed to reativate context after reset.");
// Recreate renderer.
let renderer = Renderer::new(&self.context, self.renderer_preference)
.expect("failed to recreate renderer after reset");
self.renderer = ManuallyDrop::new(renderer);
// Resize the renderer.
self.renderer.resize(&self.size_info);
self.reset_glyph_cache();
self.damage_tracker.frame().mark_fully_damaged();
debug!("Recovered window {:?} from gpu reset", self.window.id());
}
    /// Swap the front and back buffers to present the rendered frame.
    fn swap_buffers(&self) {
        #[allow(clippy::single_match)]
        let res = match (self.surface.deref(), &self.context.deref()) {
            // On Wayland EGL, pass the accumulated frame damage along with the
            // swap (unless damage debugging is drawing extra rects).
            #[cfg(not(any(target_os = "macos", windows)))]
            (Surface::Egl(surface), PossiblyCurrentContext::Egl(context))
                if matches!(self.raw_window_handle, RawWindowHandle::Wayland(_))
                    && !self.damage_tracker.debug =>
            {
                let damage = self.damage_tracker.shape_frame_damage(self.size_info.into());
                surface.swap_buffers_with_damage(context, &damage)
            },
            (surface, context) => surface.swap_buffers(context),
        };
        // Swap failures are logged, not fatal.
        if let Err(err) = res {
            debug!("error calling swap_buffers: {}", err);
        }
    }
    /// Update font size and cell dimensions.
    ///
    /// This will return a tuple of the cell width and height.
    fn update_font_size(
        glyph_cache: &mut GlyphCache,
        config: &UiConfig,
        font: &Font,
    ) -> (f32, f32) {
        // Failure to load the new font is deliberately ignored — the cell size
        // is computed from whatever metrics the glyph cache reports afterwards.
        let _ = glyph_cache.update_font_size(font);
        // Compute new cell sizes.
        compute_cell_size(config, &glyph_cache.font_metrics())
    }
    /// Reset glyph cache.
    fn reset_glyph_cache(&mut self) {
        let cache = &mut self.glyph_cache;
        // Repopulate the cache through the renderer's glyph loader API.
        self.renderer.with_loader(|mut api| {
            cache.reset_glyph_cache(&mut api);
        });
    }
    // XXX: this function must not call to any `OpenGL` related tasks. Renderer updates are
    // performed in [`Self::process_renderer_update`] right before drawing.
    //
    /// Process update events.
    ///
    /// Applies the pending [`DisplayUpdate`] (font and/or dimension changes),
    /// recomputes the terminal size, and resizes the PTY, terminal and damage
    /// tracker when the grid dimensions changed.
    pub fn handle_update<T>(
        &mut self,
        terminal: &mut Term<T>,
        pty_resize_handle: &mut dyn OnResize,
        message_buffer: &MessageBuffer,
        search_state: &mut SearchState,
        config: &UiConfig,
    ) where
        T: EventListener,
    {
        let pending_update = mem::take(&mut self.pending_update);
        let (mut cell_width, mut cell_height) =
            (self.size_info.cell_width(), self.size_info.cell_height());
        // A font or cursor change invalidates the cached glyphs.
        if pending_update.font().is_some() || pending_update.cursor_dirty() {
            let renderer_update = self.pending_renderer_update.get_or_insert(Default::default());
            renderer_update.clear_font_cache = true
        }
        // Update font size and cell dimensions.
        if let Some(font) = pending_update.font() {
            let cell_dimensions = Self::update_font_size(&mut self.glyph_cache, config, font);
            cell_width = cell_dimensions.0;
            cell_height = cell_dimensions.1;
            info!("Cell size: {} x {}", cell_width, cell_height);
            // Mark entire terminal as damaged since glyph size could change without cell size
            // changes.
            self.damage_tracker.frame().mark_fully_damaged();
        }
        let (mut width, mut height) = (self.size_info.width(), self.size_info.height());
        if let Some(dimensions) = pending_update.dimensions() {
            width = dimensions.width as f32;
            height = dimensions.height as f32;
        }
        let padding = config.window.padding(self.window.scale_factor as f32);
        let mut new_size = SizeInfo::new(
            width,
            height,
            cell_width,
            cell_height,
            padding.0,
            padding.1,
            config.window.dynamic_padding,
        );
        // Update number of column/lines in the viewport.
        let search_active = search_state.history_index.is_some();
        let message_bar_lines = message_buffer.message().map_or(0, |m| m.text(&new_size).len());
        let search_lines = usize::from(search_active);
        new_size.reserve_lines(message_bar_lines + search_lines);
        // Update resize increments.
        if config.window.resize_increments {
            self.window.set_resize_increments(PhysicalSize::new(cell_width, cell_height));
        }
        // Resize the terminal when its grid dimensions have changed.
        if self.size_info.screen_lines() != new_size.screen_lines
            || self.size_info.columns() != new_size.columns()
        {
            // Resize PTY.
            pty_resize_handle.on_resize(new_size.into());
            // Resize terminal.
            terminal.resize(new_size);
            // Resize damage tracking.
            self.damage_tracker.resize(new_size.screen_lines(), new_size.columns());
        }
        // Check if dimensions have changed.
        if new_size != self.size_info {
            // Queue renderer update.
            let renderer_update = self.pending_renderer_update.get_or_insert(Default::default());
            renderer_update.resize = true;
            // Clear focused search match.
            search_state.clear_focused_match();
        }
        self.size_info = new_size;
    }
    // NOTE: Renderer updates are split off, since platforms like Wayland require resize and other
    // OpenGL operations to be performed right before rendering. Otherwise they could lock the
    // back buffer and render with the previous state. This also solves flickering during resizes.
    //
    /// Update the state of the renderer.
    pub fn process_renderer_update(&mut self) {
        let renderer_update = match self.pending_renderer_update.take() {
            Some(renderer_update) => renderer_update,
            // Nothing pending for this frame.
            _ => return,
        };
        // Resize renderer.
        if renderer_update.resize {
            let width = NonZeroU32::new(self.size_info.width() as u32).unwrap();
            let height = NonZeroU32::new(self.size_info.height() as u32).unwrap();
            self.surface.resize(&self.context, width, height);
        }
        // Ensure we're modifying the correct OpenGL context.
        self.make_current();
        if renderer_update.clear_font_cache {
            self.reset_glyph_cache();
        }
        self.renderer.resize(&self.size_info);
        info!("Padding: {} x {}", self.size_info.padding_x(), self.size_info.padding_y());
        info!("Width: {}, Height: {}", self.size_info.width(), self.size_info.height());
    }
    /// Draw the screen.
    ///
    /// A reference to Term whose state is being drawn must be provided.
    ///
    /// This call may block if vsync is enabled.
    ///
    /// The terminal lock is released as soon as the renderable content and
    /// damage have been collected.
    pub fn draw<T: EventListener>(
        &mut self,
        mut terminal: MutexGuard<'_, Term<T>>,
        scheduler: &mut Scheduler,
        message_buffer: &MessageBuffer,
        config: &UiConfig,
        search_state: &mut SearchState,
    ) {
        // Collect renderable content before the terminal is dropped.
        let mut content = RenderableContent::new(config, self, &terminal, search_state);
        let mut grid_cells = Vec::new();
        for cell in &mut content {
            grid_cells.push(cell);
        }
        let selection_range = content.selection_range();
        let foreground_color = content.color(NamedColor::Foreground as usize);
        let background_color = content.color(NamedColor::Background as usize);
        let display_offset = content.display_offset();
        let cursor = content.cursor();
        let cursor_point = terminal.grid().cursor.point;
        let total_lines = terminal.grid().total_lines();
        let metrics = self.glyph_cache.font_metrics();
        let size_info = self.size_info;
        let vi_mode = terminal.mode().contains(TermMode::VI);
        let vi_cursor_point = if vi_mode { Some(terminal.vi_mode_cursor.point) } else { None };
        // Add damage from the terminal.
        match terminal.damage() {
            TermDamage::Full => self.damage_tracker.frame().mark_fully_damaged(),
            TermDamage::Partial(damaged_lines) => {
                for damage in damaged_lines {
                    self.damage_tracker.frame().damage_line(damage);
                }
            },
        }
        terminal.reset_damage();
        // Drop terminal as early as possible to free lock.
        drop(terminal);
        // Invalidate highlighted hints if grid has changed.
        self.validate_hint_highlights(display_offset);
        // Add damage from alacritty's UI elements overlapping terminal.
        let requires_full_damage = self.visual_bell.intensity() != 0.
            || self.hint_state.active()
            || search_state.regex().is_some();
        if requires_full_damage {
            self.damage_tracker.frame().mark_fully_damaged();
            self.damage_tracker.next_frame().mark_fully_damaged();
        }
        let vi_cursor_viewport_point =
            vi_cursor_point.and_then(|cursor| term::point_to_viewport(display_offset, cursor));
        self.damage_tracker.damage_vi_cursor(vi_cursor_viewport_point);
        self.damage_tracker.damage_selection(selection_range, display_offset);
        // Make sure this window's OpenGL context is active.
        self.make_current();
        self.renderer.clear(background_color, config.window_opacity());
        // Accumulates underline/strikeout segments while drawing grid cells.
        let mut lines = RenderLines::new();
        // Optimize loop hint comparator.
        let has_highlighted_hint =
            self.highlighted_hint.is_some() || self.vi_highlighted_hint.is_some();
        // Draw grid.
        {
            let _sampler = self.meter.sampler();
            // Ensure macOS hasn't reset our viewport.
            #[cfg(target_os = "macos")]
            self.renderer.set_viewport(&size_info);
            let glyph_cache = &mut self.glyph_cache;
            let highlighted_hint = &self.highlighted_hint;
            let vi_highlighted_hint = &self.vi_highlighted_hint;
            let damage_tracker = &mut self.damage_tracker;
            let cells = grid_cells.into_iter().map(|mut cell| {
                // Underline hints hovered by mouse or vi mode cursor.
                if has_highlighted_hint {
                    let point = term::viewport_to_point(display_offset, cell.point);
                    let hyperlink = cell.extra.as_ref().and_then(|extra| extra.hyperlink.as_ref());
                    let should_highlight = |hint: &Option<HintMatch>| {
                        hint.as_ref().is_some_and(|hint| hint.should_highlight(point, hyperlink))
                    };
                    if should_highlight(highlighted_hint) || should_highlight(vi_highlighted_hint) {
                        damage_tracker.frame().damage_point(cell.point);
                        cell.flags.insert(Flags::UNDERLINE);
                    }
                }
                // Update underline/strikeout.
                lines.update(&cell);
                cell
            });
            self.renderer.draw_cells(&size_info, glyph_cache, cells);
        }
        // Convert the accumulated line segments into renderable rects.
        let mut rects = lines.rects(&metrics, &size_info);
        if let Some(vi_cursor_point) = vi_cursor_point {
            // Indicate vi mode by showing the cursor's position in the top right corner.
            let line = (-vi_cursor_point.line.0 + size_info.bottommost_line().0) as usize;
            let obstructed_column = Some(vi_cursor_point)
                .filter(|point| point.line == -(display_offset as i32))
                .map(|point| point.column);
            self.draw_line_indicator(config, total_lines, obstructed_column, line);
        } else if search_state.regex().is_some() {
            // Show current display offset in vi-less search to indicate match position.
            self.draw_line_indicator(config, total_lines, None, display_offset);
        };
        // Draw cursor.
        rects.extend(cursor.rects(&size_info, config.cursor.thickness()));
        // Push visual bell after url/underline/strikeout rects.
        let visual_bell_intensity = self.visual_bell.intensity();
        if visual_bell_intensity != 0. {
            let visual_bell_rect = RenderRect::new(
                0.,
                0.,
                size_info.width(),
                size_info.height(),
                config.bell.color,
                visual_bell_intensity as f32,
            );
            rects.push(visual_bell_rect);
        }
        // Handle IME positioning and search bar rendering.
        let ime_position = match search_state.regex() {
            Some(regex) => {
                let search_label = match search_state.direction() {
                    Direction::Right => FORWARD_SEARCH_LABEL,
                    Direction::Left => BACKWARD_SEARCH_LABEL,
                };
                let search_text = Self::format_search(regex, search_label, size_info.columns());
                // Render the search bar.
                self.draw_search(config, &search_text);
                // Draw search bar cursor.
                let line = size_info.screen_lines();
                let column = Column(search_text.chars().count() - 1);
                // Add cursor to search bar if IME is not active.
                if self.ime.preedit().is_none() {
                    let fg = config.colors.footer_bar_foreground();
                    let shape = CursorShape::Underline;
                    let cursor_width = NonZeroU32::new(1).unwrap();
                    let cursor =
                        RenderableCursor::new(Point::new(line, column), shape, fg, cursor_width);
                    rects.extend(cursor.rects(&size_info, config.cursor.thickness()));
                }
                Some(Point::new(line, column))
            },
            None => {
                let num_lines = self.size_info.screen_lines();
                match vi_cursor_viewport_point {
                    None => term::point_to_viewport(display_offset, cursor_point)
                        .filter(|point| point.line < num_lines),
                    point => point,
                }
            },
        };
        // Handle IME.
        if self.ime.is_enabled() {
            if let Some(point) = ime_position {
                let (fg, bg) = if search_state.regex().is_some() {
                    (config.colors.footer_bar_foreground(), config.colors.footer_bar_background())
                } else {
                    (foreground_color, background_color)
                };
                self.draw_ime_preview(point, fg, bg, &mut rects, config);
            }
        }
        if let Some(message) = message_buffer.message() {
            let search_offset = usize::from(search_state.regex().is_some());
            let text = message.text(&size_info);
            // Create a new rectangle for the background.
            let start_line = size_info.screen_lines() + search_offset;
            let y = size_info.cell_height().mul_add(start_line as f32, size_info.padding_y());
            let bg = match message.ty() {
                MessageType::Error => config.colors.normal.red,
                MessageType::Warning => config.colors.normal.yellow,
            };
            let x = 0;
            let width = size_info.width() as i32;
            let height = (size_info.height() - y) as i32;
            let message_bar_rect =
                RenderRect::new(x as f32, y, width as f32, height as f32, bg, 1.);
            // Push message_bar in the end, so it'll be above all other content.
            rects.push(message_bar_rect);
            // Always damage message bar, since it could have messages of the same size in it.
            self.damage_tracker.frame().add_viewport_rect(&size_info, x, y as i32, width, height);
            // Draw rectangles.
            self.renderer.draw_rects(&size_info, &metrics, rects);
            // Relay messages to the user.
            let glyph_cache = &mut self.glyph_cache;
            let fg = config.colors.primary.background;
            for (i, message_text) in text.iter().enumerate() {
                let point = Point::new(start_line + i, Column(0));
                self.renderer.draw_string(
                    point,
                    fg,
                    bg,
                    message_text.chars(),
                    &size_info,
                    glyph_cache,
                );
            }
        } else {
            // Draw rectangles.
            self.renderer.draw_rects(&size_info, &metrics, rects);
        }
        self.draw_render_timer(config);
        // Draw hyperlink uri preview.
        if has_highlighted_hint {
            let cursor_point = vi_cursor_point.or(Some(cursor_point));
            self.draw_hyperlink_preview(config, cursor_point, display_offset);
        }
        // Notify winit that we're about to present.
        self.window.pre_present_notify();
        // Highlight damage for debugging.
        if self.damage_tracker.debug {
            let damage = self.damage_tracker.shape_frame_damage(self.size_info.into());
            let mut rects = Vec::with_capacity(damage.len());
            self.highlight_damage(&mut rects);
            self.renderer.draw_rects(&self.size_info, &metrics, rects);
        }
        // Clearing debug highlights from the previous frame requires full redraw.
        self.swap_buffers();
        if matches!(self.raw_window_handle, RawWindowHandle::Xcb(_) | RawWindowHandle::Xlib(_)) {
            // On X11 `swap_buffers` does not block for vsync. However the next OpenGl command
            // will block to synchronize (this is `glClear` in Alacritty), which causes a
            // permanent one frame delay.
            self.renderer.finish();
        }
        // XXX: Request the new frame after swapping buffers, so the
        // time to finish OpenGL operations is accounted for in the timeout.
        if !matches!(self.raw_window_handle, RawWindowHandle::Wayland(_)) {
            self.request_frame(scheduler);
        }
        self.damage_tracker.swap_damage();
    }
/// Update to a new configuration.
pub fn update_config(&mut self, config: &UiConfig) {
self.damage_tracker.debug = config.debug.highlight_damage;
self.visual_bell.update_config(&config.bell);
self.colors = List::from(&config.colors);
}
    /// Update the mouse/vi mode cursor hint highlighting.
    ///
    /// Recomputes both the vi-cursor hint and the mouse hint, updates the mouse
    /// cursor icon accordingly, and marks the frame fully damaged whenever a
    /// highlight appears, disappears, or changes.
    ///
    /// This will return whether the highlighted hints changed.
    pub fn update_highlighted_hints<T>(
        &mut self,
        term: &Term<T>,
        config: &UiConfig,
        mouse: &Mouse,
        modifiers: ModifiersState,
    ) -> bool {
        // Update vi mode cursor hint.
        let vi_highlighted_hint = if term.mode().contains(TermMode::VI) {
            // Vi-mode hints are matched with all modifiers set, so highlighting does not
            // depend on which keys are currently held.
            let mods = ModifiersState::all();
            let point = term.vi_mode_cursor.point;
            hint::highlighted_at(term, config, point, mods)
        } else {
            None
        };
        let mut dirty = vi_highlighted_hint != self.vi_highlighted_hint;
        self.vi_highlighted_hint = vi_highlighted_hint;
        // Age 0 marks the hint as created this frame (see `validate_hint_highlights`).
        self.vi_highlighted_hint_age = 0;

        // Force full redraw if the vi mode highlight was cleared.
        if dirty {
            self.damage_tracker.frame().mark_fully_damaged();
        }

        // Abort if mouse highlighting conditions are not met: the pointer must be inside the
        // text area and there must be no active (non-empty) selection.
        if !mouse.inside_text_area || !term.selection.as_ref().map_or(true, Selection::is_empty) {
            if self.highlighted_hint.take().is_some() {
                self.damage_tracker.frame().mark_fully_damaged();
                dirty = true;
            }
            return dirty;
        }

        // Find highlighted hint at mouse position.
        let point = mouse.point(&self.size_info, term.grid().display_offset());
        let highlighted_hint = hint::highlighted_at(term, config, point, modifiers);

        // Update cursor shape.
        if highlighted_hint.is_some() {
            // If mouse changed the line, we should update the hyperlink preview, since the
            // highlighted hint could be disrupted by the old preview.
            //
            // NOTE(review): this plain assignment overwrites any `dirty` value set by the
            // vi-hint comparison above. The frame was already marked fully damaged in that
            // case, so rendering is unaffected, but confirm the return value is meant to
            // drop the vi dirtiness here.
            dirty = self.hint_mouse_point.is_some_and(|p| p.line != point.line);
            self.hint_mouse_point = Some(point);
            self.window.set_mouse_cursor(CursorIcon::Pointer);
        } else if self.highlighted_hint.is_some() {
            // The previous mouse hint vanished; restore the appropriate cursor icon for the
            // current terminal mode.
            self.hint_mouse_point = None;
            if term.mode().intersects(TermMode::MOUSE_MODE) && !term.mode().contains(TermMode::VI) {
                self.window.set_mouse_cursor(CursorIcon::Default);
            } else {
                self.window.set_mouse_cursor(CursorIcon::Text);
            }
        }

        let mouse_highlight_dirty = self.highlighted_hint != highlighted_hint;
        dirty |= mouse_highlight_dirty;
        self.highlighted_hint = highlighted_hint;
        // Age 0 marks the hint as created this frame (see `validate_hint_highlights`).
        self.highlighted_hint_age = 0;

        // Force full redraw if the mouse cursor highlight was changed.
        if mouse_highlight_dirty {
            self.damage_tracker.frame().mark_fully_damaged();
        }

        dirty
    }
    /// Draw the IME preedit overlay at `point` and position the IME popup.
    ///
    /// Renders the (possibly shortened) preedit text with an underline, damages the
    /// affected line for this and the next frame, draws a cursor inside the preedit
    /// when one is reported, and finally tells the window where the IME popup goes.
    #[inline(never)]
    fn draw_ime_preview(
        &mut self,
        point: Point<usize>,
        fg: Rgb,
        bg: Rgb,
        rects: &mut Vec<RenderRect>,
        config: &UiConfig,
    ) {
        let preedit = match self.ime.preedit() {
            Some(preedit) => preedit,
            None => {
                // In case we don't have preedit, just set the popup point.
                self.window.update_ime_position(point, &self.size_info);
                return;
            },
        };

        let num_cols = self.size_info.columns();

        // Get the visible preedit. When the text up to the cursor is wider than the
        // terminal, shorten from the right starting at the cursor's byte offset;
        // otherwise shorten the whole text from the left.
        let visible_text: String = match (preedit.cursor_byte_offset, preedit.cursor_end_offset) {
            (Some(byte_offset), Some(end_offset)) if end_offset.0 > num_cols => StrShortener::new(
                &preedit.text[byte_offset.0..],
                num_cols,
                ShortenDirection::Right,
                Some(SHORTENER),
            ),
            _ => {
                StrShortener::new(&preedit.text, num_cols, ShortenDirection::Left, Some(SHORTENER))
            },
        }
        .collect();

        let visible_len = visible_text.chars().count();

        // Clamp the preedit to the terminal width; shift the start left when the text
        // would run past the last column.
        //
        // NOTE(review): `end - 1` below assumes a non-empty preedit at column 0 — an
        // empty `visible_text` would underflow; presumably an empty preedit is
        // reported as `None` upstream. Confirm.
        let end = cmp::min(point.column.0 + visible_len, num_cols);
        let start = end.saturating_sub(visible_len);

        let start = Point::new(point.line, Column(start));
        let end = Point::new(point.line, Column(end - 1));

        let glyph_cache = &mut self.glyph_cache;
        let metrics = glyph_cache.font_metrics();

        self.renderer.draw_string(
            start,
            fg,
            bg,
            visible_text.chars(),
            &self.size_info,
            glyph_cache,
        );

        // Damage preedit inside the terminal viewport, for this frame and the next one
        // (the next frame must repaint whatever the preedit covered).
        if point.line < self.size_info.screen_lines() {
            let damage = LineDamageBounds::new(start.line, 0, num_cols);
            self.damage_tracker.frame().damage_line(damage);
            self.damage_tracker.next_frame().damage_line(damage);
        }

        // Add underline for preedit text.
        let underline = RenderLine { start, end, color: fg };
        rects.extend(underline.rects(Flags::UNDERLINE, &metrics, &self.size_info));

        let ime_popup_point = match preedit.cursor_end_offset {
            Some(cursor_end_offset) => {
                // Use hollow block when multiple characters are changed at once.
                let (shape, width) = if let Some(width) =
                    NonZeroU32::new((cursor_end_offset.0 - cursor_end_offset.1) as u32)
                {
                    (CursorShape::HollowBlock, width)
                } else {
                    (CursorShape::Beam, NonZeroU32::new(1).unwrap())
                };

                // Position the cursor by walking back from the end of the visible text
                // by the cursor's distance-to-end, clamped to column 0.
                let cursor_column = Column(
                    (end.column.0 as isize - cursor_end_offset.0 as isize + 1).max(0) as usize,
                );
                let cursor_point = Point::new(point.line, cursor_column);
                let cursor = RenderableCursor::new(cursor_point, shape, fg, width);
                rects.extend(cursor.rects(&self.size_info, config.cursor.thickness()));

                cursor_point
            },
            // No visible cursor: anchor the popup at the end of the preedit.
            _ => end,
        };

        self.window.update_ime_position(ime_popup_point, &self.size_info);
    }
/// Format search regex to account for the cursor and fullwidth characters.
fn format_search(search_regex: &str, search_label: &str, max_width: usize) -> String {
let label_len = search_label.len();
// Skip `search_regex` formatting if only label is visible.
if label_len > max_width {
return search_label[..max_width].to_owned();
}
// The search string consists of `search_label` + `search_regex` + `cursor`.
let mut bar_text = String::from(search_label);
bar_text.extend(StrShortener::new(
search_regex,
max_width.wrapping_sub(label_len + 1),
ShortenDirection::Left,
Some(SHORTENER),
));
// Add place for cursor.
bar_text.push(' ');
bar_text
}
    /// Draw preview for the currently highlighted `Hyperlink`.
    ///
    /// Collects the URIs of the mouse- and vi-highlighted hints and draws each one on
    /// the lowest free viewport line that does not obscure a "protected" line (the
    /// hint line under the mouse, the cursor line, or a line already claimed by
    /// another preview).
    #[inline(never)]
    fn draw_hyperlink_preview(
        &mut self,
        config: &UiConfig,
        cursor_point: Option<Point>,
        display_offset: usize,
    ) {
        let num_cols = self.size_info.columns();

        // Shorten each URI from the right so it fits within the viewport width.
        let uris: Vec<_> = self
            .highlighted_hint
            .iter()
            .chain(&self.vi_highlighted_hint)
            .filter_map(|hint| hint.hyperlink().map(|hyperlink| hyperlink.uri()))
            .map(|uri| StrShortener::new(uri, num_cols, ShortenDirection::Right, Some(SHORTENER)))
            .collect();

        if uris.is_empty() {
            return;
        }

        // The maximum amount of protected lines including the ones we'll show preview on.
        let max_protected_lines = uris.len() * 2;

        // Lines we shouldn't show preview on, because it'll obscure the highlighted hint.
        let mut protected_lines = Vec::with_capacity(max_protected_lines);
        if self.size_info.screen_lines() > max_protected_lines {
            // Prefer to show preview even when it'll likely obscure the highlighted hint, when
            // there's no place left for it.
            protected_lines.push(self.hint_mouse_point.map(|point| point.line));
            protected_lines.push(cursor_point.map(|point| point.line));
        }

        // Find the line in viewport we can draw preview on without obscuring protected lines.
        // Scanning bottom-up, each chosen line is itself added to `protected_lines` so the
        // next URI picks a different line.
        let viewport_bottom = self.size_info.bottommost_line() - Line(display_offset as i32);
        let viewport_top = viewport_bottom - (self.size_info.screen_lines() - 1);
        let uri_lines = (viewport_top.0..=viewport_bottom.0)
            .rev()
            .map(|line| Some(Line(line)))
            .filter_map(|line| {
                if protected_lines.contains(&line) {
                    None
                } else {
                    protected_lines.push(line);
                    line
                }
            })
            .take(uris.len())
            .flat_map(|line| term::point_to_viewport(display_offset, Point::new(line, Column(0))));

        let fg = config.colors.footer_bar_foreground();
        let bg = config.colors.footer_bar_background();
        for (uri, point) in uris.into_iter().zip(uri_lines) {
            // Damage the uri preview.
            let damage = LineDamageBounds::new(point.line, point.column.0, num_cols);
            self.damage_tracker.frame().damage_line(damage);

            // Damage the uri preview for the next frame as well, so the area under the
            // preview is repainted once it disappears.
            self.damage_tracker.next_frame().damage_line(damage);

            self.renderer.draw_string(point, fg, bg, uri, &self.size_info, &mut self.glyph_cache);
        }
    }
/// Draw current search regex.
#[inline(never)]
fn draw_search(&mut self, config: &UiConfig, text: &str) {
// Assure text length is at least num_cols.
let num_cols = self.size_info.columns();
let text = format!("{text:<num_cols$}");
let point = Point::new(self.size_info.screen_lines(), Column(0));
let fg = config.colors.footer_bar_foreground();
let bg = config.colors.footer_bar_background();
self.renderer.draw_string(
point,
fg,
bg,
text.chars(),
&self.size_info,
&mut self.glyph_cache,
);
}
/// Draw render timer.
#[inline(never)]
fn draw_render_timer(&mut self, config: &UiConfig) {
if !config.debug.render_timer {
return;
}
let timing = format!("{:.3} usec", self.meter.average());
let point = Point::new(self.size_info.screen_lines().saturating_sub(2), Column(0));
let fg = config.colors.primary.background;
let bg = config.colors.normal.red;
// Damage render timer for current and next frame.
let damage = LineDamageBounds::new(point.line, point.column.0, timing.len());
self.damage_tracker.frame().damage_line(damage);
self.damage_tracker.next_frame().damage_line(damage);
let glyph_cache = &mut self.glyph_cache;
self.renderer.draw_string(point, fg, bg, timing.chars(), &self.size_info, glyph_cache);
}
    /// Draw an indicator for the position of a line in history.
    ///
    /// Renders `[line/total]` right-aligned on the topmost viewport line, skipping the
    /// draw when it would obscure the vi mode cursor at `obstructed_column`.
    #[inline(never)]
    fn draw_line_indicator(
        &mut self,
        config: &UiConfig,
        total_lines: usize,
        obstructed_column: Option<Column>,
        line: usize,
    ) {
        let columns = self.size_info.columns();
        let text = format!("[{}/{}]", line, total_lines - 1);
        // Right-align the indicator against the last column.
        let column = Column(self.size_info.columns().saturating_sub(text.len()));
        let point = Point::new(0, column);

        // Damage the line indicator for current and next frame.
        //
        // NOTE(review): the whole top line is damaged even when the draw below is
        // skipped — presumably so a previously drawn indicator gets cleared; confirm.
        let damage = LineDamageBounds::new(point.line, point.column.0, columns - 1);
        self.damage_tracker.frame().damage_line(damage);
        self.damage_tracker.next_frame().damage_line(damage);

        let colors = &config.colors;
        // Default to inverted primary colors when no explicit indicator colors are set.
        let fg = colors.line_indicator.foreground.unwrap_or(colors.primary.background);
        let bg = colors.line_indicator.background.unwrap_or(colors.primary.foreground);

        // Do not render anything if it would obscure the vi mode cursor.
        if obstructed_column.map_or(true, |obstructed_column| obstructed_column < column) {
            let glyph_cache = &mut self.glyph_cache;
            self.renderer.draw_string(point, fg, bg, text.chars(), &self.size_info, glyph_cache);
        }
    }
/// Highlight damaged rects.
///
/// This function is for debug purposes only.
fn highlight_damage(&self, render_rects: &mut Vec<RenderRect>) {
for damage_rect in &self.damage_tracker.shape_frame_damage(self.size_info.into()) {
let x = damage_rect.x as f32;
let height = damage_rect.height as f32;
let width = damage_rect.width as f32;
let y = damage_y_to_viewport_y(&self.size_info, damage_rect) as f32;
let render_rect = RenderRect::new(x, y, width, height, DAMAGE_RECT_COLOR, 0.5);
render_rects.push(render_rect);
}
}
    /// Check whether a hint highlight needs to be cleared.
    ///
    /// Ages both the mouse and vi hint highlights; any hint older than one frame whose
    /// viewport span intersects the current frame damage is dropped and forces a full
    /// redraw.
    fn validate_hint_highlights(&mut self, display_offset: usize) {
        let frame = self.damage_tracker.frame();
        // (hint, its age counter, whether clearing it should also reset the mouse icon).
        let hints = [
            (&mut self.highlighted_hint, &mut self.highlighted_hint_age, true),
            (&mut self.vi_highlighted_hint, &mut self.vi_highlighted_hint_age, false),
        ];

        let num_lines = self.size_info.screen_lines();
        for (hint, hint_age, reset_mouse) in hints {
            let (start, end) = match hint {
                Some(hint) => (*hint.bounds().start(), *hint.bounds().end()),
                None => continue,
            };

            // Ignore hints that were created this frame (age was reset to 0 when the
            // hint was set, so the first increment brings it to exactly 1).
            *hint_age += 1;
            if *hint_age == 1 {
                continue;
            }

            // Convert hint bounds to viewport coordinates; a start above the viewport is
            // clamped to the origin, an end below it to the last visible cell.
            let start = term::point_to_viewport(display_offset, start)
                .filter(|point| point.line < num_lines)
                .unwrap_or_default();
            let end = term::point_to_viewport(display_offset, end)
                .filter(|point| point.line < num_lines)
                .unwrap_or_else(|| Point::new(num_lines - 1, self.size_info.last_column()));

            // Clear invalidated hints.
            if frame.intersects(start, end) {
                if reset_mouse {
                    self.window.set_mouse_cursor(CursorIcon::Default);
                }

                frame.mark_fully_damaged();
                *hint = None;
            }
        }
    }
/// Request a new frame for a window on Wayland.
fn request_frame(&mut self, scheduler: &mut Scheduler) {
// Mark that we've used a frame.
self.window.has_frame = false;
// Get the display vblank interval.
let monitor_vblank_interval = 1_000_000.
/ self
.window
.current_monitor()
.and_then(|monitor| monitor.refresh_rate_millihertz())
.unwrap_or(60_000) as f64;
// Now convert it to micro seconds.
let monitor_vblank_interval =
Duration::from_micros((1000. * monitor_vblank_interval) as u64);
let swap_timeout = self.frame_timer.compute_timeout(monitor_vblank_interval);
let window_id = self.window.id();
let timer_id = TimerId::new(Topic::Frame, window_id);
let event = Event::new(EventType::Frame, window_id);
scheduler.schedule(event, swap_timeout, false, timer_id);
}
}
impl Drop for Display {
    fn drop(&mut self) {
        // Switch OpenGL context before dropping, otherwise objects (like programs) from other
        // contexts might be deleted when dropping renderer.
        self.make_current();

        // SAFETY: these fields are `ManuallyDrop`, so each is dropped exactly once, here.
        // The order matters: the renderer (which owns GL objects) must go before the
        // context, and the context before the surface.
        unsafe {
            ManuallyDrop::drop(&mut self.renderer);
            ManuallyDrop::drop(&mut self.context);
            ManuallyDrop::drop(&mut self.surface);
        }
    }
}
/// Input method state.
///
/// Tracks whether the IME is active and the preedit text currently being composed.
#[derive(Debug, Default)]
pub struct Ime {
    /// Whether the IME is enabled.
    enabled: bool,

    /// Current IME preedit.
    preedit: Option<Preedit>,
}
impl Ime {
    /// Enable or disable the IME.
    ///
    /// Disabling also discards any pending preedit.
    #[inline]
    pub fn set_enabled(&mut self, is_enabled: bool) {
        if !is_enabled {
            // Clear state when disabling IME.
            *self = Default::default();
        } else {
            self.enabled = true;
        }
    }

    /// Whether the IME is currently enabled.
    #[inline]
    pub fn is_enabled(&self) -> bool {
        self.enabled
    }

    /// Replace the current preedit.
    #[inline]
    pub fn set_preedit(&mut self, preedit: Option<Preedit>) {
        self.preedit = preedit;
    }

    /// Borrow the current preedit, if any.
    #[inline]
    pub fn preedit(&self) -> Option<&Preedit> {
        self.preedit.as_ref()
    }
}
#[derive(Debug, Default, PartialEq, Eq)]
pub struct Preedit {
    /// The preedit text.
    text: String,

    /// Byte offsets into `text` for the cursor start and cursor end.
    ///
    /// `None` means that the cursor is invisible.
    cursor_byte_offset: Option<(usize, usize)>,

    /// Distances (in char widths) from the cursor start and cursor end to the end of
    /// the preedit text, precomputed in [`Preedit::new`].
    cursor_end_offset: Option<(usize, usize)>,
}
impl Preedit {
    /// Create a preedit, precomputing the cursor's char-width distances to the end of
    /// the text from its byte offsets.
    pub fn new(text: String, cursor_byte_offset: Option<(usize, usize)>) -> Self {
        let cursor_end_offset = cursor_byte_offset.map(|(start_byte, end_byte)| {
            // Total terminal-cell width from a byte offset to the end of the text.
            let width_to_end = |byte: usize| -> usize {
                text[byte..].chars().map(|ch| ch.width().unwrap_or(1)).sum()
            };
            (width_to_end(start_byte), width_to_end(end_byte))
        });

        Self { text, cursor_byte_offset, cursor_end_offset }
    }
}
/// Pending renderer updates.
///
/// All renderer updates are cached to be applied just before rendering, to avoid platform-specific
/// rendering issues.
#[derive(Debug, Default, Copy, Clone)]
pub struct RendererUpdate {
    /// Should resize the window.
    resize: bool,

    /// Clear font caches.
    clear_font_cache: bool,
}
/// The frame timer state.
///
/// Keeps frame scheduling synced to a fixed tick grid derived from the display
/// refresh interval.
pub struct FrameTimer {
    /// Base timestamp used to compute sync points.
    base: Instant,

    /// The last timestamp we synced to.
    last_synced_timestamp: Instant,

    /// The refresh rate we've used to compute sync timestamps.
    refresh_interval: Duration,
}

impl FrameTimer {
    /// Create a timer anchored at the current instant.
    pub fn new() -> Self {
        let now = Instant::now();
        Self { base: now, last_synced_timestamp: now, refresh_interval: Duration::ZERO }
    }

    /// Compute the delay that we should use to achieve the target frame rate.
    pub fn compute_timeout(&mut self, refresh_interval: Duration) -> Duration {
        let now = Instant::now();

        // A refresh rate change invalidates the old tick grid; re-anchor everything.
        if self.refresh_interval != refresh_interval {
            self.base = now;
            self.last_synced_timestamp = now;
            self.refresh_interval = refresh_interval;
            return refresh_interval;
        }

        let next_frame = self.last_synced_timestamp + self.refresh_interval;
        if now <= next_frame {
            // Redraw on the upcoming `refresh_interval` clock tick.
            self.last_synced_timestamp = next_frame;
            return next_frame - now;
        }

        // We're past the deadline: redraw immediately and re-sync onto the tick grid
        // derived from `base`.
        let elapsed_micros = (now - self.base).as_micros() as u64;
        let refresh_micros = self.refresh_interval.as_micros() as u64;
        self.last_synced_timestamp = now - Duration::from_micros(elapsed_micros % refresh_micros);
        Duration::ZERO
    }
}
/// Calculate the cell dimensions based on font metrics.
///
/// This will return a tuple of the cell width and height. Both dimensions are
/// floored and clamped to at least one pixel.
#[inline]
fn compute_cell_size(config: &UiConfig, metrics: &crossfont::Metrics) -> (f32, f32) {
    let width = metrics.average_advance + f64::from(config.font.offset.x);
    let height = metrics.line_height + f64::from(config.font.offset.y);

    (width.floor().max(1.) as f32, height.floor().max(1.) as f32)
}
/// Calculate the size of the window given padding, terminal dimensions and cell size.
///
/// The grid is clamped to the minimum column/line counts before padding is added on
/// both sides of each axis.
fn window_size(
    config: &UiConfig,
    dimensions: Dimensions,
    cell_width: f32,
    cell_height: f32,
    scale_factor: f32,
) -> PhysicalSize<u32> {
    let (padding_x, padding_y) = config.window.padding(scale_factor);

    // Enforce the minimum grid size before converting to pixels.
    let grid_width = cell_width * dimensions.columns.max(MIN_COLUMNS) as f32;
    let grid_height = cell_height * dimensions.lines.max(MIN_SCREEN_LINES) as f32;

    // `mul_add` keeps the single-rounding behavior for `2 * padding + grid`.
    let width = padding_x.mul_add(2., grid_width).floor();
    let height = padding_y.mul_add(2., grid_height).floor();

    PhysicalSize::new(width as u32, height as u32)
}
| rust | {
"argument_definitions": [
{
"definitions": [
"pub struct Term<T> {\n /// Terminal focus controlling the cursor shape.\n pub is_focused: bool,\n\n /// Cursor for keyboard selection.\n pub vi_mode_cursor: ViModeCursor,\n\n pub selection: Option<Selection>,\n\n /// Currently active grid.\n ///\n /// Tracks the screen buffer currently in use. While the alternate screen buffer is active,\n /// this will be the alternate grid. Otherwise it is the primary screen buffer.\n grid: Grid<Cell>,\n\n /// Currently inactive grid.\n ///\n /// Opposite of the active grid. While the alternate screen buffer is active, this will be the\n /// primary grid. Otherwise it is the alternate screen buffer.\n inactive_grid: Grid<Cell>,\n\n /// Index into `charsets`, pointing to what ASCII is currently being mapped to.\n active_charset: CharsetIndex,\n\n /// Tabstops.\n tabs: TabStops,\n\n /// Mode flags.\n mode: TermMode,\n\n /// Scroll region.\n ///\n /// Range going from top to bottom of the terminal, indexed from the top of the viewport.\n scroll_region: Range<Line>,\n\n /// Modified terminal colors.\n colors: Colors,\n\n /// Current style of the cursor.\n cursor_style: Option<CursorStyle>,\n\n /// Proxy for sending events to the event loop.\n event_proxy: T,\n\n /// Current title of the window.\n title: Option<String>,\n\n /// Stack of saved window titles. When a title is popped from this stack, the `title` for the\n /// term is set.\n title_stack: Vec<Option<String>>,\n\n /// The stack for the keyboard modes.\n keyboard_mode_stack: Vec<KeyboardModes>,\n\n /// Currently inactive keyboard mode stack.\n inactive_keyboard_mode_stack: Vec<KeyboardModes>,\n\n /// Information about damaged cells.\n damage: TermDamageState,\n\n /// Config directly for the terminal.\n config: Config,\n}"
],
"name": "term",
"type": "&Term<T>"
},
{
"definitions": [
"pub struct UiConfig {\n /// Miscellaneous configuration options.\n pub general: General,\n\n /// Extra environment variables.\n pub env: HashMap<String, String>,\n\n /// How much scrolling history to keep.\n pub scrolling: Scrolling,\n\n /// Cursor configuration.\n pub cursor: Cursor,\n\n /// Selection configuration.\n pub selection: Selection,\n\n /// Font configuration.\n pub font: Font,\n\n /// Window configuration.\n pub window: WindowConfig,\n\n /// Mouse configuration.\n pub mouse: Mouse,\n\n /// Debug options.\n pub debug: Debug,\n\n /// Bell configuration.\n pub bell: BellConfig,\n\n /// RGB values for colors.\n pub colors: Colors,\n\n /// Path where config was loaded from.\n #[config(skip)]\n pub config_paths: Vec<PathBuf>,\n\n /// Regex hints for interacting with terminal content.\n pub hints: Hints,\n\n /// Config for the alacritty_terminal itself.\n pub terminal: Terminal,\n\n /// Keyboard configuration.\n keyboard: Keyboard,\n\n /// Path to a shell program to run on startup.\n #[config(deprecated = \"use terminal.shell instead\")]\n shell: Option<Program>,\n\n /// Configuration file imports.\n ///\n /// This is never read since the field is directly accessed through the config's\n /// [`toml::Value`], but still present to prevent unused field warnings.\n #[config(deprecated = \"use general.import instead\")]\n import: Option<Vec<String>>,\n\n /// Shell startup directory.\n #[config(deprecated = \"use general.working_directory instead\")]\n working_directory: Option<PathBuf>,\n\n /// Live config reload.\n #[config(deprecated = \"use general.live_config_reload instead\")]\n live_config_reload: Option<bool>,\n\n /// Offer IPC through a unix socket.\n #[cfg(unix)]\n #[config(deprecated = \"use general.ipc_socket instead\")]\n pub ipc_socket: Option<bool>,\n}"
],
"name": "config",
"type": "&UiConfig"
},
{
"definitions": [
"pub struct Mouse {\n pub left_button_state: ElementState,\n pub middle_button_state: ElementState,\n pub right_button_state: ElementState,\n pub last_click_timestamp: Instant,\n pub last_click_button: MouseButton,\n pub click_state: ClickState,\n pub accumulated_scroll: AccumulatedScroll,\n pub cell_side: Side,\n pub block_hint_launcher: bool,\n pub hint_highlight_dirty: bool,\n pub inside_text_area: bool,\n pub x: usize,\n pub y: usize,\n}"
],
"name": "mouse",
"type": "&Mouse"
},
{
"definitions": [
" pub struct ModifiersState: u32 {\n /// The \"shift\" key.\n const SHIFT = 0b100;\n /// The \"control\" key.\n const CONTROL = 0b100 << 3;\n /// The \"alt\" key.\n const ALT = 0b100 << 6;\n /// This is the \"windows\" key on PC and \"command\" key on Mac.\n const SUPER = 0b100 << 9;\n }"
],
"name": "modifiers",
"type": "ModifiersState"
}
],
"end_line": 1123,
"name": "update_highlighted_hints",
"signature": "pub fn update_highlighted_hints(\n &mut self,\n term: &Term<T>,\n config: &UiConfig,\n mouse: &Mouse,\n modifiers: ModifiersState,\n ) -> bool",
"start_line": 1059
} | {
"class_name": "impl Display {\n pub fn new(\n window: Window,\n gl_context: NotCurrentContext,\n config: &UiConfig,\n _tabbed: bool,\n ) -> Result<Display, Error> {\n let raw_window_handle = window.raw_window_handle();\n\n let scale_factor = window.scale_factor as f32;\n let rasterizer = Rasterizer::new()?;\n\n let font_size = config.font.size().scale(scale_factor);\n debug!(\"Loading \\\"{}\\\" font\", &config.font.normal().family);\n let font = config.font.clone().with_size(font_size);\n let mut glyph_cache = GlyphCache::new(rasterizer, &font)?;\n\n let metrics = glyph_cache.font_metrics();\n let (cell_width, cell_height) = compute_cell_size(config, &metrics);\n\n // Resize the window to account for the user configured size.\n if let Some(dimensions) = config.window.dimensions() {\n let size = window_size(config, dimensions, cell_width, cell_height, scale_factor);\n window.request_inner_size(size);\n }\n\n // Create the GL surface to draw into.\n let surface = platform::create_gl_surface(\n &gl_context,\n window.inner_size(),\n window.raw_window_handle(),\n )?;\n\n // Make the context current.\n let context = gl_context.make_current(&surface)?;\n\n // Create renderer.\n let mut renderer = Renderer::new(&context, config.debug.renderer)?;\n\n // Load font common glyphs to accelerate rendering.\n debug!(\"Filling glyph cache with common glyphs\");\n renderer.with_loader(|mut api| {\n glyph_cache.reset_glyph_cache(&mut api);\n });\n\n let padding = config.window.padding(window.scale_factor as f32);\n let viewport_size = window.inner_size();\n\n // Create new size with at least one column and row.\n let size_info = SizeInfo::new(\n viewport_size.width as f32,\n viewport_size.height as f32,\n cell_width,\n cell_height,\n padding.0,\n padding.1,\n config.window.dynamic_padding && config.window.dimensions().is_none(),\n );\n\n info!(\"Cell size: {} x {}\", cell_width, cell_height);\n info!(\"Padding: {} x {}\", size_info.padding_x(), size_info.padding_y());\n 
info!(\"Width: {}, Height: {}\", size_info.width(), size_info.height());\n\n // Update OpenGL projection.\n renderer.resize(&size_info);\n\n // Clear screen.\n let background_color = config.colors.primary.background;\n renderer.clear(background_color, config.window_opacity());\n\n // Disable shadows for transparent windows on macOS.\n #[cfg(target_os = \"macos\")]\n window.set_has_shadow(config.window_opacity() >= 1.0);\n\n let is_wayland = matches!(raw_window_handle, RawWindowHandle::Wayland(_));\n\n // On Wayland we can safely ignore this call, since the window isn't visible until you\n // actually draw something into it and commit those changes.\n if !is_wayland {\n surface.swap_buffers(&context).expect(\"failed to swap buffers.\");\n renderer.finish();\n }\n\n // Set resize increments for the newly created window.\n if config.window.resize_increments {\n window.set_resize_increments(PhysicalSize::new(cell_width, cell_height));\n }\n\n window.set_visible(true);\n\n // Always focus new windows, even if no Alacritty window is currently focused.\n #[cfg(target_os = \"macos\")]\n window.focus_window();\n\n #[allow(clippy::single_match)]\n #[cfg(not(windows))]\n if !_tabbed {\n match config.window.startup_mode {\n #[cfg(target_os = \"macos\")]\n StartupMode::SimpleFullscreen => window.set_simple_fullscreen(true),\n StartupMode::Maximized if !is_wayland => window.set_maximized(true),\n _ => (),\n }\n }\n\n let hint_state = HintState::new(config.hints.alphabet());\n\n let mut damage_tracker = DamageTracker::new(size_info.screen_lines(), size_info.columns());\n damage_tracker.debug = config.debug.highlight_damage;\n\n // Disable vsync.\n if let Err(err) = surface.set_swap_interval(&context, SwapInterval::DontWait) {\n info!(\"Failed to disable vsync: {}\", err);\n }\n\n Ok(Self {\n context: ManuallyDrop::new(context),\n visual_bell: VisualBell::from(&config.bell),\n renderer: ManuallyDrop::new(renderer),\n renderer_preference: config.debug.renderer,\n surface: 
ManuallyDrop::new(surface),\n colors: List::from(&config.colors),\n frame_timer: FrameTimer::new(),\n raw_window_handle,\n damage_tracker,\n glyph_cache,\n hint_state,\n size_info,\n font_size,\n window,\n pending_renderer_update: Default::default(),\n vi_highlighted_hint_age: Default::default(),\n highlighted_hint_age: Default::default(),\n vi_highlighted_hint: Default::default(),\n highlighted_hint: Default::default(),\n hint_mouse_point: Default::default(),\n pending_update: Default::default(),\n cursor_hidden: Default::default(),\n meter: Default::default(),\n ime: Default::default(),\n })\n }\n\n #[inline]\n pub fn gl_context(&self) -> &PossiblyCurrentContext {\n &self.context\n }\n\n pub fn make_not_current(&mut self) {\n if self.context.is_current() {\n self.context.make_not_current_in_place().expect(\"failed to disable context\");\n }\n }\n\n pub fn make_current(&mut self) {\n let is_current = self.context.is_current();\n\n // Attempt to make the context current if it's not.\n let context_loss = if is_current {\n self.renderer.was_context_reset()\n } else {\n match self.context.make_current(&self.surface) {\n Err(err) if err.error_kind() == ErrorKind::ContextLost => {\n info!(\"Context lost for window {:?}\", self.window.id());\n true\n },\n _ => false,\n }\n };\n\n if !context_loss {\n return;\n }\n\n let gl_display = self.context.display();\n let gl_config = self.context.config();\n let raw_window_handle = Some(self.window.raw_window_handle());\n let context = platform::create_gl_context(&gl_display, &gl_config, raw_window_handle)\n .expect(\"failed to recreate context.\");\n\n // Drop the old context and renderer.\n unsafe {\n ManuallyDrop::drop(&mut self.renderer);\n ManuallyDrop::drop(&mut self.context);\n }\n\n // Activate new context.\n let context = context.treat_as_possibly_current();\n self.context = ManuallyDrop::new(context);\n self.context.make_current(&self.surface).expect(\"failed to reativate context after reset.\");\n\n // Recreate 
renderer.\n let renderer = Renderer::new(&self.context, self.renderer_preference)\n .expect(\"failed to recreate renderer after reset\");\n self.renderer = ManuallyDrop::new(renderer);\n\n // Resize the renderer.\n self.renderer.resize(&self.size_info);\n\n self.reset_glyph_cache();\n self.damage_tracker.frame().mark_fully_damaged();\n\n debug!(\"Recovered window {:?} from gpu reset\", self.window.id());\n }\n\n fn swap_buffers(&self) {\n #[allow(clippy::single_match)]\n let res = match (self.surface.deref(), &self.context.deref()) {\n #[cfg(not(any(target_os = \"macos\", windows)))]\n (Surface::Egl(surface), PossiblyCurrentContext::Egl(context))\n if matches!(self.raw_window_handle, RawWindowHandle::Wayland(_))\n && !self.damage_tracker.debug =>\n {\n let damage = self.damage_tracker.shape_frame_damage(self.size_info.into());\n surface.swap_buffers_with_damage(context, &damage)\n },\n (surface, context) => surface.swap_buffers(context),\n };\n if let Err(err) = res {\n debug!(\"error calling swap_buffers: {}\", err);\n }\n }\n\n /// Update font size and cell dimensions.\n ///\n /// This will return a tuple of the cell width and height.\n fn update_font_size(\n glyph_cache: &mut GlyphCache,\n config: &UiConfig,\n font: &Font,\n ) -> (f32, f32) {\n let _ = glyph_cache.update_font_size(font);\n\n // Compute new cell sizes.\n compute_cell_size(config, &glyph_cache.font_metrics())\n }\n\n /// Reset glyph cache.\n fn reset_glyph_cache(&mut self) {\n let cache = &mut self.glyph_cache;\n self.renderer.with_loader(|mut api| {\n cache.reset_glyph_cache(&mut api);\n });\n }\n\n // XXX: this function must not call to any `OpenGL` related tasks. 
Renderer updates are\n // performed in [`Self::process_renderer_update`] right before drawing.\n //\n /// Process update events.\n pub fn handle_update<T>(\n &mut self,\n terminal: &mut Term<T>,\n pty_resize_handle: &mut dyn OnResize,\n message_buffer: &MessageBuffer,\n search_state: &mut SearchState,\n config: &UiConfig,\n ) where\n T: EventListener,\n {\n let pending_update = mem::take(&mut self.pending_update);\n\n let (mut cell_width, mut cell_height) =\n (self.size_info.cell_width(), self.size_info.cell_height());\n\n if pending_update.font().is_some() || pending_update.cursor_dirty() {\n let renderer_update = self.pending_renderer_update.get_or_insert(Default::default());\n renderer_update.clear_font_cache = true\n }\n\n // Update font size and cell dimensions.\n if let Some(font) = pending_update.font() {\n let cell_dimensions = Self::update_font_size(&mut self.glyph_cache, config, font);\n cell_width = cell_dimensions.0;\n cell_height = cell_dimensions.1;\n\n info!(\"Cell size: {} x {}\", cell_width, cell_height);\n\n // Mark entire terminal as damaged since glyph size could change without cell size\n // changes.\n self.damage_tracker.frame().mark_fully_damaged();\n }\n\n let (mut width, mut height) = (self.size_info.width(), self.size_info.height());\n if let Some(dimensions) = pending_update.dimensions() {\n width = dimensions.width as f32;\n height = dimensions.height as f32;\n }\n\n let padding = config.window.padding(self.window.scale_factor as f32);\n\n let mut new_size = SizeInfo::new(\n width,\n height,\n cell_width,\n cell_height,\n padding.0,\n padding.1,\n config.window.dynamic_padding,\n );\n\n // Update number of column/lines in the viewport.\n let search_active = search_state.history_index.is_some();\n let message_bar_lines = message_buffer.message().map_or(0, |m| m.text(&new_size).len());\n let search_lines = usize::from(search_active);\n new_size.reserve_lines(message_bar_lines + search_lines);\n\n // Update resize increments.\n if 
config.window.resize_increments {\n self.window.set_resize_increments(PhysicalSize::new(cell_width, cell_height));\n }\n\n // Resize when terminal when its dimensions have changed.\n if self.size_info.screen_lines() != new_size.screen_lines\n || self.size_info.columns() != new_size.columns()\n {\n // Resize PTY.\n pty_resize_handle.on_resize(new_size.into());\n\n // Resize terminal.\n terminal.resize(new_size);\n\n // Resize damage tracking.\n self.damage_tracker.resize(new_size.screen_lines(), new_size.columns());\n }\n\n // Check if dimensions have changed.\n if new_size != self.size_info {\n // Queue renderer update.\n let renderer_update = self.pending_renderer_update.get_or_insert(Default::default());\n renderer_update.resize = true;\n\n // Clear focused search match.\n search_state.clear_focused_match();\n }\n self.size_info = new_size;\n }\n\n // NOTE: Renderer updates are split off, since platforms like Wayland require resize and other\n // OpenGL operations to be performed right before rendering. Otherwise they could lock the\n // back buffer and render with the previous state. 
This also solves flickering during resizes.\n //\n /// Update the state of the renderer.\n pub fn process_renderer_update(&mut self) {\n let renderer_update = match self.pending_renderer_update.take() {\n Some(renderer_update) => renderer_update,\n _ => return,\n };\n\n // Resize renderer.\n if renderer_update.resize {\n let width = NonZeroU32::new(self.size_info.width() as u32).unwrap();\n let height = NonZeroU32::new(self.size_info.height() as u32).unwrap();\n self.surface.resize(&self.context, width, height);\n }\n\n // Ensure we're modifying the correct OpenGL context.\n self.make_current();\n\n if renderer_update.clear_font_cache {\n self.reset_glyph_cache();\n }\n\n self.renderer.resize(&self.size_info);\n\n info!(\"Padding: {} x {}\", self.size_info.padding_x(), self.size_info.padding_y());\n info!(\"Width: {}, Height: {}\", self.size_info.width(), self.size_info.height());\n }\n\n /// Draw the screen.\n ///\n /// A reference to Term whose state is being drawn must be provided.\n ///\n /// This call may block if vsync is enabled.\n pub fn draw<T: EventListener>(\n &mut self,\n mut terminal: MutexGuard<'_, Term<T>>,\n scheduler: &mut Scheduler,\n message_buffer: &MessageBuffer,\n config: &UiConfig,\n search_state: &mut SearchState,\n ) {\n // Collect renderable content before the terminal is dropped.\n let mut content = RenderableContent::new(config, self, &terminal, search_state);\n let mut grid_cells = Vec::new();\n for cell in &mut content {\n grid_cells.push(cell);\n }\n let selection_range = content.selection_range();\n let foreground_color = content.color(NamedColor::Foreground as usize);\n let background_color = content.color(NamedColor::Background as usize);\n let display_offset = content.display_offset();\n let cursor = content.cursor();\n\n let cursor_point = terminal.grid().cursor.point;\n let total_lines = terminal.grid().total_lines();\n let metrics = self.glyph_cache.font_metrics();\n let size_info = self.size_info;\n\n let vi_mode = 
terminal.mode().contains(TermMode::VI);\n let vi_cursor_point = if vi_mode { Some(terminal.vi_mode_cursor.point) } else { None };\n\n // Add damage from the terminal.\n match terminal.damage() {\n TermDamage::Full => self.damage_tracker.frame().mark_fully_damaged(),\n TermDamage::Partial(damaged_lines) => {\n for damage in damaged_lines {\n self.damage_tracker.frame().damage_line(damage);\n }\n },\n }\n terminal.reset_damage();\n\n // Drop terminal as early as possible to free lock.\n drop(terminal);\n\n // Invalidate highlighted hints if grid has changed.\n self.validate_hint_highlights(display_offset);\n\n // Add damage from alacritty's UI elements overlapping terminal.\n\n let requires_full_damage = self.visual_bell.intensity() != 0.\n || self.hint_state.active()\n || search_state.regex().is_some();\n if requires_full_damage {\n self.damage_tracker.frame().mark_fully_damaged();\n self.damage_tracker.next_frame().mark_fully_damaged();\n }\n\n let vi_cursor_viewport_point =\n vi_cursor_point.and_then(|cursor| term::point_to_viewport(display_offset, cursor));\n self.damage_tracker.damage_vi_cursor(vi_cursor_viewport_point);\n self.damage_tracker.damage_selection(selection_range, display_offset);\n\n // Make sure this window's OpenGL context is active.\n self.make_current();\n\n self.renderer.clear(background_color, config.window_opacity());\n let mut lines = RenderLines::new();\n\n // Optimize loop hint comparator.\n let has_highlighted_hint =\n self.highlighted_hint.is_some() || self.vi_highlighted_hint.is_some();\n\n // Draw grid.\n {\n let _sampler = self.meter.sampler();\n\n // Ensure macOS hasn't reset our viewport.\n #[cfg(target_os = \"macos\")]\n self.renderer.set_viewport(&size_info);\n\n let glyph_cache = &mut self.glyph_cache;\n let highlighted_hint = &self.highlighted_hint;\n let vi_highlighted_hint = &self.vi_highlighted_hint;\n let damage_tracker = &mut self.damage_tracker;\n\n let cells = grid_cells.into_iter().map(|mut cell| {\n // Underline hints 
hovered by mouse or vi mode cursor.\n if has_highlighted_hint {\n let point = term::viewport_to_point(display_offset, cell.point);\n let hyperlink = cell.extra.as_ref().and_then(|extra| extra.hyperlink.as_ref());\n\n let should_highlight = |hint: &Option<HintMatch>| {\n hint.as_ref().is_some_and(|hint| hint.should_highlight(point, hyperlink))\n };\n if should_highlight(highlighted_hint) || should_highlight(vi_highlighted_hint) {\n damage_tracker.frame().damage_point(cell.point);\n cell.flags.insert(Flags::UNDERLINE);\n }\n }\n\n // Update underline/strikeout.\n lines.update(&cell);\n\n cell\n });\n self.renderer.draw_cells(&size_info, glyph_cache, cells);\n }\n\n let mut rects = lines.rects(&metrics, &size_info);\n\n if let Some(vi_cursor_point) = vi_cursor_point {\n // Indicate vi mode by showing the cursor's position in the top right corner.\n let line = (-vi_cursor_point.line.0 + size_info.bottommost_line().0) as usize;\n let obstructed_column = Some(vi_cursor_point)\n .filter(|point| point.line == -(display_offset as i32))\n .map(|point| point.column);\n self.draw_line_indicator(config, total_lines, obstructed_column, line);\n } else if search_state.regex().is_some() {\n // Show current display offset in vi-less search to indicate match position.\n self.draw_line_indicator(config, total_lines, None, display_offset);\n };\n\n // Draw cursor.\n rects.extend(cursor.rects(&size_info, config.cursor.thickness()));\n\n // Push visual bell after url/underline/strikeout rects.\n let visual_bell_intensity = self.visual_bell.intensity();\n if visual_bell_intensity != 0. 
{\n let visual_bell_rect = RenderRect::new(\n 0.,\n 0.,\n size_info.width(),\n size_info.height(),\n config.bell.color,\n visual_bell_intensity as f32,\n );\n rects.push(visual_bell_rect);\n }\n\n // Handle IME positioning and search bar rendering.\n let ime_position = match search_state.regex() {\n Some(regex) => {\n let search_label = match search_state.direction() {\n Direction::Right => FORWARD_SEARCH_LABEL,\n Direction::Left => BACKWARD_SEARCH_LABEL,\n };\n\n let search_text = Self::format_search(regex, search_label, size_info.columns());\n\n // Render the search bar.\n self.draw_search(config, &search_text);\n\n // Draw search bar cursor.\n let line = size_info.screen_lines();\n let column = Column(search_text.chars().count() - 1);\n\n // Add cursor to search bar if IME is not active.\n if self.ime.preedit().is_none() {\n let fg = config.colors.footer_bar_foreground();\n let shape = CursorShape::Underline;\n let cursor_width = NonZeroU32::new(1).unwrap();\n let cursor =\n RenderableCursor::new(Point::new(line, column), shape, fg, cursor_width);\n rects.extend(cursor.rects(&size_info, config.cursor.thickness()));\n }\n\n Some(Point::new(line, column))\n },\n None => {\n let num_lines = self.size_info.screen_lines();\n match vi_cursor_viewport_point {\n None => term::point_to_viewport(display_offset, cursor_point)\n .filter(|point| point.line < num_lines),\n point => point,\n }\n },\n };\n\n // Handle IME.\n if self.ime.is_enabled() {\n if let Some(point) = ime_position {\n let (fg, bg) = if search_state.regex().is_some() {\n (config.colors.footer_bar_foreground(), config.colors.footer_bar_background())\n } else {\n (foreground_color, background_color)\n };\n\n self.draw_ime_preview(point, fg, bg, &mut rects, config);\n }\n }\n\n if let Some(message) = message_buffer.message() {\n let search_offset = usize::from(search_state.regex().is_some());\n let text = message.text(&size_info);\n\n // Create a new rectangle for the background.\n let start_line = 
size_info.screen_lines() + search_offset;\n let y = size_info.cell_height().mul_add(start_line as f32, size_info.padding_y());\n\n let bg = match message.ty() {\n MessageType::Error => config.colors.normal.red,\n MessageType::Warning => config.colors.normal.yellow,\n };\n\n let x = 0;\n let width = size_info.width() as i32;\n let height = (size_info.height() - y) as i32;\n let message_bar_rect =\n RenderRect::new(x as f32, y, width as f32, height as f32, bg, 1.);\n\n // Push message_bar in the end, so it'll be above all other content.\n rects.push(message_bar_rect);\n\n // Always damage message bar, since it could have messages of the same size in it.\n self.damage_tracker.frame().add_viewport_rect(&size_info, x, y as i32, width, height);\n\n // Draw rectangles.\n self.renderer.draw_rects(&size_info, &metrics, rects);\n\n // Relay messages to the user.\n let glyph_cache = &mut self.glyph_cache;\n let fg = config.colors.primary.background;\n for (i, message_text) in text.iter().enumerate() {\n let point = Point::new(start_line + i, Column(0));\n self.renderer.draw_string(\n point,\n fg,\n bg,\n message_text.chars(),\n &size_info,\n glyph_cache,\n );\n }\n } else {\n // Draw rectangles.\n self.renderer.draw_rects(&size_info, &metrics, rects);\n }\n\n self.draw_render_timer(config);\n\n // Draw hyperlink uri preview.\n if has_highlighted_hint {\n let cursor_point = vi_cursor_point.or(Some(cursor_point));\n self.draw_hyperlink_preview(config, cursor_point, display_offset);\n }\n\n // Notify winit that we're about to present.\n self.window.pre_present_notify();\n\n // Highlight damage for debugging.\n if self.damage_tracker.debug {\n let damage = self.damage_tracker.shape_frame_damage(self.size_info.into());\n let mut rects = Vec::with_capacity(damage.len());\n self.highlight_damage(&mut rects);\n self.renderer.draw_rects(&self.size_info, &metrics, rects);\n }\n\n // Clearing debug highlights from the previous frame requires full redraw.\n self.swap_buffers();\n\n if 
matches!(self.raw_window_handle, RawWindowHandle::Xcb(_) | RawWindowHandle::Xlib(_)) {\n // On X11 `swap_buffers` does not block for vsync. However the next OpenGl command\n // will block to synchronize (this is `glClear` in Alacritty), which causes a\n // permanent one frame delay.\n self.renderer.finish();\n }\n\n // XXX: Request the new frame after swapping buffers, so the\n // time to finish OpenGL operations is accounted for in the timeout.\n if !matches!(self.raw_window_handle, RawWindowHandle::Wayland(_)) {\n self.request_frame(scheduler);\n }\n\n self.damage_tracker.swap_damage();\n }\n\n /// Update to a new configuration.\n pub fn update_config(&mut self, config: &UiConfig) {\n self.damage_tracker.debug = config.debug.highlight_damage;\n self.visual_bell.update_config(&config.bell);\n self.colors = List::from(&config.colors);\n }\n\n /// Update the mouse/vi mode cursor hint highlighting.\n ///\n /// This will return whether the highlighted hints changed.\n pub fn update_highlighted_hints<T>(\n &mut self,\n term: &Term<T>,\n config: &UiConfig,\n mouse: &Mouse,\n modifiers: ModifiersState,\n ) -> bool {\n // Update vi mode cursor hint.\n let vi_highlighted_hint = if term.mode().contains(TermMode::VI) {\n let mods = ModifiersState::all();\n let point = term.vi_mode_cursor.point;\n hint::highlighted_at(term, config, point, mods)\n } else {\n None\n };\n let mut dirty = vi_highlighted_hint != self.vi_highlighted_hint;\n self.vi_highlighted_hint = vi_highlighted_hint;\n self.vi_highlighted_hint_age = 0;\n\n // Force full redraw if the vi mode highlight was cleared.\n if dirty {\n self.damage_tracker.frame().mark_fully_damaged();\n }\n\n // Abort if mouse highlighting conditions are not met.\n if !mouse.inside_text_area || !term.selection.as_ref().map_or(true, Selection::is_empty) {\n if self.highlighted_hint.take().is_some() {\n self.damage_tracker.frame().mark_fully_damaged();\n dirty = true;\n }\n return dirty;\n }\n\n // Find highlighted hint at mouse 
position.\n let point = mouse.point(&self.size_info, term.grid().display_offset());\n let highlighted_hint = hint::highlighted_at(term, config, point, modifiers);\n\n // Update cursor shape.\n if highlighted_hint.is_some() {\n // If mouse changed the line, we should update the hyperlink preview, since the\n // highlighted hint could be disrupted by the old preview.\n dirty = self.hint_mouse_point.is_some_and(|p| p.line != point.line);\n self.hint_mouse_point = Some(point);\n self.window.set_mouse_cursor(CursorIcon::Pointer);\n } else if self.highlighted_hint.is_some() {\n self.hint_mouse_point = None;\n if term.mode().intersects(TermMode::MOUSE_MODE) && !term.mode().contains(TermMode::VI) {\n self.window.set_mouse_cursor(CursorIcon::Default);\n } else {\n self.window.set_mouse_cursor(CursorIcon::Text);\n }\n }\n\n let mouse_highlight_dirty = self.highlighted_hint != highlighted_hint;\n dirty |= mouse_highlight_dirty;\n self.highlighted_hint = highlighted_hint;\n self.highlighted_hint_age = 0;\n\n // Force full redraw if the mouse cursor highlight was changed.\n if mouse_highlight_dirty {\n self.damage_tracker.frame().mark_fully_damaged();\n }\n\n dirty\n }\n\n #[inline(never)]\n fn draw_ime_preview(\n &mut self,\n point: Point<usize>,\n fg: Rgb,\n bg: Rgb,\n rects: &mut Vec<RenderRect>,\n config: &UiConfig,\n ) {\n let preedit = match self.ime.preedit() {\n Some(preedit) => preedit,\n None => {\n // In case we don't have preedit, just set the popup point.\n self.window.update_ime_position(point, &self.size_info);\n return;\n },\n };\n\n let num_cols = self.size_info.columns();\n\n // Get the visible preedit.\n let visible_text: String = match (preedit.cursor_byte_offset, preedit.cursor_end_offset) {\n (Some(byte_offset), Some(end_offset)) if end_offset.0 > num_cols => StrShortener::new(\n &preedit.text[byte_offset.0..],\n num_cols,\n ShortenDirection::Right,\n Some(SHORTENER),\n ),\n _ => {\n StrShortener::new(&preedit.text, num_cols, ShortenDirection::Left, 
Some(SHORTENER))\n },\n }\n .collect();\n\n let visible_len = visible_text.chars().count();\n\n let end = cmp::min(point.column.0 + visible_len, num_cols);\n let start = end.saturating_sub(visible_len);\n\n let start = Point::new(point.line, Column(start));\n let end = Point::new(point.line, Column(end - 1));\n\n let glyph_cache = &mut self.glyph_cache;\n let metrics = glyph_cache.font_metrics();\n\n self.renderer.draw_string(\n start,\n fg,\n bg,\n visible_text.chars(),\n &self.size_info,\n glyph_cache,\n );\n\n // Damage preedit inside the terminal viewport.\n if point.line < self.size_info.screen_lines() {\n let damage = LineDamageBounds::new(start.line, 0, num_cols);\n self.damage_tracker.frame().damage_line(damage);\n self.damage_tracker.next_frame().damage_line(damage);\n }\n\n // Add underline for preedit text.\n let underline = RenderLine { start, end, color: fg };\n rects.extend(underline.rects(Flags::UNDERLINE, &metrics, &self.size_info));\n\n let ime_popup_point = match preedit.cursor_end_offset {\n Some(cursor_end_offset) => {\n // Use hollow block when multiple characters are changed at once.\n let (shape, width) = if let Some(width) =\n NonZeroU32::new((cursor_end_offset.0 - cursor_end_offset.1) as u32)\n {\n (CursorShape::HollowBlock, width)\n } else {\n (CursorShape::Beam, NonZeroU32::new(1).unwrap())\n };\n\n let cursor_column = Column(\n (end.column.0 as isize - cursor_end_offset.0 as isize + 1).max(0) as usize,\n );\n let cursor_point = Point::new(point.line, cursor_column);\n let cursor = RenderableCursor::new(cursor_point, shape, fg, width);\n rects.extend(cursor.rects(&self.size_info, config.cursor.thickness()));\n cursor_point\n },\n _ => end,\n };\n\n self.window.update_ime_position(ime_popup_point, &self.size_info);\n }\n\n /// Format search regex to account for the cursor and fullwidth characters.\n fn format_search(search_regex: &str, search_label: &str, max_width: usize) -> String {\n let label_len = search_label.len();\n\n // Skip 
`search_regex` formatting if only label is visible.\n if label_len > max_width {\n return search_label[..max_width].to_owned();\n }\n\n // The search string consists of `search_label` + `search_regex` + `cursor`.\n let mut bar_text = String::from(search_label);\n bar_text.extend(StrShortener::new(\n search_regex,\n max_width.wrapping_sub(label_len + 1),\n ShortenDirection::Left,\n Some(SHORTENER),\n ));\n\n // Add place for cursor.\n bar_text.push(' ');\n\n bar_text\n }\n\n /// Draw preview for the currently highlighted `Hyperlink`.\n #[inline(never)]\n fn draw_hyperlink_preview(\n &mut self,\n config: &UiConfig,\n cursor_point: Option<Point>,\n display_offset: usize,\n ) {\n let num_cols = self.size_info.columns();\n let uris: Vec<_> = self\n .highlighted_hint\n .iter()\n .chain(&self.vi_highlighted_hint)\n .filter_map(|hint| hint.hyperlink().map(|hyperlink| hyperlink.uri()))\n .map(|uri| StrShortener::new(uri, num_cols, ShortenDirection::Right, Some(SHORTENER)))\n .collect();\n\n if uris.is_empty() {\n return;\n }\n\n // The maximum amount of protected lines including the ones we'll show preview on.\n let max_protected_lines = uris.len() * 2;\n\n // Lines we shouldn't show preview on, because it'll obscure the highlighted hint.\n let mut protected_lines = Vec::with_capacity(max_protected_lines);\n if self.size_info.screen_lines() > max_protected_lines {\n // Prefer to show preview even when it'll likely obscure the highlighted hint, when\n // there's no place left for it.\n protected_lines.push(self.hint_mouse_point.map(|point| point.line));\n protected_lines.push(cursor_point.map(|point| point.line));\n }\n\n // Find the line in viewport we can draw preview on without obscuring protected lines.\n let viewport_bottom = self.size_info.bottommost_line() - Line(display_offset as i32);\n let viewport_top = viewport_bottom - (self.size_info.screen_lines() - 1);\n let uri_lines = (viewport_top.0..=viewport_bottom.0)\n .rev()\n .map(|line| Some(Line(line)))\n 
.filter_map(|line| {\n if protected_lines.contains(&line) {\n None\n } else {\n protected_lines.push(line);\n line\n }\n })\n .take(uris.len())\n .flat_map(|line| term::point_to_viewport(display_offset, Point::new(line, Column(0))));\n\n let fg = config.colors.footer_bar_foreground();\n let bg = config.colors.footer_bar_background();\n for (uri, point) in uris.into_iter().zip(uri_lines) {\n // Damage the uri preview.\n let damage = LineDamageBounds::new(point.line, point.column.0, num_cols);\n self.damage_tracker.frame().damage_line(damage);\n\n // Damage the uri preview for the next frame as well.\n self.damage_tracker.next_frame().damage_line(damage);\n\n self.renderer.draw_string(point, fg, bg, uri, &self.size_info, &mut self.glyph_cache);\n }\n }\n\n /// Draw current search regex.\n #[inline(never)]\n fn draw_search(&mut self, config: &UiConfig, text: &str) {\n // Assure text length is at least num_cols.\n let num_cols = self.size_info.columns();\n let text = format!(\"{text:<num_cols$}\");\n\n let point = Point::new(self.size_info.screen_lines(), Column(0));\n\n let fg = config.colors.footer_bar_foreground();\n let bg = config.colors.footer_bar_background();\n\n self.renderer.draw_string(\n point,\n fg,\n bg,\n text.chars(),\n &self.size_info,\n &mut self.glyph_cache,\n );\n }\n\n /// Draw render timer.\n #[inline(never)]\n fn draw_render_timer(&mut self, config: &UiConfig) {\n if !config.debug.render_timer {\n return;\n }\n\n let timing = format!(\"{:.3} usec\", self.meter.average());\n let point = Point::new(self.size_info.screen_lines().saturating_sub(2), Column(0));\n let fg = config.colors.primary.background;\n let bg = config.colors.normal.red;\n\n // Damage render timer for current and next frame.\n let damage = LineDamageBounds::new(point.line, point.column.0, timing.len());\n self.damage_tracker.frame().damage_line(damage);\n self.damage_tracker.next_frame().damage_line(damage);\n\n let glyph_cache = &mut self.glyph_cache;\n 
self.renderer.draw_string(point, fg, bg, timing.chars(), &self.size_info, glyph_cache);\n }\n\n /// Draw an indicator for the position of a line in history.\n #[inline(never)]\n fn draw_line_indicator(\n &mut self,\n config: &UiConfig,\n total_lines: usize,\n obstructed_column: Option<Column>,\n line: usize,\n ) {\n let columns = self.size_info.columns();\n let text = format!(\"[{}/{}]\", line, total_lines - 1);\n let column = Column(self.size_info.columns().saturating_sub(text.len()));\n let point = Point::new(0, column);\n\n // Damage the line indicator for current and next frame.\n let damage = LineDamageBounds::new(point.line, point.column.0, columns - 1);\n self.damage_tracker.frame().damage_line(damage);\n self.damage_tracker.next_frame().damage_line(damage);\n\n let colors = &config.colors;\n let fg = colors.line_indicator.foreground.unwrap_or(colors.primary.background);\n let bg = colors.line_indicator.background.unwrap_or(colors.primary.foreground);\n\n // Do not render anything if it would obscure the vi mode cursor.\n if obstructed_column.map_or(true, |obstructed_column| obstructed_column < column) {\n let glyph_cache = &mut self.glyph_cache;\n self.renderer.draw_string(point, fg, bg, text.chars(), &self.size_info, glyph_cache);\n }\n }\n\n /// Highlight damaged rects.\n ///\n /// This function is for debug purposes only.\n fn highlight_damage(&self, render_rects: &mut Vec<RenderRect>) {\n for damage_rect in &self.damage_tracker.shape_frame_damage(self.size_info.into()) {\n let x = damage_rect.x as f32;\n let height = damage_rect.height as f32;\n let width = damage_rect.width as f32;\n let y = damage_y_to_viewport_y(&self.size_info, damage_rect) as f32;\n let render_rect = RenderRect::new(x, y, width, height, DAMAGE_RECT_COLOR, 0.5);\n\n render_rects.push(render_rect);\n }\n }\n\n /// Check whether a hint highlight needs to be cleared.\n fn validate_hint_highlights(&mut self, display_offset: usize) {\n let frame = self.damage_tracker.frame();\n let hints 
= [\n (&mut self.highlighted_hint, &mut self.highlighted_hint_age, true),\n (&mut self.vi_highlighted_hint, &mut self.vi_highlighted_hint_age, false),\n ];\n\n let num_lines = self.size_info.screen_lines();\n for (hint, hint_age, reset_mouse) in hints {\n let (start, end) = match hint {\n Some(hint) => (*hint.bounds().start(), *hint.bounds().end()),\n None => continue,\n };\n\n // Ignore hints that were created this frame.\n *hint_age += 1;\n if *hint_age == 1 {\n continue;\n }\n\n // Convert hint bounds to viewport coordinates.\n let start = term::point_to_viewport(display_offset, start)\n .filter(|point| point.line < num_lines)\n .unwrap_or_default();\n let end = term::point_to_viewport(display_offset, end)\n .filter(|point| point.line < num_lines)\n .unwrap_or_else(|| Point::new(num_lines - 1, self.size_info.last_column()));\n\n // Clear invalidated hints.\n if frame.intersects(start, end) {\n if reset_mouse {\n self.window.set_mouse_cursor(CursorIcon::Default);\n }\n frame.mark_fully_damaged();\n *hint = None;\n }\n }\n }\n\n /// Request a new frame for a window on Wayland.\n fn request_frame(&mut self, scheduler: &mut Scheduler) {\n // Mark that we've used a frame.\n self.window.has_frame = false;\n\n // Get the display vblank interval.\n let monitor_vblank_interval = 1_000_000.\n / self\n .window\n .current_monitor()\n .and_then(|monitor| monitor.refresh_rate_millihertz())\n .unwrap_or(60_000) as f64;\n\n // Now convert it to micro seconds.\n let monitor_vblank_interval =\n Duration::from_micros((1000. * monitor_vblank_interval) as u64);\n\n let swap_timeout = self.frame_timer.compute_timeout(monitor_vblank_interval);\n\n let window_id = self.window.id();\n let timer_id = TimerId::new(Topic::Frame, window_id);\n let event = Event::new(EventType::Frame, window_id);\n\n scheduler.schedule(event, swap_timeout, false, timer_id);\n }\n}",
"class_signature": "impl Display"
} |
format_search | alacritty-master/alacritty/src/display/mod.rs | fn format_search(search_regex: &str, search_label: &str, max_width: usize) -> String {
let label_len = search_label.len();
// Skip `search_regex` formatting if only label is visible.
if label_len > max_width {
return search_label[..max_width].to_owned();
}
// The search string consists of `search_label` + `search_regex` + `cursor`.
let mut bar_text = String::from(search_label);
bar_text.extend(StrShortener::new(
search_regex,
max_width.wrapping_sub(label_len + 1),
ShortenDirection::Left,
Some(SHORTENER),
));
// Add place for cursor.
bar_text.push(' ');
bar_text
} | //! The display subsystem including window management, font rasterization, and
//! GPU drawing.
use std::cmp;
use std::fmt::{self, Formatter};
use std::mem::{self, ManuallyDrop};
use std::num::NonZeroU32;
use std::ops::Deref;
use std::time::{Duration, Instant};
use glutin::config::GetGlConfig;
use glutin::context::{NotCurrentContext, PossiblyCurrentContext};
use glutin::display::GetGlDisplay;
use glutin::error::ErrorKind;
use glutin::prelude::*;
use glutin::surface::{Surface, SwapInterval, WindowSurface};
use log::{debug, info};
use parking_lot::MutexGuard;
use serde::{Deserialize, Serialize};
use winit::dpi::PhysicalSize;
use winit::keyboard::ModifiersState;
use winit::raw_window_handle::RawWindowHandle;
use winit::window::CursorIcon;
use crossfont::{Rasterize, Rasterizer, Size as FontSize};
use unicode_width::UnicodeWidthChar;
use alacritty_terminal::event::{EventListener, OnResize, WindowSize};
use alacritty_terminal::grid::Dimensions as TermDimensions;
use alacritty_terminal::index::{Column, Direction, Line, Point};
use alacritty_terminal::selection::Selection;
use alacritty_terminal::term::cell::Flags;
use alacritty_terminal::term::{
self, LineDamageBounds, Term, TermDamage, TermMode, MIN_COLUMNS, MIN_SCREEN_LINES,
};
use alacritty_terminal::vte::ansi::{CursorShape, NamedColor};
use crate::config::debug::RendererPreference;
use crate::config::font::Font;
use crate::config::window::Dimensions;
#[cfg(not(windows))]
use crate::config::window::StartupMode;
use crate::config::UiConfig;
use crate::display::bell::VisualBell;
use crate::display::color::{List, Rgb};
use crate::display::content::{RenderableContent, RenderableCursor};
use crate::display::cursor::IntoRects;
use crate::display::damage::{damage_y_to_viewport_y, DamageTracker};
use crate::display::hint::{HintMatch, HintState};
use crate::display::meter::Meter;
use crate::display::window::Window;
use crate::event::{Event, EventType, Mouse, SearchState};
use crate::message_bar::{MessageBuffer, MessageType};
use crate::renderer::rects::{RenderLine, RenderLines, RenderRect};
use crate::renderer::{self, platform, GlyphCache, Renderer};
use crate::scheduler::{Scheduler, TimerId, Topic};
use crate::string::{ShortenDirection, StrShortener};
pub mod color;
pub mod content;
pub mod cursor;
pub mod hint;
pub mod window;
mod bell;
mod damage;
mod meter;
/// Label for the forward terminal search bar.
const FORWARD_SEARCH_LABEL: &str = "Search: ";
/// Label for the backward terminal search bar.
const BACKWARD_SEARCH_LABEL: &str = "Backward Search: ";
/// The character used to shorten the visible text like uri preview or search regex.
const SHORTENER: char = '…';
/// Color which is used to highlight damaged rects when debugging.
const DAMAGE_RECT_COLOR: Rgb = Rgb::new(255, 0, 255);
#[derive(Debug)]
pub enum Error {
/// Error with window management.
Window(window::Error),
/// Error dealing with fonts.
Font(crossfont::Error),
/// Error in renderer.
Render(renderer::Error),
/// Error during context operations.
Context(glutin::error::Error),
}
impl std::error::Error for Error {
fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
match self {
Error::Window(err) => err.source(),
Error::Font(err) => err.source(),
Error::Render(err) => err.source(),
Error::Context(err) => err.source(),
}
}
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
match self {
Error::Window(err) => err.fmt(f),
Error::Font(err) => err.fmt(f),
Error::Render(err) => err.fmt(f),
Error::Context(err) => err.fmt(f),
}
}
}
impl From<window::Error> for Error {
fn from(val: window::Error) -> Self {
Error::Window(val)
}
}
impl From<crossfont::Error> for Error {
fn from(val: crossfont::Error) -> Self {
Error::Font(val)
}
}
impl From<renderer::Error> for Error {
fn from(val: renderer::Error) -> Self {
Error::Render(val)
}
}
impl From<glutin::error::Error> for Error {
fn from(val: glutin::error::Error) -> Self {
Error::Context(val)
}
}
/// Terminal size info.
#[derive(Serialize, Deserialize, Debug, Copy, Clone, PartialEq, Eq)]
pub struct SizeInfo<T = f32> {
/// Terminal window width.
width: T,
/// Terminal window height.
height: T,
/// Width of individual cell.
cell_width: T,
/// Height of individual cell.
cell_height: T,
/// Horizontal window padding.
padding_x: T,
/// Vertical window padding.
padding_y: T,
/// Number of lines in the viewport.
screen_lines: usize,
/// Number of columns in the viewport.
columns: usize,
}
impl From<SizeInfo<f32>> for SizeInfo<u32> {
fn from(size_info: SizeInfo<f32>) -> Self {
Self {
width: size_info.width as u32,
height: size_info.height as u32,
cell_width: size_info.cell_width as u32,
cell_height: size_info.cell_height as u32,
padding_x: size_info.padding_x as u32,
padding_y: size_info.padding_y as u32,
screen_lines: size_info.screen_lines,
columns: size_info.screen_lines,
}
}
}
impl From<SizeInfo<f32>> for WindowSize {
fn from(size_info: SizeInfo<f32>) -> Self {
Self {
num_cols: size_info.columns() as u16,
num_lines: size_info.screen_lines() as u16,
cell_width: size_info.cell_width() as u16,
cell_height: size_info.cell_height() as u16,
}
}
}
impl<T: Clone + Copy> SizeInfo<T> {
#[inline]
pub fn width(&self) -> T {
self.width
}
#[inline]
pub fn height(&self) -> T {
self.height
}
#[inline]
pub fn cell_width(&self) -> T {
self.cell_width
}
#[inline]
pub fn cell_height(&self) -> T {
self.cell_height
}
#[inline]
pub fn padding_x(&self) -> T {
self.padding_x
}
#[inline]
pub fn padding_y(&self) -> T {
self.padding_y
}
}
impl SizeInfo<f32> {
    /// Build size information from window dimensions and font cell metrics.
    ///
    /// When `dynamic_padding` is set, the configured padding is grown so the
    /// space left over after the cell grid is split evenly on both sides.
    /// The resulting grid is clamped to at least `MIN_SCREEN_LINES` lines and
    /// `MIN_COLUMNS` columns.
    #[allow(clippy::too_many_arguments)]
    pub fn new(
        width: f32,
        height: f32,
        cell_width: f32,
        cell_height: f32,
        mut padding_x: f32,
        mut padding_y: f32,
        dynamic_padding: bool,
    ) -> SizeInfo {
        if dynamic_padding {
            padding_x = Self::dynamic_padding(padding_x.floor(), width, cell_width);
            padding_y = Self::dynamic_padding(padding_y.floor(), height, cell_height);
        }
        // Number of whole cells that fit into the area left after subtracting padding.
        let lines = (height - 2. * padding_y) / cell_height;
        let screen_lines = cmp::max(lines as usize, MIN_SCREEN_LINES);
        let columns = (width - 2. * padding_x) / cell_width;
        let columns = cmp::max(columns as usize, MIN_COLUMNS);
        SizeInfo {
            width,
            height,
            cell_width,
            cell_height,
            // Floor the padding so the grid origin lands on whole pixels.
            padding_x: padding_x.floor(),
            padding_y: padding_y.floor(),
            screen_lines,
            columns,
        }
    }
    /// Shrink the number of visible lines by `count` (e.g. to make room for the
    /// message bar or search bar), never going below `MIN_SCREEN_LINES`.
    #[inline]
    pub fn reserve_lines(&mut self, count: usize) {
        self.screen_lines = cmp::max(self.screen_lines.saturating_sub(count), MIN_SCREEN_LINES);
    }
    /// Check if coordinates are inside the terminal grid.
    ///
    /// The padding, message bar or search are not counted as part of the grid.
    #[inline]
    pub fn contains_point(&self, x: usize, y: usize) -> bool {
        x <= (self.padding_x + self.columns as f32 * self.cell_width) as usize
            && x > self.padding_x as usize
            && y <= (self.padding_y + self.screen_lines as f32 * self.cell_height) as usize
            && y > self.padding_y as usize
    }
    /// Calculate padding to spread it evenly around the terminal content.
    #[inline]
    fn dynamic_padding(padding: f32, dimension: f32, cell_dimension: f32) -> f32 {
        // Split the remainder that doesn't fit a whole cell between both sides.
        padding + ((dimension - 2. * padding) % cell_dimension) / 2.
    }
}
impl TermDimensions for SizeInfo {
    /// Number of columns in the visible grid.
    #[inline]
    fn columns(&self) -> usize {
        self.columns
    }
    /// Number of visible lines in the grid.
    #[inline]
    fn screen_lines(&self) -> usize {
        self.screen_lines
    }
    /// The display itself has no scrollback, so the total equals the visible
    /// line count.
    #[inline]
    fn total_lines(&self) -> usize {
        self.screen_lines()
    }
}
/// Pending size/font/cursor changes collected until the next display update.
#[derive(Default, Clone, Debug, PartialEq, Eq)]
pub struct DisplayUpdate {
    /// Whether any update has been queued since the last processing.
    pub dirty: bool,
    /// New window dimensions, if a resize is pending.
    dimensions: Option<PhysicalSize<u32>>,
    /// Whether the cursor's appearance needs to be refreshed.
    cursor_dirty: bool,
    /// New font, if a font change is pending.
    font: Option<Font>,
}
impl DisplayUpdate {
    /// Pending window dimensions, if a resize was queued.
    pub fn dimensions(&self) -> Option<PhysicalSize<u32>> {
        self.dimensions
    }
    /// Pending font change, if one was queued.
    pub fn font(&self) -> Option<&Font> {
        self.font.as_ref()
    }
    /// Whether the cursor needs to be refreshed.
    pub fn cursor_dirty(&self) -> bool {
        self.cursor_dirty
    }
    /// Queue a resize to `dimensions` and mark the update as dirty.
    pub fn set_dimensions(&mut self, dimensions: PhysicalSize<u32>) {
        self.dimensions = Some(dimensions);
        self.dirty = true;
    }
    /// Queue a font change and mark the update as dirty.
    pub fn set_font(&mut self, font: Font) {
        self.font = Some(font);
        self.dirty = true;
    }
    /// Mark the cursor for refresh and the update as dirty.
    pub fn set_cursor_dirty(&mut self) {
        self.cursor_dirty = true;
        self.dirty = true;
    }
}
/// The display wraps a window, font rasterizer, and GPU renderer.
pub struct Display {
    /// The window backing this display.
    pub window: Window,
    /// Terminal size information for the current window dimensions.
    pub size_info: SizeInfo,
    /// Hint highlighted by the mouse.
    pub highlighted_hint: Option<HintMatch>,
    /// Frames since hint highlight was created.
    highlighted_hint_age: usize,
    /// Hint highlighted by the vi mode cursor.
    pub vi_highlighted_hint: Option<HintMatch>,
    /// Frames since hint highlight was created.
    vi_highlighted_hint_age: usize,
    /// Raw handle of the backing window, used for platform-specific code paths.
    pub raw_window_handle: RawWindowHandle,
    /// UI cursor visibility for blinking.
    pub cursor_hidden: bool,
    /// Visual bell state.
    pub visual_bell: VisualBell,
    /// Mapped RGB values for each terminal color.
    pub colors: List,
    /// State of the keyboard hints.
    pub hint_state: HintState,
    /// Unprocessed display updates.
    pub pending_update: DisplayUpdate,
    /// The renderer update that takes place only once before the actual rendering.
    pub pending_renderer_update: Option<RendererUpdate>,
    /// The ime on the given display.
    pub ime: Ime,
    /// The state of the timer for frame scheduling.
    pub frame_timer: FrameTimer,
    /// Damage tracker for the given display.
    pub damage_tracker: DamageTracker,
    /// Font size used by the window.
    pub font_size: FontSize,
    // Mouse point position when highlighting hints.
    hint_mouse_point: Option<Point>,
    // Renderer and GL objects are wrapped in `ManuallyDrop` so they can be
    // dropped in a controlled order on context loss (see `make_current`).
    renderer: ManuallyDrop<Renderer>,
    renderer_preference: Option<RendererPreference>,
    surface: ManuallyDrop<Surface<WindowSurface>>,
    context: ManuallyDrop<PossiblyCurrentContext>,
    // Cache of rasterized glyphs shared by all draw calls.
    glyph_cache: GlyphCache,
    // Render timing meter, displayed by the debug render timer.
    meter: Meter,
}
impl Display {
    /// Create a new display for `window`, initializing the font rasterizer,
    /// the OpenGL surface/context, and the renderer.
    ///
    /// The window is resized to the configured dimensions (when present),
    /// cleared to the background color, and made visible before returning.
    ///
    /// # Errors
    ///
    /// Returns an error when the rasterizer, glyph cache, GL surface/context,
    /// or renderer cannot be created.
    pub fn new(
        window: Window,
        gl_context: NotCurrentContext,
        config: &UiConfig,
        _tabbed: bool,
    ) -> Result<Display, Error> {
        let raw_window_handle = window.raw_window_handle();
        let scale_factor = window.scale_factor as f32;
        let rasterizer = Rasterizer::new()?;
        // Font size is scaled by the window's DPI scale factor.
        let font_size = config.font.size().scale(scale_factor);
        debug!("Loading \"{}\" font", &config.font.normal().family);
        let font = config.font.clone().with_size(font_size);
        let mut glyph_cache = GlyphCache::new(rasterizer, &font)?;
        let metrics = glyph_cache.font_metrics();
        let (cell_width, cell_height) = compute_cell_size(config, &metrics);
        // Resize the window to account for the user configured size.
        if let Some(dimensions) = config.window.dimensions() {
            let size = window_size(config, dimensions, cell_width, cell_height, scale_factor);
            window.request_inner_size(size);
        }
        // Create the GL surface to draw into.
        let surface = platform::create_gl_surface(
            &gl_context,
            window.inner_size(),
            window.raw_window_handle(),
        )?;
        // Make the context current.
        let context = gl_context.make_current(&surface)?;
        // Create renderer.
        let mut renderer = Renderer::new(&context, config.debug.renderer)?;
        // Load font common glyphs to accelerate rendering.
        debug!("Filling glyph cache with common glyphs");
        renderer.with_loader(|mut api| {
            glyph_cache.reset_glyph_cache(&mut api);
        });
        let padding = config.window.padding(window.scale_factor as f32);
        let viewport_size = window.inner_size();
        // Create new size with at least one column and row.
        let size_info = SizeInfo::new(
            viewport_size.width as f32,
            viewport_size.height as f32,
            cell_width,
            cell_height,
            padding.0,
            padding.1,
            // Dynamic padding only applies when no explicit dimensions were configured.
            config.window.dynamic_padding && config.window.dimensions().is_none(),
        );
        info!("Cell size: {} x {}", cell_width, cell_height);
        info!("Padding: {} x {}", size_info.padding_x(), size_info.padding_y());
        info!("Width: {}, Height: {}", size_info.width(), size_info.height());
        // Update OpenGL projection.
        renderer.resize(&size_info);
        // Clear screen.
        let background_color = config.colors.primary.background;
        renderer.clear(background_color, config.window_opacity());
        // Disable shadows for transparent windows on macOS.
        #[cfg(target_os = "macos")]
        window.set_has_shadow(config.window_opacity() >= 1.0);
        let is_wayland = matches!(raw_window_handle, RawWindowHandle::Wayland(_));
        // On Wayland we can safely ignore this call, since the window isn't visible until you
        // actually draw something into it and commit those changes.
        if !is_wayland {
            surface.swap_buffers(&context).expect("failed to swap buffers.");
            renderer.finish();
        }
        // Set resize increments for the newly created window.
        if config.window.resize_increments {
            window.set_resize_increments(PhysicalSize::new(cell_width, cell_height));
        }
        window.set_visible(true);
        // Always focus new windows, even if no Alacritty window is currently focused.
        #[cfg(target_os = "macos")]
        window.focus_window();
        #[allow(clippy::single_match)]
        #[cfg(not(windows))]
        if !_tabbed {
            match config.window.startup_mode {
                #[cfg(target_os = "macos")]
                StartupMode::SimpleFullscreen => window.set_simple_fullscreen(true),
                StartupMode::Maximized if !is_wayland => window.set_maximized(true),
                _ => (),
            }
        }
        let hint_state = HintState::new(config.hints.alphabet());
        let mut damage_tracker = DamageTracker::new(size_info.screen_lines(), size_info.columns());
        damage_tracker.debug = config.debug.highlight_damage;
        // Disable vsync.
        if let Err(err) = surface.set_swap_interval(&context, SwapInterval::DontWait) {
            info!("Failed to disable vsync: {}", err);
        }
        Ok(Self {
            context: ManuallyDrop::new(context),
            visual_bell: VisualBell::from(&config.bell),
            renderer: ManuallyDrop::new(renderer),
            renderer_preference: config.debug.renderer,
            surface: ManuallyDrop::new(surface),
            colors: List::from(&config.colors),
            frame_timer: FrameTimer::new(),
            raw_window_handle,
            damage_tracker,
            glyph_cache,
            hint_state,
            size_info,
            font_size,
            window,
            pending_renderer_update: Default::default(),
            vi_highlighted_hint_age: Default::default(),
            highlighted_hint_age: Default::default(),
            vi_highlighted_hint: Default::default(),
            highlighted_hint: Default::default(),
            hint_mouse_point: Default::default(),
            pending_update: Default::default(),
            cursor_hidden: Default::default(),
            meter: Default::default(),
            ime: Default::default(),
        })
    }
    /// OpenGL context associated with this display's surface.
    #[inline]
    pub fn gl_context(&self) -> &PossiblyCurrentContext {
        &self.context
    }
    /// Release the OpenGL context from the current thread, if it is current.
    pub fn make_not_current(&mut self) {
        if self.context.is_current() {
            self.context.make_not_current_in_place().expect("failed to disable context");
        }
    }
    /// Make this window's OpenGL context current, recovering from GPU resets.
    ///
    /// When context loss is detected — either reported by the renderer while
    /// already current, or surfacing as `ErrorKind::ContextLost` when
    /// activating — the context and renderer are recreated from scratch and
    /// the whole frame is marked as damaged.
    pub fn make_current(&mut self) {
        let is_current = self.context.is_current();
        // Attempt to make the context current if it's not.
        let context_loss = if is_current {
            self.renderer.was_context_reset()
        } else {
            match self.context.make_current(&self.surface) {
                Err(err) if err.error_kind() == ErrorKind::ContextLost => {
                    info!("Context lost for window {:?}", self.window.id());
                    true
                },
                _ => false,
            }
        };
        if !context_loss {
            return;
        }
        let gl_display = self.context.display();
        let gl_config = self.context.config();
        let raw_window_handle = Some(self.window.raw_window_handle());
        let context = platform::create_gl_context(&gl_display, &gl_config, raw_window_handle)
            .expect("failed to recreate context.");
        // Drop the old context and renderer.
        //
        // SAFETY: both fields are reassigned with fresh values immediately below,
        // so the `ManuallyDrop` slots are never used while empty.
        unsafe {
            ManuallyDrop::drop(&mut self.renderer);
            ManuallyDrop::drop(&mut self.context);
        }
        // Activate new context.
        let context = context.treat_as_possibly_current();
        self.context = ManuallyDrop::new(context);
        self.context.make_current(&self.surface).expect("failed to reativate context after reset.");
        // Recreate renderer.
        let renderer = Renderer::new(&self.context, self.renderer_preference)
            .expect("failed to recreate renderer after reset");
        self.renderer = ManuallyDrop::new(renderer);
        // Resize the renderer.
        self.renderer.resize(&self.size_info);
        self.reset_glyph_cache();
        // Everything rendered with the old context is stale now.
        self.damage_tracker.frame().mark_fully_damaged();
        debug!("Recovered window {:?} from gpu reset", self.window.id());
    }
    /// Swap the window surface's back and front buffers.
    ///
    /// On Wayland with an EGL surface, only the damaged regions are submitted
    /// unless damage debugging is active; all other platforms do a full swap.
    fn swap_buffers(&self) {
        #[allow(clippy::single_match)]
        let res = match (self.surface.deref(), &self.context.deref()) {
            #[cfg(not(any(target_os = "macos", windows)))]
            (Surface::Egl(surface), PossiblyCurrentContext::Egl(context))
                if matches!(self.raw_window_handle, RawWindowHandle::Wayland(_))
                    && !self.damage_tracker.debug =>
            {
                let damage = self.damage_tracker.shape_frame_damage(self.size_info.into());
                surface.swap_buffers_with_damage(context, &damage)
            },
            (surface, context) => surface.swap_buffers(context),
        };
        // Swap failures are logged but not fatal.
        if let Err(err) = res {
            debug!("error calling swap_buffers: {}", err);
        }
    }
    /// Update font size and cell dimensions.
    ///
    /// This will return a tuple of the cell width and height.
    fn update_font_size(
        glyph_cache: &mut GlyphCache,
        config: &UiConfig,
        font: &Font,
    ) -> (f32, f32) {
        // NOTE(review): a failed font update is deliberately ignored here —
        // presumably the glyph cache keeps the previous font; confirm in
        // `GlyphCache::update_font_size`.
        let _ = glyph_cache.update_font_size(font);
        // Compute new cell sizes.
        compute_cell_size(config, &glyph_cache.font_metrics())
    }
    /// Reset glyph cache.
    fn reset_glyph_cache(&mut self) {
        let cache = &mut self.glyph_cache;
        // Re-populate the cache through the renderer's glyph loader API.
        self.renderer.with_loader(|mut api| {
            cache.reset_glyph_cache(&mut api);
        });
    }
    // XXX: this function must not call to any `OpenGL` related tasks. Renderer updates are
    // performed in [`Self::process_renderer_update`] right before drawing.
    //
    /// Process update events.
    ///
    /// Applies any pending font/dimension changes, recomputes the grid size,
    /// resizes the PTY, terminal, and damage tracker when the grid changed,
    /// and queues a renderer resize when the pixel dimensions changed.
    pub fn handle_update<T>(
        &mut self,
        terminal: &mut Term<T>,
        pty_resize_handle: &mut dyn OnResize,
        message_buffer: &MessageBuffer,
        search_state: &mut SearchState,
        config: &UiConfig,
    ) where
        T: EventListener,
    {
        let pending_update = mem::take(&mut self.pending_update);
        let (mut cell_width, mut cell_height) =
            (self.size_info.cell_width(), self.size_info.cell_height());
        // Font and cursor changes both require the glyph cache to be rebuilt.
        if pending_update.font().is_some() || pending_update.cursor_dirty() {
            let renderer_update = self.pending_renderer_update.get_or_insert(Default::default());
            renderer_update.clear_font_cache = true
        }
        // Update font size and cell dimensions.
        if let Some(font) = pending_update.font() {
            let cell_dimensions = Self::update_font_size(&mut self.glyph_cache, config, font);
            cell_width = cell_dimensions.0;
            cell_height = cell_dimensions.1;
            info!("Cell size: {} x {}", cell_width, cell_height);
            // Mark entire terminal as damaged since glyph size could change without cell size
            // changes.
            self.damage_tracker.frame().mark_fully_damaged();
        }
        let (mut width, mut height) = (self.size_info.width(), self.size_info.height());
        if let Some(dimensions) = pending_update.dimensions() {
            width = dimensions.width as f32;
            height = dimensions.height as f32;
        }
        let padding = config.window.padding(self.window.scale_factor as f32);
        let mut new_size = SizeInfo::new(
            width,
            height,
            cell_width,
            cell_height,
            padding.0,
            padding.1,
            config.window.dynamic_padding,
        );
        // Update number of column/lines in the viewport.
        let search_active = search_state.history_index.is_some();
        let message_bar_lines = message_buffer.message().map_or(0, |m| m.text(&new_size).len());
        let search_lines = usize::from(search_active);
        new_size.reserve_lines(message_bar_lines + search_lines);
        // Update resize increments.
        if config.window.resize_increments {
            self.window.set_resize_increments(PhysicalSize::new(cell_width, cell_height));
        }
        // Resize the terminal when its grid dimensions have changed.
        if self.size_info.screen_lines() != new_size.screen_lines
            || self.size_info.columns() != new_size.columns()
        {
            // Resize PTY.
            pty_resize_handle.on_resize(new_size.into());
            // Resize terminal.
            terminal.resize(new_size);
            // Resize damage tracking.
            self.damage_tracker.resize(new_size.screen_lines(), new_size.columns());
        }
        // Check if dimensions have changed.
        if new_size != self.size_info {
            // Queue renderer update.
            let renderer_update = self.pending_renderer_update.get_or_insert(Default::default());
            renderer_update.resize = true;
            // Clear focused search match.
            search_state.clear_focused_match();
        }
        self.size_info = new_size;
    }
    // NOTE: Renderer updates are split off, since platforms like Wayland require resize and other
    // OpenGL operations to be performed right before rendering. Otherwise they could lock the
    // back buffer and render with the previous state. This also solves flickering during resizes.
    //
    /// Update the state of the renderer.
    pub fn process_renderer_update(&mut self) {
        let renderer_update = match self.pending_renderer_update.take() {
            Some(renderer_update) => renderer_update,
            _ => return,
        };
        // Resize renderer.
        if renderer_update.resize {
            // NOTE(review): `unwrap` assumes the pixel dimensions are never zero —
            // presumably guaranteed by the minimum grid size; confirm.
            let width = NonZeroU32::new(self.size_info.width() as u32).unwrap();
            let height = NonZeroU32::new(self.size_info.height() as u32).unwrap();
            self.surface.resize(&self.context, width, height);
        }
        // Ensure we're modifying the correct OpenGL context.
        self.make_current();
        if renderer_update.clear_font_cache {
            self.reset_glyph_cache();
        }
        self.renderer.resize(&self.size_info);
        info!("Padding: {} x {}", self.size_info.padding_x(), self.size_info.padding_y());
        info!("Width: {}, Height: {}", self.size_info.width(), self.size_info.height());
    }
    /// Draw the screen.
    ///
    /// A reference to Term whose state is being drawn must be provided.
    ///
    /// This call may block if vsync is enabled.
    pub fn draw<T: EventListener>(
        &mut self,
        mut terminal: MutexGuard<'_, Term<T>>,
        scheduler: &mut Scheduler,
        message_buffer: &MessageBuffer,
        config: &UiConfig,
        search_state: &mut SearchState,
    ) {
        // Collect renderable content before the terminal is dropped.
        let mut content = RenderableContent::new(config, self, &terminal, search_state);
        let mut grid_cells = Vec::new();
        for cell in &mut content {
            grid_cells.push(cell);
        }
        let selection_range = content.selection_range();
        let foreground_color = content.color(NamedColor::Foreground as usize);
        let background_color = content.color(NamedColor::Background as usize);
        let display_offset = content.display_offset();
        let cursor = content.cursor();
        let cursor_point = terminal.grid().cursor.point;
        let total_lines = terminal.grid().total_lines();
        let metrics = self.glyph_cache.font_metrics();
        // Copy out size info so `self` can be borrowed mutably below.
        let size_info = self.size_info;
        let vi_mode = terminal.mode().contains(TermMode::VI);
        let vi_cursor_point = if vi_mode { Some(terminal.vi_mode_cursor.point) } else { None };
        // Add damage from the terminal.
        match terminal.damage() {
            TermDamage::Full => self.damage_tracker.frame().mark_fully_damaged(),
            TermDamage::Partial(damaged_lines) => {
                for damage in damaged_lines {
                    self.damage_tracker.frame().damage_line(damage);
                }
            },
        }
        terminal.reset_damage();
        // Drop terminal as early as possible to free lock.
        drop(terminal);
        // Invalidate highlighted hints if grid has changed.
        self.validate_hint_highlights(display_offset);
        // Add damage from alacritty's UI elements overlapping terminal.
        let requires_full_damage = self.visual_bell.intensity() != 0.
            || self.hint_state.active()
            || search_state.regex().is_some();
        if requires_full_damage {
            self.damage_tracker.frame().mark_fully_damaged();
            self.damage_tracker.next_frame().mark_fully_damaged();
        }
        let vi_cursor_viewport_point =
            vi_cursor_point.and_then(|cursor| term::point_to_viewport(display_offset, cursor));
        self.damage_tracker.damage_vi_cursor(vi_cursor_viewport_point);
        self.damage_tracker.damage_selection(selection_range, display_offset);
        // Make sure this window's OpenGL context is active.
        self.make_current();
        self.renderer.clear(background_color, config.window_opacity());
        let mut lines = RenderLines::new();
        // Optimize loop hint comparator.
        let has_highlighted_hint =
            self.highlighted_hint.is_some() || self.vi_highlighted_hint.is_some();
        // Draw grid.
        {
            let _sampler = self.meter.sampler();
            // Ensure macOS hasn't reset our viewport.
            #[cfg(target_os = "macos")]
            self.renderer.set_viewport(&size_info);
            let glyph_cache = &mut self.glyph_cache;
            let highlighted_hint = &self.highlighted_hint;
            let vi_highlighted_hint = &self.vi_highlighted_hint;
            let damage_tracker = &mut self.damage_tracker;
            let cells = grid_cells.into_iter().map(|mut cell| {
                // Underline hints hovered by mouse or vi mode cursor.
                if has_highlighted_hint {
                    let point = term::viewport_to_point(display_offset, cell.point);
                    let hyperlink = cell.extra.as_ref().and_then(|extra| extra.hyperlink.as_ref());
                    let should_highlight = |hint: &Option<HintMatch>| {
                        hint.as_ref().is_some_and(|hint| hint.should_highlight(point, hyperlink))
                    };
                    if should_highlight(highlighted_hint) || should_highlight(vi_highlighted_hint) {
                        damage_tracker.frame().damage_point(cell.point);
                        cell.flags.insert(Flags::UNDERLINE);
                    }
                }
                // Update underline/strikeout.
                lines.update(&cell);
                cell
            });
            self.renderer.draw_cells(&size_info, glyph_cache, cells);
        }
        let mut rects = lines.rects(&metrics, &size_info);
        if let Some(vi_cursor_point) = vi_cursor_point {
            // Indicate vi mode by showing the cursor's position in the top right corner.
            let line = (-vi_cursor_point.line.0 + size_info.bottommost_line().0) as usize;
            let obstructed_column = Some(vi_cursor_point)
                .filter(|point| point.line == -(display_offset as i32))
                .map(|point| point.column);
            self.draw_line_indicator(config, total_lines, obstructed_column, line);
        } else if search_state.regex().is_some() {
            // Show current display offset in vi-less search to indicate match position.
            self.draw_line_indicator(config, total_lines, None, display_offset);
        };
        // Draw cursor.
        rects.extend(cursor.rects(&size_info, config.cursor.thickness()));
        // Push visual bell after url/underline/strikeout rects.
        let visual_bell_intensity = self.visual_bell.intensity();
        if visual_bell_intensity != 0. {
            let visual_bell_rect = RenderRect::new(
                0.,
                0.,
                size_info.width(),
                size_info.height(),
                config.bell.color,
                visual_bell_intensity as f32,
            );
            rects.push(visual_bell_rect);
        }
        // Handle IME positioning and search bar rendering.
        let ime_position = match search_state.regex() {
            Some(regex) => {
                let search_label = match search_state.direction() {
                    Direction::Right => FORWARD_SEARCH_LABEL,
                    Direction::Left => BACKWARD_SEARCH_LABEL,
                };
                let search_text = Self::format_search(regex, search_label, size_info.columns());
                // Render the search bar.
                self.draw_search(config, &search_text);
                // Draw search bar cursor.
                let line = size_info.screen_lines();
                let column = Column(search_text.chars().count() - 1);
                // Add cursor to search bar if IME is not active.
                if self.ime.preedit().is_none() {
                    let fg = config.colors.footer_bar_foreground();
                    let shape = CursorShape::Underline;
                    let cursor_width = NonZeroU32::new(1).unwrap();
                    let cursor =
                        RenderableCursor::new(Point::new(line, column), shape, fg, cursor_width);
                    rects.extend(cursor.rects(&size_info, config.cursor.thickness()));
                }
                Some(Point::new(line, column))
            },
            None => {
                let num_lines = self.size_info.screen_lines();
                match vi_cursor_viewport_point {
                    None => term::point_to_viewport(display_offset, cursor_point)
                        .filter(|point| point.line < num_lines),
                    point => point,
                }
            },
        };
        // Handle IME.
        if self.ime.is_enabled() {
            if let Some(point) = ime_position {
                let (fg, bg) = if search_state.regex().is_some() {
                    (config.colors.footer_bar_foreground(), config.colors.footer_bar_background())
                } else {
                    (foreground_color, background_color)
                };
                self.draw_ime_preview(point, fg, bg, &mut rects, config);
            }
        }
        if let Some(message) = message_buffer.message() {
            let search_offset = usize::from(search_state.regex().is_some());
            let text = message.text(&size_info);
            // Create a new rectangle for the background.
            let start_line = size_info.screen_lines() + search_offset;
            let y = size_info.cell_height().mul_add(start_line as f32, size_info.padding_y());
            let bg = match message.ty() {
                MessageType::Error => config.colors.normal.red,
                MessageType::Warning => config.colors.normal.yellow,
            };
            let x = 0;
            let width = size_info.width() as i32;
            let height = (size_info.height() - y) as i32;
            let message_bar_rect =
                RenderRect::new(x as f32, y, width as f32, height as f32, bg, 1.);
            // Push message_bar in the end, so it'll be above all other content.
            rects.push(message_bar_rect);
            // Always damage message bar, since it could have messages of the same size in it.
            self.damage_tracker.frame().add_viewport_rect(&size_info, x, y as i32, width, height);
            // Draw rectangles.
            self.renderer.draw_rects(&size_info, &metrics, rects);
            // Relay messages to the user.
            let glyph_cache = &mut self.glyph_cache;
            let fg = config.colors.primary.background;
            for (i, message_text) in text.iter().enumerate() {
                let point = Point::new(start_line + i, Column(0));
                self.renderer.draw_string(
                    point,
                    fg,
                    bg,
                    message_text.chars(),
                    &size_info,
                    glyph_cache,
                );
            }
        } else {
            // Draw rectangles.
            self.renderer.draw_rects(&size_info, &metrics, rects);
        }
        self.draw_render_timer(config);
        // Draw hyperlink uri preview.
        if has_highlighted_hint {
            let cursor_point = vi_cursor_point.or(Some(cursor_point));
            self.draw_hyperlink_preview(config, cursor_point, display_offset);
        }
        // Notify winit that we're about to present.
        self.window.pre_present_notify();
        // Highlight damage for debugging.
        if self.damage_tracker.debug {
            let damage = self.damage_tracker.shape_frame_damage(self.size_info.into());
            let mut rects = Vec::with_capacity(damage.len());
            self.highlight_damage(&mut rects);
            self.renderer.draw_rects(&self.size_info, &metrics, rects);
        }
        // Clearing debug highlights from the previous frame requires full redraw.
        self.swap_buffers();
        if matches!(self.raw_window_handle, RawWindowHandle::Xcb(_) | RawWindowHandle::Xlib(_)) {
            // On X11 `swap_buffers` does not block for vsync. However the next OpenGl command
            // will block to synchronize (this is `glClear` in Alacritty), which causes a
            // permanent one frame delay.
            self.renderer.finish();
        }
        // XXX: Request the new frame after swapping buffers, so the
        // time to finish OpenGL operations is accounted for in the timeout.
        if !matches!(self.raw_window_handle, RawWindowHandle::Wayland(_)) {
            self.request_frame(scheduler);
        }
        self.damage_tracker.swap_damage();
    }
/// Update to a new configuration.
pub fn update_config(&mut self, config: &UiConfig) {
self.damage_tracker.debug = config.debug.highlight_damage;
self.visual_bell.update_config(&config.bell);
self.colors = List::from(&config.colors);
}
    /// Update the mouse/vi mode cursor hint highlighting.
    ///
    /// This will return whether the highlighted hints changed.
    pub fn update_highlighted_hints<T>(
        &mut self,
        term: &Term<T>,
        config: &UiConfig,
        mouse: &Mouse,
        modifiers: ModifiersState,
    ) -> bool {
        // Update vi mode cursor hint.
        let vi_highlighted_hint = if term.mode().contains(TermMode::VI) {
            // Vi mode highlights regardless of which modifiers are held.
            let mods = ModifiersState::all();
            let point = term.vi_mode_cursor.point;
            hint::highlighted_at(term, config, point, mods)
        } else {
            None
        };
        let mut dirty = vi_highlighted_hint != self.vi_highlighted_hint;
        self.vi_highlighted_hint = vi_highlighted_hint;
        self.vi_highlighted_hint_age = 0;
        // Force full redraw if the vi mode highlight was cleared.
        if dirty {
            self.damage_tracker.frame().mark_fully_damaged();
        }
        // Abort if mouse highlighting conditions are not met.
        if !mouse.inside_text_area || !term.selection.as_ref().map_or(true, Selection::is_empty) {
            if self.highlighted_hint.take().is_some() {
                self.damage_tracker.frame().mark_fully_damaged();
                dirty = true;
            }
            return dirty;
        }
        // Find highlighted hint at mouse position.
        let point = mouse.point(&self.size_info, term.grid().display_offset());
        let highlighted_hint = hint::highlighted_at(term, config, point, modifiers);
        // Update cursor shape.
        if highlighted_hint.is_some() {
            // If mouse changed the line, we should update the hyperlink preview, since the
            // highlighted hint could be disrupted by the old preview.
            dirty = self.hint_mouse_point.is_some_and(|p| p.line != point.line);
            self.hint_mouse_point = Some(point);
            self.window.set_mouse_cursor(CursorIcon::Pointer);
        } else if self.highlighted_hint.is_some() {
            self.hint_mouse_point = None;
            if term.mode().intersects(TermMode::MOUSE_MODE) && !term.mode().contains(TermMode::VI) {
                self.window.set_mouse_cursor(CursorIcon::Default);
            } else {
                self.window.set_mouse_cursor(CursorIcon::Text);
            }
        }
        let mouse_highlight_dirty = self.highlighted_hint != highlighted_hint;
        dirty |= mouse_highlight_dirty;
        self.highlighted_hint = highlighted_hint;
        self.highlighted_hint_age = 0;
        // Force full redraw if the mouse cursor highlight was changed.
        if mouse_highlight_dirty {
            self.damage_tracker.frame().mark_fully_damaged();
        }
        dirty
    }
    /// Draw the IME preedit preview at `point` and position the IME popup.
    ///
    /// Without a preedit only the popup position is updated. Otherwise the
    /// visible portion of the preedit is drawn with an underline, damage is
    /// recorded, and a cursor is rendered inside the preedit when applicable.
    #[inline(never)]
    fn draw_ime_preview(
        &mut self,
        point: Point<usize>,
        fg: Rgb,
        bg: Rgb,
        rects: &mut Vec<RenderRect>,
        config: &UiConfig,
    ) {
        let preedit = match self.ime.preedit() {
            Some(preedit) => preedit,
            None => {
                // In case we don't have preedit, just set the popup point.
                self.window.update_ime_position(point, &self.size_info);
                return;
            },
        };
        let num_cols = self.size_info.columns();
        // Get the visible preedit.
        let visible_text: String = match (preedit.cursor_byte_offset, preedit.cursor_end_offset) {
            (Some(byte_offset), Some(end_offset)) if end_offset.0 > num_cols => StrShortener::new(
                &preedit.text[byte_offset.0..],
                num_cols,
                ShortenDirection::Right,
                Some(SHORTENER),
            ),
            _ => {
                StrShortener::new(&preedit.text, num_cols, ShortenDirection::Left, Some(SHORTENER))
            },
        }
        .collect();
        let visible_len = visible_text.chars().count();
        // Clamp the preedit to the viewport width, anchored at its end.
        let end = cmp::min(point.column.0 + visible_len, num_cols);
        let start = end.saturating_sub(visible_len);
        let start = Point::new(point.line, Column(start));
        let end = Point::new(point.line, Column(end - 1));
        let glyph_cache = &mut self.glyph_cache;
        let metrics = glyph_cache.font_metrics();
        self.renderer.draw_string(
            start,
            fg,
            bg,
            visible_text.chars(),
            &self.size_info,
            glyph_cache,
        );
        // Damage preedit inside the terminal viewport.
        if point.line < self.size_info.screen_lines() {
            let damage = LineDamageBounds::new(start.line, 0, num_cols);
            self.damage_tracker.frame().damage_line(damage);
            self.damage_tracker.next_frame().damage_line(damage);
        }
        // Add underline for preedit text.
        let underline = RenderLine { start, end, color: fg };
        rects.extend(underline.rects(Flags::UNDERLINE, &metrics, &self.size_info));
        let ime_popup_point = match preedit.cursor_end_offset {
            Some(cursor_end_offset) => {
                // Use hollow block when multiple characters are changed at once.
                let (shape, width) = if let Some(width) =
                    NonZeroU32::new((cursor_end_offset.0 - cursor_end_offset.1) as u32)
                {
                    (CursorShape::HollowBlock, width)
                } else {
                    (CursorShape::Beam, NonZeroU32::new(1).unwrap())
                };
                let cursor_column = Column(
                    (end.column.0 as isize - cursor_end_offset.0 as isize + 1).max(0) as usize,
                );
                let cursor_point = Point::new(point.line, cursor_column);
                let cursor = RenderableCursor::new(cursor_point, shape, fg, width);
                rects.extend(cursor.rects(&self.size_info, config.cursor.thickness()));
                cursor_point
            },
            _ => end,
        };
        self.window.update_ime_position(ime_popup_point, &self.size_info);
    }
/// Format search regex to account for the cursor and fullwidth characters.
fn format_search(search_regex: &str, search_label: &str, max_width: usize) -> String {
let label_len = search_label.len();
// Skip `search_regex` formatting if only label is visible.
if label_len > max_width {
return search_label[..max_width].to_owned();
}
// The search string consists of `search_label` + `search_regex` + `cursor`.
let mut bar_text = String::from(search_label);
bar_text.extend(StrShortener::new(
search_regex,
max_width.wrapping_sub(label_len + 1),
ShortenDirection::Left,
Some(SHORTENER),
));
// Add place for cursor.
bar_text.push(' ');
bar_text
}
    /// Draw preview for the currently highlighted `Hyperlink`.
    #[inline(never)]
    fn draw_hyperlink_preview(
        &mut self,
        config: &UiConfig,
        cursor_point: Option<Point>,
        display_offset: usize,
    ) {
        let num_cols = self.size_info.columns();
        // Collect the (shortened) URIs of all highlighted hyperlink hints.
        let uris: Vec<_> = self
            .highlighted_hint
            .iter()
            .chain(&self.vi_highlighted_hint)
            .filter_map(|hint| hint.hyperlink().map(|hyperlink| hyperlink.uri()))
            .map(|uri| StrShortener::new(uri, num_cols, ShortenDirection::Right, Some(SHORTENER)))
            .collect();
        if uris.is_empty() {
            return;
        }
        // The maximum amount of protected lines including the ones we'll show preview on.
        let max_protected_lines = uris.len() * 2;
        // Lines we shouldn't show preview on, because it'll obscure the highlighted hint.
        let mut protected_lines = Vec::with_capacity(max_protected_lines);
        if self.size_info.screen_lines() > max_protected_lines {
            // Prefer to show preview even when it'll likely obscure the highlighted hint, when
            // there's no place left for it.
            protected_lines.push(self.hint_mouse_point.map(|point| point.line));
            protected_lines.push(cursor_point.map(|point| point.line));
        }
        // Find the line in viewport we can draw preview on without obscuring protected lines.
        let viewport_bottom = self.size_info.bottommost_line() - Line(display_offset as i32);
        let viewport_top = viewport_bottom - (self.size_info.screen_lines() - 1);
        let uri_lines = (viewport_top.0..=viewport_bottom.0)
            .rev()
            .map(|line| Some(Line(line)))
            .filter_map(|line| {
                if protected_lines.contains(&line) {
                    None
                } else {
                    // Claim this line so subsequent URIs pick a different one.
                    protected_lines.push(line);
                    line
                }
            })
            .take(uris.len())
            .flat_map(|line| term::point_to_viewport(display_offset, Point::new(line, Column(0))));
        let fg = config.colors.footer_bar_foreground();
        let bg = config.colors.footer_bar_background();
        for (uri, point) in uris.into_iter().zip(uri_lines) {
            // Damage the uri preview.
            let damage = LineDamageBounds::new(point.line, point.column.0, num_cols);
            self.damage_tracker.frame().damage_line(damage);
            // Damage the uri preview for the next frame as well.
            self.damage_tracker.next_frame().damage_line(damage);
            self.renderer.draw_string(point, fg, bg, uri, &self.size_info, &mut self.glyph_cache);
        }
    }
/// Draw current search regex.
#[inline(never)]
fn draw_search(&mut self, config: &UiConfig, text: &str) {
// Assure text length is at least num_cols.
let num_cols = self.size_info.columns();
let text = format!("{text:<num_cols$}");
let point = Point::new(self.size_info.screen_lines(), Column(0));
let fg = config.colors.footer_bar_foreground();
let bg = config.colors.footer_bar_background();
self.renderer.draw_string(
point,
fg,
bg,
text.chars(),
&self.size_info,
&mut self.glyph_cache,
);
}
/// Draw render timer.
#[inline(never)]
fn draw_render_timer(&mut self, config: &UiConfig) {
if !config.debug.render_timer {
return;
}
let timing = format!("{:.3} usec", self.meter.average());
let point = Point::new(self.size_info.screen_lines().saturating_sub(2), Column(0));
let fg = config.colors.primary.background;
let bg = config.colors.normal.red;
// Damage render timer for current and next frame.
let damage = LineDamageBounds::new(point.line, point.column.0, timing.len());
self.damage_tracker.frame().damage_line(damage);
self.damage_tracker.next_frame().damage_line(damage);
let glyph_cache = &mut self.glyph_cache;
self.renderer.draw_string(point, fg, bg, timing.chars(), &self.size_info, glyph_cache);
}
/// Draw an indicator for the position of a line in history.
///
/// Renders `[line/total]` in the top-right corner unless it would overlap the
/// vi mode cursor (signalled through `obstructed_column`).
#[inline(never)]
fn draw_line_indicator(
    &mut self,
    config: &UiConfig,
    total_lines: usize,
    obstructed_column: Option<Column>,
    line: usize,
) {
    let text = format!("[{}/{}]", line, total_lines - 1);

    // Right-align the indicator on the first line.
    let columns = self.size_info.columns();
    let column = Column(columns.saturating_sub(text.len()));
    let point = Point::new(0, column);

    // Damage the line indicator for current and next frame.
    let damage = LineDamageBounds::new(point.line, point.column.0, columns - 1);
    self.damage_tracker.frame().damage_line(damage);
    self.damage_tracker.next_frame().damage_line(damage);

    let colors = &config.colors;
    let fg = colors.line_indicator.foreground.unwrap_or(colors.primary.background);
    let bg = colors.line_indicator.background.unwrap_or(colors.primary.foreground);

    // Do not render anything if it would obscure the vi mode cursor.
    if obstructed_column.map_or(true, |obstructed| obstructed < column) {
        let glyph_cache = &mut self.glyph_cache;
        self.renderer.draw_string(point, fg, bg, text.chars(), &self.size_info, glyph_cache);
    }
}
/// Highlight damaged rects.
///
/// This function is for debug purposes only.
fn highlight_damage(&self, render_rects: &mut Vec<RenderRect>) {
    // Convert every damaged region of this frame into a translucent overlay rect.
    let frame_damage = self.damage_tracker.shape_frame_damage(self.size_info.into());
    render_rects.extend(frame_damage.iter().map(|damage_rect| {
        RenderRect::new(
            damage_rect.x as f32,
            damage_y_to_viewport_y(&self.size_info, damage_rect) as f32,
            damage_rect.width as f32,
            damage_rect.height as f32,
            DAMAGE_RECT_COLOR,
            0.5,
        )
    }));
}
/// Check whether a hint highlight needs to be cleared.
///
/// A highlight is considered stale once frame damage intersects its viewport
/// area; stale highlights are dropped and the frame is fully damaged so the
/// underline disappears on screen.
fn validate_hint_highlights(&mut self, display_offset: usize) {
    let frame = self.damage_tracker.frame();
    // Mouse and vi mode highlights are validated identically; only the mouse
    // one (`reset_mouse == true`) restores the default cursor icon on clear.
    let hints = [
        (&mut self.highlighted_hint, &mut self.highlighted_hint_age, true),
        (&mut self.vi_highlighted_hint, &mut self.vi_highlighted_hint_age, false),
    ];

    let num_lines = self.size_info.screen_lines();
    for (hint, hint_age, reset_mouse) in hints {
        let (start, end) = match hint {
            Some(hint) => (*hint.bounds().start(), *hint.bounds().end()),
            None => continue,
        };

        // Ignore hints that were created this frame.
        //
        // Ages are reset to 0 when a hint is (re)assigned, so an age of 1 here
        // means the hint is brand new and its own damage must not clear it.
        *hint_age += 1;
        if *hint_age == 1 {
            continue;
        }

        // Convert hint bounds to viewport coordinates.
        //
        // A start outside the viewport clamps to the origin, an end outside it
        // clamps to the bottom-right visible cell.
        let start = term::point_to_viewport(display_offset, start)
            .filter(|point| point.line < num_lines)
            .unwrap_or_default();
        let end = term::point_to_viewport(display_offset, end)
            .filter(|point| point.line < num_lines)
            .unwrap_or_else(|| Point::new(num_lines - 1, self.size_info.last_column()));

        // Clear invalidated hints.
        if frame.intersects(start, end) {
            if reset_mouse {
                self.window.set_mouse_cursor(CursorIcon::Default);
            }

            frame.mark_fully_damaged();
            *hint = None;
        }
    }
}
/// Request a new frame for a window on Wayland.
///
/// Schedules a frame event after a vsync-aligned timeout computed from the
/// current monitor's refresh rate (falling back to 60 Hz when unknown).
fn request_frame(&mut self, scheduler: &mut Scheduler) {
    // Mark that we've used a frame.
    self.window.has_frame = false;

    // Get the display vblank interval, defaulting to 60 Hz when the monitor
    // doesn't report a refresh rate.
    let refresh_millihertz = self
        .window
        .current_monitor()
        .and_then(|monitor| monitor.refresh_rate_millihertz())
        .unwrap_or(60_000) as f64;
    // Interval in milliseconds: 1e6 / millihertz.
    let vblank_millis = 1_000_000. / refresh_millihertz;

    // Now convert it to micro seconds.
    let vblank_interval = Duration::from_micros((1000. * vblank_millis) as u64);

    let swap_timeout = self.frame_timer.compute_timeout(vblank_interval);

    let window_id = self.window.id();
    let timer_id = TimerId::new(Topic::Frame, window_id);
    let event = Event::new(EventType::Frame, window_id);

    scheduler.schedule(event, swap_timeout, false, timer_id);
}
}
impl Drop for Display {
    fn drop(&mut self) {
        // Switch OpenGL context before dropping, otherwise objects (like programs) from other
        // contexts might be deleted when dropping renderer.
        self.make_current();

        // SAFETY: these fields are wrapped in `ManuallyDrop` and are never touched again after
        // this point, so each is dropped exactly once. The order matters: the renderer must be
        // torn down before the GL context and surface it was created against.
        unsafe {
            ManuallyDrop::drop(&mut self.renderer);
            ManuallyDrop::drop(&mut self.context);
            ManuallyDrop::drop(&mut self.surface);
        }
    }
}
/// Input method state.
#[derive(Debug, Default)]
pub struct Ime {
    /// Whether the IME is enabled.
    enabled: bool,

    /// Current IME preedit (uncommitted composition text), if any.
    preedit: Option<Preedit>,
}
impl Ime {
    /// Enable or disable the IME.
    ///
    /// Disabling resets the entire state, discarding any pending preedit.
    #[inline]
    pub fn set_enabled(&mut self, is_enabled: bool) {
        if is_enabled {
            self.enabled = true;
        } else {
            // Clear state when disabling IME.
            *self = Default::default();
        }
    }

    /// Whether the IME is currently enabled.
    #[inline]
    pub fn is_enabled(&self) -> bool {
        self.enabled
    }

    /// Replace the current preedit.
    #[inline]
    pub fn set_preedit(&mut self, preedit: Option<Preedit>) {
        self.preedit = preedit;
    }

    /// Borrow the current preedit, if present.
    #[inline]
    pub fn preedit(&self) -> Option<&Preedit> {
        self.preedit.as_ref()
    }
}
#[derive(Debug, Default, PartialEq, Eq)]
pub struct Preedit {
    /// The preedit text.
    text: String,

    /// Byte offsets of the cursor (start, end) into the preedit text.
    ///
    /// `None` means that the cursor is invisible.
    cursor_byte_offset: Option<(usize, usize)>,

    /// Distance, in terminal cells (char widths), from the cursor's start and end
    /// positions to the end of the preedit text; derived from `cursor_byte_offset`
    /// in `Preedit::new`.
    cursor_end_offset: Option<(usize, usize)>,
}
impl Preedit {
    /// Build a preedit, deriving the cell-width cursor offsets from the byte offsets.
    pub fn new(text: String, cursor_byte_offset: Option<(usize, usize)>) -> Self {
        // Width, in terminal cells, of the preedit text following `byte_idx`.
        let cells_to_end = |byte_idx: usize| {
            text[byte_idx..].chars().map(|ch| ch.width().unwrap_or(1)).sum::<usize>()
        };

        // Convert byte offsets into char-width offsets from the end of the text.
        let cursor_end_offset =
            cursor_byte_offset.map(|(start, end)| (cells_to_end(start), cells_to_end(end)));

        Self { text, cursor_byte_offset, cursor_end_offset }
    }
}
/// Pending renderer updates.
///
/// All renderer updates are cached to be applied just before rendering, to avoid platform-specific
/// rendering issues.
#[derive(Debug, Default, Copy, Clone)]
pub struct RendererUpdate {
    /// Should resize the window.
    resize: bool,

    /// Clear font caches.
    clear_font_cache: bool,
}
/// The frame timer state.
pub struct FrameTimer {
    /// Base timestamp used to compute sync points.
    base: Instant,

    /// The last timestamp we synced to.
    last_synced_timestamp: Instant,

    /// The refresh rate we've used to compute sync timestamps.
    refresh_interval: Duration,
}

impl FrameTimer {
    /// Create a timer anchored at the current instant with no known refresh rate.
    pub fn new() -> Self {
        let now = Instant::now();
        Self { base: now, last_synced_timestamp: now, refresh_interval: Duration::ZERO }
    }

    /// Compute the delay that we should use to achieve the target frame
    /// rate.
    ///
    /// Returns `refresh_interval` itself when the rate changed (resetting the
    /// sync base), `Duration::ZERO` when the next sync point is already in the
    /// past, and otherwise the remaining time until the next sync point.
    pub fn compute_timeout(&mut self, refresh_interval: Duration) -> Duration {
        let now = Instant::now();

        // Handle refresh rate change.
        if self.refresh_interval != refresh_interval {
            self.base = now;
            self.last_synced_timestamp = now;
            self.refresh_interval = refresh_interval;
            return refresh_interval;
        }

        let next_frame = self.last_synced_timestamp + self.refresh_interval;
        if next_frame < now {
            // Redraw immediately if we haven't drawn in over `refresh_interval`.
            let elapsed_micros = (now - self.base).as_micros() as u64;
            let refresh_micros = self.refresh_interval.as_micros() as u64;

            // Guard against a zero interval: `% 0` would panic. This happens when the
            // timer is (re)used with `Duration::ZERO`, e.g. right after construction.
            if refresh_micros == 0 {
                self.last_synced_timestamp = now;
                return Duration::ZERO;
            }

            // Re-align the sync timestamp to the most recent vblank tick.
            self.last_synced_timestamp =
                now - Duration::from_micros(elapsed_micros % refresh_micros);
            Duration::ZERO
        } else {
            // Redraw on the next `refresh_interval` clock tick.
            self.last_synced_timestamp = next_frame;
            next_frame - now
        }
    }
}

impl Default for FrameTimer {
    fn default() -> Self {
        Self::new()
    }
}
/// Calculate the cell dimensions based on font metrics.
///
/// This will return a tuple of the cell width and height.
#[inline]
fn compute_cell_size(config: &UiConfig, metrics: &crossfont::Metrics) -> (f32, f32) {
    // Apply the user-configured per-cell offsets, then clamp to at least one pixel.
    let width = (metrics.average_advance + f64::from(config.font.offset.x)).floor().max(1.);
    let height = (metrics.line_height + f64::from(config.font.offset.y)).floor().max(1.);
    (width as f32, height as f32)
}
/// Calculate the size of the window given padding, terminal dimensions and cell size.
fn window_size(
    config: &UiConfig,
    dimensions: Dimensions,
    cell_width: f32,
    cell_height: f32,
    scale_factor: f32,
) -> PhysicalSize<u32> {
    let (padding_x, padding_y) = config.window.padding(scale_factor);

    // Clamp the grid to the supported minimum dimensions.
    let columns = dimensions.columns.max(MIN_COLUMNS);
    let lines = dimensions.lines.max(MIN_SCREEN_LINES);
    let grid_width = cell_width * columns as f32;
    let grid_height = cell_height * lines as f32;

    // Add padding on both sides of the grid (fused multiply-add, then floor).
    let width = padding_x.mul_add(2., grid_width).floor();
    let height = padding_y.mul_add(2., grid_height).floor();

    PhysicalSize::new(width as u32, height as u32)
}
| rust | {
"argument_definitions": [],
"end_line": 1237,
"name": "format_search",
"signature": "fn format_search(search_regex: &str, search_label: &str, max_width: usize) -> String",
"start_line": 1216
} | {
"class_name": "impl Display {\n pub fn new(\n window: Window,\n gl_context: NotCurrentContext,\n config: &UiConfig,\n _tabbed: bool,\n ) -> Result<Display, Error> {\n let raw_window_handle = window.raw_window_handle();\n\n let scale_factor = window.scale_factor as f32;\n let rasterizer = Rasterizer::new()?;\n\n let font_size = config.font.size().scale(scale_factor);\n debug!(\"Loading \\\"{}\\\" font\", &config.font.normal().family);\n let font = config.font.clone().with_size(font_size);\n let mut glyph_cache = GlyphCache::new(rasterizer, &font)?;\n\n let metrics = glyph_cache.font_metrics();\n let (cell_width, cell_height) = compute_cell_size(config, &metrics);\n\n // Resize the window to account for the user configured size.\n if let Some(dimensions) = config.window.dimensions() {\n let size = window_size(config, dimensions, cell_width, cell_height, scale_factor);\n window.request_inner_size(size);\n }\n\n // Create the GL surface to draw into.\n let surface = platform::create_gl_surface(\n &gl_context,\n window.inner_size(),\n window.raw_window_handle(),\n )?;\n\n // Make the context current.\n let context = gl_context.make_current(&surface)?;\n\n // Create renderer.\n let mut renderer = Renderer::new(&context, config.debug.renderer)?;\n\n // Load font common glyphs to accelerate rendering.\n debug!(\"Filling glyph cache with common glyphs\");\n renderer.with_loader(|mut api| {\n glyph_cache.reset_glyph_cache(&mut api);\n });\n\n let padding = config.window.padding(window.scale_factor as f32);\n let viewport_size = window.inner_size();\n\n // Create new size with at least one column and row.\n let size_info = SizeInfo::new(\n viewport_size.width as f32,\n viewport_size.height as f32,\n cell_width,\n cell_height,\n padding.0,\n padding.1,\n config.window.dynamic_padding && config.window.dimensions().is_none(),\n );\n\n info!(\"Cell size: {} x {}\", cell_width, cell_height);\n info!(\"Padding: {} x {}\", size_info.padding_x(), size_info.padding_y());\n 
info!(\"Width: {}, Height: {}\", size_info.width(), size_info.height());\n\n // Update OpenGL projection.\n renderer.resize(&size_info);\n\n // Clear screen.\n let background_color = config.colors.primary.background;\n renderer.clear(background_color, config.window_opacity());\n\n // Disable shadows for transparent windows on macOS.\n #[cfg(target_os = \"macos\")]\n window.set_has_shadow(config.window_opacity() >= 1.0);\n\n let is_wayland = matches!(raw_window_handle, RawWindowHandle::Wayland(_));\n\n // On Wayland we can safely ignore this call, since the window isn't visible until you\n // actually draw something into it and commit those changes.\n if !is_wayland {\n surface.swap_buffers(&context).expect(\"failed to swap buffers.\");\n renderer.finish();\n }\n\n // Set resize increments for the newly created window.\n if config.window.resize_increments {\n window.set_resize_increments(PhysicalSize::new(cell_width, cell_height));\n }\n\n window.set_visible(true);\n\n // Always focus new windows, even if no Alacritty window is currently focused.\n #[cfg(target_os = \"macos\")]\n window.focus_window();\n\n #[allow(clippy::single_match)]\n #[cfg(not(windows))]\n if !_tabbed {\n match config.window.startup_mode {\n #[cfg(target_os = \"macos\")]\n StartupMode::SimpleFullscreen => window.set_simple_fullscreen(true),\n StartupMode::Maximized if !is_wayland => window.set_maximized(true),\n _ => (),\n }\n }\n\n let hint_state = HintState::new(config.hints.alphabet());\n\n let mut damage_tracker = DamageTracker::new(size_info.screen_lines(), size_info.columns());\n damage_tracker.debug = config.debug.highlight_damage;\n\n // Disable vsync.\n if let Err(err) = surface.set_swap_interval(&context, SwapInterval::DontWait) {\n info!(\"Failed to disable vsync: {}\", err);\n }\n\n Ok(Self {\n context: ManuallyDrop::new(context),\n visual_bell: VisualBell::from(&config.bell),\n renderer: ManuallyDrop::new(renderer),\n renderer_preference: config.debug.renderer,\n surface: 
ManuallyDrop::new(surface),\n colors: List::from(&config.colors),\n frame_timer: FrameTimer::new(),\n raw_window_handle,\n damage_tracker,\n glyph_cache,\n hint_state,\n size_info,\n font_size,\n window,\n pending_renderer_update: Default::default(),\n vi_highlighted_hint_age: Default::default(),\n highlighted_hint_age: Default::default(),\n vi_highlighted_hint: Default::default(),\n highlighted_hint: Default::default(),\n hint_mouse_point: Default::default(),\n pending_update: Default::default(),\n cursor_hidden: Default::default(),\n meter: Default::default(),\n ime: Default::default(),\n })\n }\n\n #[inline]\n pub fn gl_context(&self) -> &PossiblyCurrentContext {\n &self.context\n }\n\n pub fn make_not_current(&mut self) {\n if self.context.is_current() {\n self.context.make_not_current_in_place().expect(\"failed to disable context\");\n }\n }\n\n pub fn make_current(&mut self) {\n let is_current = self.context.is_current();\n\n // Attempt to make the context current if it's not.\n let context_loss = if is_current {\n self.renderer.was_context_reset()\n } else {\n match self.context.make_current(&self.surface) {\n Err(err) if err.error_kind() == ErrorKind::ContextLost => {\n info!(\"Context lost for window {:?}\", self.window.id());\n true\n },\n _ => false,\n }\n };\n\n if !context_loss {\n return;\n }\n\n let gl_display = self.context.display();\n let gl_config = self.context.config();\n let raw_window_handle = Some(self.window.raw_window_handle());\n let context = platform::create_gl_context(&gl_display, &gl_config, raw_window_handle)\n .expect(\"failed to recreate context.\");\n\n // Drop the old context and renderer.\n unsafe {\n ManuallyDrop::drop(&mut self.renderer);\n ManuallyDrop::drop(&mut self.context);\n }\n\n // Activate new context.\n let context = context.treat_as_possibly_current();\n self.context = ManuallyDrop::new(context);\n self.context.make_current(&self.surface).expect(\"failed to reativate context after reset.\");\n\n // Recreate 
renderer.\n let renderer = Renderer::new(&self.context, self.renderer_preference)\n .expect(\"failed to recreate renderer after reset\");\n self.renderer = ManuallyDrop::new(renderer);\n\n // Resize the renderer.\n self.renderer.resize(&self.size_info);\n\n self.reset_glyph_cache();\n self.damage_tracker.frame().mark_fully_damaged();\n\n debug!(\"Recovered window {:?} from gpu reset\", self.window.id());\n }\n\n fn swap_buffers(&self) {\n #[allow(clippy::single_match)]\n let res = match (self.surface.deref(), &self.context.deref()) {\n #[cfg(not(any(target_os = \"macos\", windows)))]\n (Surface::Egl(surface), PossiblyCurrentContext::Egl(context))\n if matches!(self.raw_window_handle, RawWindowHandle::Wayland(_))\n && !self.damage_tracker.debug =>\n {\n let damage = self.damage_tracker.shape_frame_damage(self.size_info.into());\n surface.swap_buffers_with_damage(context, &damage)\n },\n (surface, context) => surface.swap_buffers(context),\n };\n if let Err(err) = res {\n debug!(\"error calling swap_buffers: {}\", err);\n }\n }\n\n /// Update font size and cell dimensions.\n ///\n /// This will return a tuple of the cell width and height.\n fn update_font_size(\n glyph_cache: &mut GlyphCache,\n config: &UiConfig,\n font: &Font,\n ) -> (f32, f32) {\n let _ = glyph_cache.update_font_size(font);\n\n // Compute new cell sizes.\n compute_cell_size(config, &glyph_cache.font_metrics())\n }\n\n /// Reset glyph cache.\n fn reset_glyph_cache(&mut self) {\n let cache = &mut self.glyph_cache;\n self.renderer.with_loader(|mut api| {\n cache.reset_glyph_cache(&mut api);\n });\n }\n\n // XXX: this function must not call to any `OpenGL` related tasks. 
Renderer updates are\n // performed in [`Self::process_renderer_update`] right before drawing.\n //\n /// Process update events.\n pub fn handle_update<T>(\n &mut self,\n terminal: &mut Term<T>,\n pty_resize_handle: &mut dyn OnResize,\n message_buffer: &MessageBuffer,\n search_state: &mut SearchState,\n config: &UiConfig,\n ) where\n T: EventListener,\n {\n let pending_update = mem::take(&mut self.pending_update);\n\n let (mut cell_width, mut cell_height) =\n (self.size_info.cell_width(), self.size_info.cell_height());\n\n if pending_update.font().is_some() || pending_update.cursor_dirty() {\n let renderer_update = self.pending_renderer_update.get_or_insert(Default::default());\n renderer_update.clear_font_cache = true\n }\n\n // Update font size and cell dimensions.\n if let Some(font) = pending_update.font() {\n let cell_dimensions = Self::update_font_size(&mut self.glyph_cache, config, font);\n cell_width = cell_dimensions.0;\n cell_height = cell_dimensions.1;\n\n info!(\"Cell size: {} x {}\", cell_width, cell_height);\n\n // Mark entire terminal as damaged since glyph size could change without cell size\n // changes.\n self.damage_tracker.frame().mark_fully_damaged();\n }\n\n let (mut width, mut height) = (self.size_info.width(), self.size_info.height());\n if let Some(dimensions) = pending_update.dimensions() {\n width = dimensions.width as f32;\n height = dimensions.height as f32;\n }\n\n let padding = config.window.padding(self.window.scale_factor as f32);\n\n let mut new_size = SizeInfo::new(\n width,\n height,\n cell_width,\n cell_height,\n padding.0,\n padding.1,\n config.window.dynamic_padding,\n );\n\n // Update number of column/lines in the viewport.\n let search_active = search_state.history_index.is_some();\n let message_bar_lines = message_buffer.message().map_or(0, |m| m.text(&new_size).len());\n let search_lines = usize::from(search_active);\n new_size.reserve_lines(message_bar_lines + search_lines);\n\n // Update resize increments.\n if 
config.window.resize_increments {\n self.window.set_resize_increments(PhysicalSize::new(cell_width, cell_height));\n }\n\n // Resize when terminal when its dimensions have changed.\n if self.size_info.screen_lines() != new_size.screen_lines\n || self.size_info.columns() != new_size.columns()\n {\n // Resize PTY.\n pty_resize_handle.on_resize(new_size.into());\n\n // Resize terminal.\n terminal.resize(new_size);\n\n // Resize damage tracking.\n self.damage_tracker.resize(new_size.screen_lines(), new_size.columns());\n }\n\n // Check if dimensions have changed.\n if new_size != self.size_info {\n // Queue renderer update.\n let renderer_update = self.pending_renderer_update.get_or_insert(Default::default());\n renderer_update.resize = true;\n\n // Clear focused search match.\n search_state.clear_focused_match();\n }\n self.size_info = new_size;\n }\n\n // NOTE: Renderer updates are split off, since platforms like Wayland require resize and other\n // OpenGL operations to be performed right before rendering. Otherwise they could lock the\n // back buffer and render with the previous state. 
This also solves flickering during resizes.\n //\n /// Update the state of the renderer.\n pub fn process_renderer_update(&mut self) {\n let renderer_update = match self.pending_renderer_update.take() {\n Some(renderer_update) => renderer_update,\n _ => return,\n };\n\n // Resize renderer.\n if renderer_update.resize {\n let width = NonZeroU32::new(self.size_info.width() as u32).unwrap();\n let height = NonZeroU32::new(self.size_info.height() as u32).unwrap();\n self.surface.resize(&self.context, width, height);\n }\n\n // Ensure we're modifying the correct OpenGL context.\n self.make_current();\n\n if renderer_update.clear_font_cache {\n self.reset_glyph_cache();\n }\n\n self.renderer.resize(&self.size_info);\n\n info!(\"Padding: {} x {}\", self.size_info.padding_x(), self.size_info.padding_y());\n info!(\"Width: {}, Height: {}\", self.size_info.width(), self.size_info.height());\n }\n\n /// Draw the screen.\n ///\n /// A reference to Term whose state is being drawn must be provided.\n ///\n /// This call may block if vsync is enabled.\n pub fn draw<T: EventListener>(\n &mut self,\n mut terminal: MutexGuard<'_, Term<T>>,\n scheduler: &mut Scheduler,\n message_buffer: &MessageBuffer,\n config: &UiConfig,\n search_state: &mut SearchState,\n ) {\n // Collect renderable content before the terminal is dropped.\n let mut content = RenderableContent::new(config, self, &terminal, search_state);\n let mut grid_cells = Vec::new();\n for cell in &mut content {\n grid_cells.push(cell);\n }\n let selection_range = content.selection_range();\n let foreground_color = content.color(NamedColor::Foreground as usize);\n let background_color = content.color(NamedColor::Background as usize);\n let display_offset = content.display_offset();\n let cursor = content.cursor();\n\n let cursor_point = terminal.grid().cursor.point;\n let total_lines = terminal.grid().total_lines();\n let metrics = self.glyph_cache.font_metrics();\n let size_info = self.size_info;\n\n let vi_mode = 
terminal.mode().contains(TermMode::VI);\n let vi_cursor_point = if vi_mode { Some(terminal.vi_mode_cursor.point) } else { None };\n\n // Add damage from the terminal.\n match terminal.damage() {\n TermDamage::Full => self.damage_tracker.frame().mark_fully_damaged(),\n TermDamage::Partial(damaged_lines) => {\n for damage in damaged_lines {\n self.damage_tracker.frame().damage_line(damage);\n }\n },\n }\n terminal.reset_damage();\n\n // Drop terminal as early as possible to free lock.\n drop(terminal);\n\n // Invalidate highlighted hints if grid has changed.\n self.validate_hint_highlights(display_offset);\n\n // Add damage from alacritty's UI elements overlapping terminal.\n\n let requires_full_damage = self.visual_bell.intensity() != 0.\n || self.hint_state.active()\n || search_state.regex().is_some();\n if requires_full_damage {\n self.damage_tracker.frame().mark_fully_damaged();\n self.damage_tracker.next_frame().mark_fully_damaged();\n }\n\n let vi_cursor_viewport_point =\n vi_cursor_point.and_then(|cursor| term::point_to_viewport(display_offset, cursor));\n self.damage_tracker.damage_vi_cursor(vi_cursor_viewport_point);\n self.damage_tracker.damage_selection(selection_range, display_offset);\n\n // Make sure this window's OpenGL context is active.\n self.make_current();\n\n self.renderer.clear(background_color, config.window_opacity());\n let mut lines = RenderLines::new();\n\n // Optimize loop hint comparator.\n let has_highlighted_hint =\n self.highlighted_hint.is_some() || self.vi_highlighted_hint.is_some();\n\n // Draw grid.\n {\n let _sampler = self.meter.sampler();\n\n // Ensure macOS hasn't reset our viewport.\n #[cfg(target_os = \"macos\")]\n self.renderer.set_viewport(&size_info);\n\n let glyph_cache = &mut self.glyph_cache;\n let highlighted_hint = &self.highlighted_hint;\n let vi_highlighted_hint = &self.vi_highlighted_hint;\n let damage_tracker = &mut self.damage_tracker;\n\n let cells = grid_cells.into_iter().map(|mut cell| {\n // Underline hints 
hovered by mouse or vi mode cursor.\n if has_highlighted_hint {\n let point = term::viewport_to_point(display_offset, cell.point);\n let hyperlink = cell.extra.as_ref().and_then(|extra| extra.hyperlink.as_ref());\n\n let should_highlight = |hint: &Option<HintMatch>| {\n hint.as_ref().is_some_and(|hint| hint.should_highlight(point, hyperlink))\n };\n if should_highlight(highlighted_hint) || should_highlight(vi_highlighted_hint) {\n damage_tracker.frame().damage_point(cell.point);\n cell.flags.insert(Flags::UNDERLINE);\n }\n }\n\n // Update underline/strikeout.\n lines.update(&cell);\n\n cell\n });\n self.renderer.draw_cells(&size_info, glyph_cache, cells);\n }\n\n let mut rects = lines.rects(&metrics, &size_info);\n\n if let Some(vi_cursor_point) = vi_cursor_point {\n // Indicate vi mode by showing the cursor's position in the top right corner.\n let line = (-vi_cursor_point.line.0 + size_info.bottommost_line().0) as usize;\n let obstructed_column = Some(vi_cursor_point)\n .filter(|point| point.line == -(display_offset as i32))\n .map(|point| point.column);\n self.draw_line_indicator(config, total_lines, obstructed_column, line);\n } else if search_state.regex().is_some() {\n // Show current display offset in vi-less search to indicate match position.\n self.draw_line_indicator(config, total_lines, None, display_offset);\n };\n\n // Draw cursor.\n rects.extend(cursor.rects(&size_info, config.cursor.thickness()));\n\n // Push visual bell after url/underline/strikeout rects.\n let visual_bell_intensity = self.visual_bell.intensity();\n if visual_bell_intensity != 0. 
{\n let visual_bell_rect = RenderRect::new(\n 0.,\n 0.,\n size_info.width(),\n size_info.height(),\n config.bell.color,\n visual_bell_intensity as f32,\n );\n rects.push(visual_bell_rect);\n }\n\n // Handle IME positioning and search bar rendering.\n let ime_position = match search_state.regex() {\n Some(regex) => {\n let search_label = match search_state.direction() {\n Direction::Right => FORWARD_SEARCH_LABEL,\n Direction::Left => BACKWARD_SEARCH_LABEL,\n };\n\n let search_text = Self::format_search(regex, search_label, size_info.columns());\n\n // Render the search bar.\n self.draw_search(config, &search_text);\n\n // Draw search bar cursor.\n let line = size_info.screen_lines();\n let column = Column(search_text.chars().count() - 1);\n\n // Add cursor to search bar if IME is not active.\n if self.ime.preedit().is_none() {\n let fg = config.colors.footer_bar_foreground();\n let shape = CursorShape::Underline;\n let cursor_width = NonZeroU32::new(1).unwrap();\n let cursor =\n RenderableCursor::new(Point::new(line, column), shape, fg, cursor_width);\n rects.extend(cursor.rects(&size_info, config.cursor.thickness()));\n }\n\n Some(Point::new(line, column))\n },\n None => {\n let num_lines = self.size_info.screen_lines();\n match vi_cursor_viewport_point {\n None => term::point_to_viewport(display_offset, cursor_point)\n .filter(|point| point.line < num_lines),\n point => point,\n }\n },\n };\n\n // Handle IME.\n if self.ime.is_enabled() {\n if let Some(point) = ime_position {\n let (fg, bg) = if search_state.regex().is_some() {\n (config.colors.footer_bar_foreground(), config.colors.footer_bar_background())\n } else {\n (foreground_color, background_color)\n };\n\n self.draw_ime_preview(point, fg, bg, &mut rects, config);\n }\n }\n\n if let Some(message) = message_buffer.message() {\n let search_offset = usize::from(search_state.regex().is_some());\n let text = message.text(&size_info);\n\n // Create a new rectangle for the background.\n let start_line = 
size_info.screen_lines() + search_offset;\n let y = size_info.cell_height().mul_add(start_line as f32, size_info.padding_y());\n\n let bg = match message.ty() {\n MessageType::Error => config.colors.normal.red,\n MessageType::Warning => config.colors.normal.yellow,\n };\n\n let x = 0;\n let width = size_info.width() as i32;\n let height = (size_info.height() - y) as i32;\n let message_bar_rect =\n RenderRect::new(x as f32, y, width as f32, height as f32, bg, 1.);\n\n // Push message_bar in the end, so it'll be above all other content.\n rects.push(message_bar_rect);\n\n // Always damage message bar, since it could have messages of the same size in it.\n self.damage_tracker.frame().add_viewport_rect(&size_info, x, y as i32, width, height);\n\n // Draw rectangles.\n self.renderer.draw_rects(&size_info, &metrics, rects);\n\n // Relay messages to the user.\n let glyph_cache = &mut self.glyph_cache;\n let fg = config.colors.primary.background;\n for (i, message_text) in text.iter().enumerate() {\n let point = Point::new(start_line + i, Column(0));\n self.renderer.draw_string(\n point,\n fg,\n bg,\n message_text.chars(),\n &size_info,\n glyph_cache,\n );\n }\n } else {\n // Draw rectangles.\n self.renderer.draw_rects(&size_info, &metrics, rects);\n }\n\n self.draw_render_timer(config);\n\n // Draw hyperlink uri preview.\n if has_highlighted_hint {\n let cursor_point = vi_cursor_point.or(Some(cursor_point));\n self.draw_hyperlink_preview(config, cursor_point, display_offset);\n }\n\n // Notify winit that we're about to present.\n self.window.pre_present_notify();\n\n // Highlight damage for debugging.\n if self.damage_tracker.debug {\n let damage = self.damage_tracker.shape_frame_damage(self.size_info.into());\n let mut rects = Vec::with_capacity(damage.len());\n self.highlight_damage(&mut rects);\n self.renderer.draw_rects(&self.size_info, &metrics, rects);\n }\n\n // Clearing debug highlights from the previous frame requires full redraw.\n self.swap_buffers();\n\n if 
matches!(self.raw_window_handle, RawWindowHandle::Xcb(_) | RawWindowHandle::Xlib(_)) {\n // On X11 `swap_buffers` does not block for vsync. However the next OpenGl command\n // will block to synchronize (this is `glClear` in Alacritty), which causes a\n // permanent one frame delay.\n self.renderer.finish();\n }\n\n // XXX: Request the new frame after swapping buffers, so the\n // time to finish OpenGL operations is accounted for in the timeout.\n if !matches!(self.raw_window_handle, RawWindowHandle::Wayland(_)) {\n self.request_frame(scheduler);\n }\n\n self.damage_tracker.swap_damage();\n }\n\n /// Update to a new configuration.\n pub fn update_config(&mut self, config: &UiConfig) {\n self.damage_tracker.debug = config.debug.highlight_damage;\n self.visual_bell.update_config(&config.bell);\n self.colors = List::from(&config.colors);\n }\n\n /// Update the mouse/vi mode cursor hint highlighting.\n ///\n /// This will return whether the highlighted hints changed.\n pub fn update_highlighted_hints<T>(\n &mut self,\n term: &Term<T>,\n config: &UiConfig,\n mouse: &Mouse,\n modifiers: ModifiersState,\n ) -> bool {\n // Update vi mode cursor hint.\n let vi_highlighted_hint = if term.mode().contains(TermMode::VI) {\n let mods = ModifiersState::all();\n let point = term.vi_mode_cursor.point;\n hint::highlighted_at(term, config, point, mods)\n } else {\n None\n };\n let mut dirty = vi_highlighted_hint != self.vi_highlighted_hint;\n self.vi_highlighted_hint = vi_highlighted_hint;\n self.vi_highlighted_hint_age = 0;\n\n // Force full redraw if the vi mode highlight was cleared.\n if dirty {\n self.damage_tracker.frame().mark_fully_damaged();\n }\n\n // Abort if mouse highlighting conditions are not met.\n if !mouse.inside_text_area || !term.selection.as_ref().map_or(true, Selection::is_empty) {\n if self.highlighted_hint.take().is_some() {\n self.damage_tracker.frame().mark_fully_damaged();\n dirty = true;\n }\n return dirty;\n }\n\n // Find highlighted hint at mouse 
position.\n let point = mouse.point(&self.size_info, term.grid().display_offset());\n let highlighted_hint = hint::highlighted_at(term, config, point, modifiers);\n\n // Update cursor shape.\n if highlighted_hint.is_some() {\n // If mouse changed the line, we should update the hyperlink preview, since the\n // highlighted hint could be disrupted by the old preview.\n dirty = self.hint_mouse_point.is_some_and(|p| p.line != point.line);\n self.hint_mouse_point = Some(point);\n self.window.set_mouse_cursor(CursorIcon::Pointer);\n } else if self.highlighted_hint.is_some() {\n self.hint_mouse_point = None;\n if term.mode().intersects(TermMode::MOUSE_MODE) && !term.mode().contains(TermMode::VI) {\n self.window.set_mouse_cursor(CursorIcon::Default);\n } else {\n self.window.set_mouse_cursor(CursorIcon::Text);\n }\n }\n\n let mouse_highlight_dirty = self.highlighted_hint != highlighted_hint;\n dirty |= mouse_highlight_dirty;\n self.highlighted_hint = highlighted_hint;\n self.highlighted_hint_age = 0;\n\n // Force full redraw if the mouse cursor highlight was changed.\n if mouse_highlight_dirty {\n self.damage_tracker.frame().mark_fully_damaged();\n }\n\n dirty\n }\n\n #[inline(never)]\n fn draw_ime_preview(\n &mut self,\n point: Point<usize>,\n fg: Rgb,\n bg: Rgb,\n rects: &mut Vec<RenderRect>,\n config: &UiConfig,\n ) {\n let preedit = match self.ime.preedit() {\n Some(preedit) => preedit,\n None => {\n // In case we don't have preedit, just set the popup point.\n self.window.update_ime_position(point, &self.size_info);\n return;\n },\n };\n\n let num_cols = self.size_info.columns();\n\n // Get the visible preedit.\n let visible_text: String = match (preedit.cursor_byte_offset, preedit.cursor_end_offset) {\n (Some(byte_offset), Some(end_offset)) if end_offset.0 > num_cols => StrShortener::new(\n &preedit.text[byte_offset.0..],\n num_cols,\n ShortenDirection::Right,\n Some(SHORTENER),\n ),\n _ => {\n StrShortener::new(&preedit.text, num_cols, ShortenDirection::Left, 
Some(SHORTENER))\n },\n }\n .collect();\n\n let visible_len = visible_text.chars().count();\n\n let end = cmp::min(point.column.0 + visible_len, num_cols);\n let start = end.saturating_sub(visible_len);\n\n let start = Point::new(point.line, Column(start));\n let end = Point::new(point.line, Column(end - 1));\n\n let glyph_cache = &mut self.glyph_cache;\n let metrics = glyph_cache.font_metrics();\n\n self.renderer.draw_string(\n start,\n fg,\n bg,\n visible_text.chars(),\n &self.size_info,\n glyph_cache,\n );\n\n // Damage preedit inside the terminal viewport.\n if point.line < self.size_info.screen_lines() {\n let damage = LineDamageBounds::new(start.line, 0, num_cols);\n self.damage_tracker.frame().damage_line(damage);\n self.damage_tracker.next_frame().damage_line(damage);\n }\n\n // Add underline for preedit text.\n let underline = RenderLine { start, end, color: fg };\n rects.extend(underline.rects(Flags::UNDERLINE, &metrics, &self.size_info));\n\n let ime_popup_point = match preedit.cursor_end_offset {\n Some(cursor_end_offset) => {\n // Use hollow block when multiple characters are changed at once.\n let (shape, width) = if let Some(width) =\n NonZeroU32::new((cursor_end_offset.0 - cursor_end_offset.1) as u32)\n {\n (CursorShape::HollowBlock, width)\n } else {\n (CursorShape::Beam, NonZeroU32::new(1).unwrap())\n };\n\n let cursor_column = Column(\n (end.column.0 as isize - cursor_end_offset.0 as isize + 1).max(0) as usize,\n );\n let cursor_point = Point::new(point.line, cursor_column);\n let cursor = RenderableCursor::new(cursor_point, shape, fg, width);\n rects.extend(cursor.rects(&self.size_info, config.cursor.thickness()));\n cursor_point\n },\n _ => end,\n };\n\n self.window.update_ime_position(ime_popup_point, &self.size_info);\n }\n\n /// Format search regex to account for the cursor and fullwidth characters.\n fn format_search(search_regex: &str, search_label: &str, max_width: usize) -> String {\n let label_len = search_label.len();\n\n // Skip 
`search_regex` formatting if only label is visible.\n if label_len > max_width {\n return search_label[..max_width].to_owned();\n }\n\n // The search string consists of `search_label` + `search_regex` + `cursor`.\n let mut bar_text = String::from(search_label);\n bar_text.extend(StrShortener::new(\n search_regex,\n max_width.wrapping_sub(label_len + 1),\n ShortenDirection::Left,\n Some(SHORTENER),\n ));\n\n // Add place for cursor.\n bar_text.push(' ');\n\n bar_text\n }\n\n /// Draw preview for the currently highlighted `Hyperlink`.\n #[inline(never)]\n fn draw_hyperlink_preview(\n &mut self,\n config: &UiConfig,\n cursor_point: Option<Point>,\n display_offset: usize,\n ) {\n let num_cols = self.size_info.columns();\n let uris: Vec<_> = self\n .highlighted_hint\n .iter()\n .chain(&self.vi_highlighted_hint)\n .filter_map(|hint| hint.hyperlink().map(|hyperlink| hyperlink.uri()))\n .map(|uri| StrShortener::new(uri, num_cols, ShortenDirection::Right, Some(SHORTENER)))\n .collect();\n\n if uris.is_empty() {\n return;\n }\n\n // The maximum amount of protected lines including the ones we'll show preview on.\n let max_protected_lines = uris.len() * 2;\n\n // Lines we shouldn't show preview on, because it'll obscure the highlighted hint.\n let mut protected_lines = Vec::with_capacity(max_protected_lines);\n if self.size_info.screen_lines() > max_protected_lines {\n // Prefer to show preview even when it'll likely obscure the highlighted hint, when\n // there's no place left for it.\n protected_lines.push(self.hint_mouse_point.map(|point| point.line));\n protected_lines.push(cursor_point.map(|point| point.line));\n }\n\n // Find the line in viewport we can draw preview on without obscuring protected lines.\n let viewport_bottom = self.size_info.bottommost_line() - Line(display_offset as i32);\n let viewport_top = viewport_bottom - (self.size_info.screen_lines() - 1);\n let uri_lines = (viewport_top.0..=viewport_bottom.0)\n .rev()\n .map(|line| Some(Line(line)))\n 
.filter_map(|line| {\n if protected_lines.contains(&line) {\n None\n } else {\n protected_lines.push(line);\n line\n }\n })\n .take(uris.len())\n .flat_map(|line| term::point_to_viewport(display_offset, Point::new(line, Column(0))));\n\n let fg = config.colors.footer_bar_foreground();\n let bg = config.colors.footer_bar_background();\n for (uri, point) in uris.into_iter().zip(uri_lines) {\n // Damage the uri preview.\n let damage = LineDamageBounds::new(point.line, point.column.0, num_cols);\n self.damage_tracker.frame().damage_line(damage);\n\n // Damage the uri preview for the next frame as well.\n self.damage_tracker.next_frame().damage_line(damage);\n\n self.renderer.draw_string(point, fg, bg, uri, &self.size_info, &mut self.glyph_cache);\n }\n }\n\n /// Draw current search regex.\n #[inline(never)]\n fn draw_search(&mut self, config: &UiConfig, text: &str) {\n // Assure text length is at least num_cols.\n let num_cols = self.size_info.columns();\n let text = format!(\"{text:<num_cols$}\");\n\n let point = Point::new(self.size_info.screen_lines(), Column(0));\n\n let fg = config.colors.footer_bar_foreground();\n let bg = config.colors.footer_bar_background();\n\n self.renderer.draw_string(\n point,\n fg,\n bg,\n text.chars(),\n &self.size_info,\n &mut self.glyph_cache,\n );\n }\n\n /// Draw render timer.\n #[inline(never)]\n fn draw_render_timer(&mut self, config: &UiConfig) {\n if !config.debug.render_timer {\n return;\n }\n\n let timing = format!(\"{:.3} usec\", self.meter.average());\n let point = Point::new(self.size_info.screen_lines().saturating_sub(2), Column(0));\n let fg = config.colors.primary.background;\n let bg = config.colors.normal.red;\n\n // Damage render timer for current and next frame.\n let damage = LineDamageBounds::new(point.line, point.column.0, timing.len());\n self.damage_tracker.frame().damage_line(damage);\n self.damage_tracker.next_frame().damage_line(damage);\n\n let glyph_cache = &mut self.glyph_cache;\n 
self.renderer.draw_string(point, fg, bg, timing.chars(), &self.size_info, glyph_cache);\n }\n\n /// Draw an indicator for the position of a line in history.\n #[inline(never)]\n fn draw_line_indicator(\n &mut self,\n config: &UiConfig,\n total_lines: usize,\n obstructed_column: Option<Column>,\n line: usize,\n ) {\n let columns = self.size_info.columns();\n let text = format!(\"[{}/{}]\", line, total_lines - 1);\n let column = Column(self.size_info.columns().saturating_sub(text.len()));\n let point = Point::new(0, column);\n\n // Damage the line indicator for current and next frame.\n let damage = LineDamageBounds::new(point.line, point.column.0, columns - 1);\n self.damage_tracker.frame().damage_line(damage);\n self.damage_tracker.next_frame().damage_line(damage);\n\n let colors = &config.colors;\n let fg = colors.line_indicator.foreground.unwrap_or(colors.primary.background);\n let bg = colors.line_indicator.background.unwrap_or(colors.primary.foreground);\n\n // Do not render anything if it would obscure the vi mode cursor.\n if obstructed_column.map_or(true, |obstructed_column| obstructed_column < column) {\n let glyph_cache = &mut self.glyph_cache;\n self.renderer.draw_string(point, fg, bg, text.chars(), &self.size_info, glyph_cache);\n }\n }\n\n /// Highlight damaged rects.\n ///\n /// This function is for debug purposes only.\n fn highlight_damage(&self, render_rects: &mut Vec<RenderRect>) {\n for damage_rect in &self.damage_tracker.shape_frame_damage(self.size_info.into()) {\n let x = damage_rect.x as f32;\n let height = damage_rect.height as f32;\n let width = damage_rect.width as f32;\n let y = damage_y_to_viewport_y(&self.size_info, damage_rect) as f32;\n let render_rect = RenderRect::new(x, y, width, height, DAMAGE_RECT_COLOR, 0.5);\n\n render_rects.push(render_rect);\n }\n }\n\n /// Check whether a hint highlight needs to be cleared.\n fn validate_hint_highlights(&mut self, display_offset: usize) {\n let frame = self.damage_tracker.frame();\n let hints 
= [\n (&mut self.highlighted_hint, &mut self.highlighted_hint_age, true),\n (&mut self.vi_highlighted_hint, &mut self.vi_highlighted_hint_age, false),\n ];\n\n let num_lines = self.size_info.screen_lines();\n for (hint, hint_age, reset_mouse) in hints {\n let (start, end) = match hint {\n Some(hint) => (*hint.bounds().start(), *hint.bounds().end()),\n None => continue,\n };\n\n // Ignore hints that were created this frame.\n *hint_age += 1;\n if *hint_age == 1 {\n continue;\n }\n\n // Convert hint bounds to viewport coordinates.\n let start = term::point_to_viewport(display_offset, start)\n .filter(|point| point.line < num_lines)\n .unwrap_or_default();\n let end = term::point_to_viewport(display_offset, end)\n .filter(|point| point.line < num_lines)\n .unwrap_or_else(|| Point::new(num_lines - 1, self.size_info.last_column()));\n\n // Clear invalidated hints.\n if frame.intersects(start, end) {\n if reset_mouse {\n self.window.set_mouse_cursor(CursorIcon::Default);\n }\n frame.mark_fully_damaged();\n *hint = None;\n }\n }\n }\n\n /// Request a new frame for a window on Wayland.\n fn request_frame(&mut self, scheduler: &mut Scheduler) {\n // Mark that we've used a frame.\n self.window.has_frame = false;\n\n // Get the display vblank interval.\n let monitor_vblank_interval = 1_000_000.\n / self\n .window\n .current_monitor()\n .and_then(|monitor| monitor.refresh_rate_millihertz())\n .unwrap_or(60_000) as f64;\n\n // Now convert it to micro seconds.\n let monitor_vblank_interval =\n Duration::from_micros((1000. * monitor_vblank_interval) as u64);\n\n let swap_timeout = self.frame_timer.compute_timeout(monitor_vblank_interval);\n\n let window_id = self.window.id();\n let timer_id = TimerId::new(Topic::Frame, window_id);\n let event = Event::new(EventType::Frame, window_id);\n\n scheduler.schedule(event, swap_timeout, false, timer_id);\n }\n}",
"class_signature": "impl Display"
} |
compute_timeout | alacritty-master/alacritty/src/display/mod.rs | pub fn compute_timeout(&mut self, refresh_interval: Duration) -> Duration {
let now = Instant::now();
// Handle refresh rate change.
if self.refresh_interval != refresh_interval {
self.base = now;
self.last_synced_timestamp = now;
self.refresh_interval = refresh_interval;
return refresh_interval;
}
let next_frame = self.last_synced_timestamp + self.refresh_interval;
if next_frame < now {
// Redraw immediately if we haven't drawn in over `refresh_interval` microseconds.
let elapsed_micros = (now - self.base).as_micros() as u64;
let refresh_micros = self.refresh_interval.as_micros() as u64;
self.last_synced_timestamp =
now - Duration::from_micros(elapsed_micros % refresh_micros);
Duration::ZERO
} else {
// Redraw on the next `refresh_interval` clock tick.
self.last_synced_timestamp = next_frame;
next_frame - now
}
} | //! The display subsystem including window management, font rasterization, and
//! GPU drawing.
use std::cmp;
use std::fmt::{self, Formatter};
use std::mem::{self, ManuallyDrop};
use std::num::NonZeroU32;
use std::ops::Deref;
use std::time::{Duration, Instant};
use glutin::config::GetGlConfig;
use glutin::context::{NotCurrentContext, PossiblyCurrentContext};
use glutin::display::GetGlDisplay;
use glutin::error::ErrorKind;
use glutin::prelude::*;
use glutin::surface::{Surface, SwapInterval, WindowSurface};
use log::{debug, info};
use parking_lot::MutexGuard;
use serde::{Deserialize, Serialize};
use winit::dpi::PhysicalSize;
use winit::keyboard::ModifiersState;
use winit::raw_window_handle::RawWindowHandle;
use winit::window::CursorIcon;
use crossfont::{Rasterize, Rasterizer, Size as FontSize};
use unicode_width::UnicodeWidthChar;
use alacritty_terminal::event::{EventListener, OnResize, WindowSize};
use alacritty_terminal::grid::Dimensions as TermDimensions;
use alacritty_terminal::index::{Column, Direction, Line, Point};
use alacritty_terminal::selection::Selection;
use alacritty_terminal::term::cell::Flags;
use alacritty_terminal::term::{
self, LineDamageBounds, Term, TermDamage, TermMode, MIN_COLUMNS, MIN_SCREEN_LINES,
};
use alacritty_terminal::vte::ansi::{CursorShape, NamedColor};
use crate::config::debug::RendererPreference;
use crate::config::font::Font;
use crate::config::window::Dimensions;
#[cfg(not(windows))]
use crate::config::window::StartupMode;
use crate::config::UiConfig;
use crate::display::bell::VisualBell;
use crate::display::color::{List, Rgb};
use crate::display::content::{RenderableContent, RenderableCursor};
use crate::display::cursor::IntoRects;
use crate::display::damage::{damage_y_to_viewport_y, DamageTracker};
use crate::display::hint::{HintMatch, HintState};
use crate::display::meter::Meter;
use crate::display::window::Window;
use crate::event::{Event, EventType, Mouse, SearchState};
use crate::message_bar::{MessageBuffer, MessageType};
use crate::renderer::rects::{RenderLine, RenderLines, RenderRect};
use crate::renderer::{self, platform, GlyphCache, Renderer};
use crate::scheduler::{Scheduler, TimerId, Topic};
use crate::string::{ShortenDirection, StrShortener};
pub mod color;
pub mod content;
pub mod cursor;
pub mod hint;
pub mod window;
mod bell;
mod damage;
mod meter;
/// Label for the forward terminal search bar.
const FORWARD_SEARCH_LABEL: &str = "Search: ";
/// Label for the backward terminal search bar.
const BACKWARD_SEARCH_LABEL: &str = "Backward Search: ";
/// The character used to shorten the visible text like uri preview or search regex.
const SHORTENER: char = '…';
/// Color which is used to highlight damaged rects when debugging.
const DAMAGE_RECT_COLOR: Rgb = Rgb::new(255, 0, 255);
/// Errors which can occur in the display subsystem.
///
/// Each variant wraps the error type of one of the subsystems the display
/// depends on: window management, font rasterization, rendering, and the GL
/// context.
#[derive(Debug)]
pub enum Error {
    /// Error with window management.
    Window(window::Error),
    /// Error dealing with fonts.
    Font(crossfont::Error),
    /// Error in renderer.
    Render(renderer::Error),
    /// Error during context operations.
    Context(glutin::error::Error),
}
impl std::error::Error for Error {
fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
match self {
Error::Window(err) => err.source(),
Error::Font(err) => err.source(),
Error::Render(err) => err.source(),
Error::Context(err) => err.source(),
}
}
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
match self {
Error::Window(err) => err.fmt(f),
Error::Font(err) => err.fmt(f),
Error::Render(err) => err.fmt(f),
Error::Context(err) => err.fmt(f),
}
}
}
impl From<window::Error> for Error {
fn from(val: window::Error) -> Self {
Error::Window(val)
}
}
impl From<crossfont::Error> for Error {
fn from(val: crossfont::Error) -> Self {
Error::Font(val)
}
}
impl From<renderer::Error> for Error {
fn from(val: renderer::Error) -> Self {
Error::Render(val)
}
}
impl From<glutin::error::Error> for Error {
fn from(val: glutin::error::Error) -> Self {
Error::Context(val)
}
}
/// Terminal size info.
///
/// Generic over the numeric type of the pixel dimensions; a `u32` variant can
/// be obtained from the `f32` one through `From`.
#[derive(Serialize, Deserialize, Debug, Copy, Clone, PartialEq, Eq)]
pub struct SizeInfo<T = f32> {
    /// Terminal window width.
    width: T,
    /// Terminal window height.
    height: T,
    /// Width of individual cell.
    cell_width: T,
    /// Height of individual cell.
    cell_height: T,
    /// Horizontal window padding.
    padding_x: T,
    /// Vertical window padding.
    padding_y: T,
    /// Number of lines in the viewport.
    screen_lines: usize,
    /// Number of columns in the viewport.
    columns: usize,
}
impl From<SizeInfo<f32>> for SizeInfo<u32> {
    /// Truncate the floating point size info to whole numbers.
    fn from(size_info: SizeInfo<f32>) -> Self {
        Self {
            width: size_info.width as u32,
            height: size_info.height as u32,
            cell_width: size_info.cell_width as u32,
            cell_height: size_info.cell_height as u32,
            padding_x: size_info.padding_x as u32,
            padding_y: size_info.padding_y as u32,
            screen_lines: size_info.screen_lines,
            // Fix: `columns` was incorrectly copied from `screen_lines`,
            // which swapped the grid width for its height in the converted
            // size (e.g. breaking damage shaping, which consumes this type).
            columns: size_info.columns,
        }
    }
}
impl From<SizeInfo<f32>> for WindowSize {
fn from(size_info: SizeInfo<f32>) -> Self {
Self {
num_cols: size_info.columns() as u16,
num_lines: size_info.screen_lines() as u16,
cell_width: size_info.cell_width() as u16,
cell_height: size_info.cell_height() as u16,
}
}
}
impl<T: Clone + Copy> SizeInfo<T> {
    /// Terminal window width.
    #[inline]
    pub fn width(&self) -> T {
        self.width
    }
    /// Terminal window height.
    #[inline]
    pub fn height(&self) -> T {
        self.height
    }
    /// Width of an individual cell.
    #[inline]
    pub fn cell_width(&self) -> T {
        self.cell_width
    }
    /// Height of an individual cell.
    #[inline]
    pub fn cell_height(&self) -> T {
        self.cell_height
    }
    /// Horizontal window padding.
    #[inline]
    pub fn padding_x(&self) -> T {
        self.padding_x
    }
    /// Vertical window padding.
    #[inline]
    pub fn padding_y(&self) -> T {
        self.padding_y
    }
}
impl SizeInfo<f32> {
    /// Create a new size info from window dimensions and cell metrics.
    ///
    /// The viewport is clamped to at least `MIN_SCREEN_LINES` lines and
    /// `MIN_COLUMNS` columns. With `dynamic_padding`, the leftover space that
    /// doesn't fit a whole cell is spread evenly around the terminal content.
    #[allow(clippy::too_many_arguments)]
    pub fn new(
        width: f32,
        height: f32,
        cell_width: f32,
        cell_height: f32,
        mut padding_x: f32,
        mut padding_y: f32,
        dynamic_padding: bool,
    ) -> SizeInfo {
        if dynamic_padding {
            padding_x = Self::dynamic_padding(padding_x.floor(), width, cell_width);
            padding_y = Self::dynamic_padding(padding_y.floor(), height, cell_height);
        }
        // Number of cells that fit after subtracting padding on both sides.
        let lines = (height - 2. * padding_y) / cell_height;
        let screen_lines = cmp::max(lines as usize, MIN_SCREEN_LINES);
        let columns = (width - 2. * padding_x) / cell_width;
        let columns = cmp::max(columns as usize, MIN_COLUMNS);
        SizeInfo {
            width,
            height,
            cell_width,
            cell_height,
            padding_x: padding_x.floor(),
            padding_y: padding_y.floor(),
            screen_lines,
            columns,
        }
    }
    /// Remove `count` lines from the viewport, keeping at least
    /// `MIN_SCREEN_LINES` lines (used for the message bar and search bar).
    #[inline]
    pub fn reserve_lines(&mut self, count: usize) {
        self.screen_lines = cmp::max(self.screen_lines.saturating_sub(count), MIN_SCREEN_LINES);
    }
    /// Check if coordinates are inside the terminal grid.
    ///
    /// The padding, message bar or search are not counted as part of the grid.
    #[inline]
    pub fn contains_point(&self, x: usize, y: usize) -> bool {
        x <= (self.padding_x + self.columns as f32 * self.cell_width) as usize
            && x > self.padding_x as usize
            && y <= (self.padding_y + self.screen_lines as f32 * self.cell_height) as usize
            && y > self.padding_y as usize
    }
    /// Calculate padding to spread it evenly around the terminal content.
    #[inline]
    fn dynamic_padding(padding: f32, dimension: f32, cell_dimension: f32) -> f32 {
        padding + ((dimension - 2. * padding) % cell_dimension) / 2.
    }
}
impl TermDimensions for SizeInfo {
    #[inline]
    fn columns(&self) -> usize {
        self.columns
    }
    #[inline]
    fn screen_lines(&self) -> usize {
        self.screen_lines
    }
    #[inline]
    fn total_lines(&self) -> usize {
        // `SizeInfo` only describes the viewport, so the total number of
        // lines matches the number of visible lines.
        self.screen_lines()
    }
}
/// Accumulated display changes, consumed by [`Display::handle_update`].
#[derive(Default, Clone, Debug, PartialEq, Eq)]
pub struct DisplayUpdate {
    /// Whether any change is pending.
    pub dirty: bool,
    /// Requested new window dimensions.
    dimensions: Option<PhysicalSize<u32>>,
    /// Whether the cursor requires a font cache update.
    cursor_dirty: bool,
    /// Requested font change.
    font: Option<Font>,
}
impl DisplayUpdate {
    /// Pending window dimensions, if any.
    pub fn dimensions(&self) -> Option<PhysicalSize<u32>> {
        self.dimensions
    }
    /// Pending font change, if any.
    pub fn font(&self) -> Option<&Font> {
        self.font.as_ref()
    }
    /// Whether the cursor was marked dirty.
    pub fn cursor_dirty(&self) -> bool {
        self.cursor_dirty
    }
    /// Queue new window dimensions.
    pub fn set_dimensions(&mut self, dimensions: PhysicalSize<u32>) {
        self.dimensions = Some(dimensions);
        self.dirty = true;
    }
    /// Queue a font change.
    pub fn set_font(&mut self, font: Font) {
        self.font = Some(font);
        self.dirty = true;
    }
    /// Mark the cursor as dirty, triggering a font cache update.
    pub fn set_cursor_dirty(&mut self) {
        self.cursor_dirty = true;
        self.dirty = true;
    }
}
/// The display wraps a window, font rasterizer, and GPU renderer.
pub struct Display {
    pub window: Window,
    pub size_info: SizeInfo,
    /// Hint highlighted by the mouse.
    pub highlighted_hint: Option<HintMatch>,
    /// Frames since hint highlight was created.
    highlighted_hint_age: usize,
    /// Hint highlighted by the vi mode cursor.
    pub vi_highlighted_hint: Option<HintMatch>,
    /// Frames since hint highlight was created.
    vi_highlighted_hint_age: usize,
    pub raw_window_handle: RawWindowHandle,
    /// UI cursor visibility for blinking.
    pub cursor_hidden: bool,
    pub visual_bell: VisualBell,
    /// Mapped RGB values for each terminal color.
    pub colors: List,
    /// State of the keyboard hints.
    pub hint_state: HintState,
    /// Unprocessed display updates.
    pub pending_update: DisplayUpdate,
    /// The renderer update that takes place only once before the actual rendering.
    pub pending_renderer_update: Option<RendererUpdate>,
    /// The ime on the given display.
    pub ime: Ime,
    /// The state of the timer for frame scheduling.
    pub frame_timer: FrameTimer,
    /// Damage tracker for the given display.
    pub damage_tracker: DamageTracker,
    /// Font size used by the window.
    pub font_size: FontSize,
    /// Mouse point position when highlighting hints.
    hint_mouse_point: Option<Point>,
    /// OpenGL renderer for cells and rects.
    renderer: ManuallyDrop<Renderer>,
    /// Renderer backend preference from the debug config.
    renderer_preference: Option<RendererPreference>,
    /// GL surface the renderer draws into.
    surface: ManuallyDrop<Surface<WindowSurface>>,
    /// GL context; dropped manually together with the renderer and surface.
    context: ManuallyDrop<PossiblyCurrentContext>,
    /// Cache of rasterized font glyphs.
    glyph_cache: GlyphCache,
    /// Draw-time measurements, used for the render timer.
    meter: Meter,
}
impl Display {
/// Create a display for the given window and OpenGL context.
///
/// This rasterizes the configured font, resizes the window to the configured
/// dimensions, creates the GL surface and renderer, and finally makes the
/// window visible.
pub fn new(
    window: Window,
    gl_context: NotCurrentContext,
    config: &UiConfig,
    _tabbed: bool,
) -> Result<Display, Error> {
    let raw_window_handle = window.raw_window_handle();
    let scale_factor = window.scale_factor as f32;
    let rasterizer = Rasterizer::new()?;
    // Scale the configured font size by the window's scale factor.
    let font_size = config.font.size().scale(scale_factor);
    debug!("Loading \"{}\" font", &config.font.normal().family);
    let font = config.font.clone().with_size(font_size);
    let mut glyph_cache = GlyphCache::new(rasterizer, &font)?;
    let metrics = glyph_cache.font_metrics();
    let (cell_width, cell_height) = compute_cell_size(config, &metrics);
    // Resize the window to account for the user configured size.
    if let Some(dimensions) = config.window.dimensions() {
        let size = window_size(config, dimensions, cell_width, cell_height, scale_factor);
        window.request_inner_size(size);
    }
    // Create the GL surface to draw into.
    let surface = platform::create_gl_surface(
        &gl_context,
        window.inner_size(),
        window.raw_window_handle(),
    )?;
    // Make the context current.
    let context = gl_context.make_current(&surface)?;
    // Create renderer.
    let mut renderer = Renderer::new(&context, config.debug.renderer)?;
    // Load font common glyphs to accelerate rendering.
    debug!("Filling glyph cache with common glyphs");
    renderer.with_loader(|mut api| {
        glyph_cache.reset_glyph_cache(&mut api);
    });
    let padding = config.window.padding(window.scale_factor as f32);
    let viewport_size = window.inner_size();
    // Create new size with at least one column and row.
    let size_info = SizeInfo::new(
        viewport_size.width as f32,
        viewport_size.height as f32,
        cell_width,
        cell_height,
        padding.0,
        padding.1,
        config.window.dynamic_padding && config.window.dimensions().is_none(),
    );
    info!("Cell size: {} x {}", cell_width, cell_height);
    info!("Padding: {} x {}", size_info.padding_x(), size_info.padding_y());
    info!("Width: {}, Height: {}", size_info.width(), size_info.height());
    // Update OpenGL projection.
    renderer.resize(&size_info);
    // Clear screen.
    let background_color = config.colors.primary.background;
    renderer.clear(background_color, config.window_opacity());
    // Disable shadows for transparent windows on macOS.
    #[cfg(target_os = "macos")]
    window.set_has_shadow(config.window_opacity() >= 1.0);
    let is_wayland = matches!(raw_window_handle, RawWindowHandle::Wayland(_));
    // On Wayland we can safely ignore this call, since the window isn't visible until you
    // actually draw something into it and commit those changes.
    if !is_wayland {
        surface.swap_buffers(&context).expect("failed to swap buffers.");
        renderer.finish();
    }
    // Set resize increments for the newly created window.
    if config.window.resize_increments {
        window.set_resize_increments(PhysicalSize::new(cell_width, cell_height));
    }
    window.set_visible(true);
    // Always focus new windows, even if no Alacritty window is currently focused.
    #[cfg(target_os = "macos")]
    window.focus_window();
    #[allow(clippy::single_match)]
    #[cfg(not(windows))]
    if !_tabbed {
        match config.window.startup_mode {
            #[cfg(target_os = "macos")]
            StartupMode::SimpleFullscreen => window.set_simple_fullscreen(true),
            StartupMode::Maximized if !is_wayland => window.set_maximized(true),
            _ => (),
        }
    }
    let hint_state = HintState::new(config.hints.alphabet());
    let mut damage_tracker = DamageTracker::new(size_info.screen_lines(), size_info.columns());
    damage_tracker.debug = config.debug.highlight_damage;
    // Disable vsync.
    if let Err(err) = surface.set_swap_interval(&context, SwapInterval::DontWait) {
        info!("Failed to disable vsync: {}", err);
    }
    Ok(Self {
        context: ManuallyDrop::new(context),
        visual_bell: VisualBell::from(&config.bell),
        renderer: ManuallyDrop::new(renderer),
        renderer_preference: config.debug.renderer,
        surface: ManuallyDrop::new(surface),
        colors: List::from(&config.colors),
        frame_timer: FrameTimer::new(),
        raw_window_handle,
        damage_tracker,
        glyph_cache,
        hint_state,
        size_info,
        font_size,
        window,
        pending_renderer_update: Default::default(),
        vi_highlighted_hint_age: Default::default(),
        highlighted_hint_age: Default::default(),
        vi_highlighted_hint: Default::default(),
        highlighted_hint: Default::default(),
        hint_mouse_point: Default::default(),
        pending_update: Default::default(),
        cursor_hidden: Default::default(),
        meter: Default::default(),
        ime: Default::default(),
    })
}
/// Accessor for the window's OpenGL context.
#[inline]
pub fn gl_context(&self) -> &PossiblyCurrentContext {
    &self.context
}
/// Deactivate this window's OpenGL context, if it is currently active.
pub fn make_not_current(&mut self) {
    // Nothing to do when the context is already inactive.
    if !self.context.is_current() {
        return;
    }
    self.context.make_not_current_in_place().expect("failed to disable context");
}
/// Make this window's OpenGL context current, recovering from context loss.
///
/// When the context was lost (e.g. after a GPU reset), both the context and
/// the renderer are recreated and the frame is marked as fully damaged.
pub fn make_current(&mut self) {
    let is_current = self.context.is_current();
    // Attempt to make the context current if it's not.
    let context_loss = if is_current {
        self.renderer.was_context_reset()
    } else {
        match self.context.make_current(&self.surface) {
            Err(err) if err.error_kind() == ErrorKind::ContextLost => {
                info!("Context lost for window {:?}", self.window.id());
                true
            },
            _ => false,
        }
    };
    if !context_loss {
        return;
    }
    let gl_display = self.context.display();
    let gl_config = self.context.config();
    let raw_window_handle = Some(self.window.raw_window_handle());
    let context = platform::create_gl_context(&gl_display, &gl_config, raw_window_handle)
        .expect("failed to recreate context.");
    // Drop the old context and renderer.
    unsafe {
        ManuallyDrop::drop(&mut self.renderer);
        ManuallyDrop::drop(&mut self.context);
    }
    // Activate new context.
    let context = context.treat_as_possibly_current();
    self.context = ManuallyDrop::new(context);
    // Fix: corrected typo in the panic message ("reativate" -> "reactivate").
    self.context.make_current(&self.surface).expect("failed to reactivate context after reset.");
    // Recreate renderer.
    let renderer = Renderer::new(&self.context, self.renderer_preference)
        .expect("failed to recreate renderer after reset");
    self.renderer = ManuallyDrop::new(renderer);
    // Resize the renderer.
    self.renderer.resize(&self.size_info);
    self.reset_glyph_cache();
    self.damage_tracker.frame().mark_fully_damaged();
    debug!("Recovered window {:?} from gpu reset", self.window.id());
}
/// Swap the back and front buffers of the window's GL surface.
///
/// On Wayland with an EGL surface, only the damaged region is swapped —
/// unless damage highlighting is enabled, in which case the whole buffer is
/// swapped so the debug rects stay visible.
fn swap_buffers(&self) {
    #[allow(clippy::single_match)]
    let res = match (self.surface.deref(), &self.context.deref()) {
        #[cfg(not(any(target_os = "macos", windows)))]
        (Surface::Egl(surface), PossiblyCurrentContext::Egl(context))
            if matches!(self.raw_window_handle, RawWindowHandle::Wayland(_))
                && !self.damage_tracker.debug =>
        {
            let damage = self.damage_tracker.shape_frame_damage(self.size_info.into());
            surface.swap_buffers_with_damage(context, &damage)
        },
        (surface, context) => surface.swap_buffers(context),
    };
    if let Err(err) = res {
        debug!("error calling swap_buffers: {}", err);
    }
}
/// Update font size and cell dimensions.
///
/// This will return a tuple of the cell width and height.
fn update_font_size(
    glyph_cache: &mut GlyphCache,
    config: &UiConfig,
    font: &Font,
) -> (f32, f32) {
    // The result of the font update is intentionally ignored.
    let _ = glyph_cache.update_font_size(font);
    // Recompute the cell sizes from the (possibly new) font metrics.
    let metrics = glyph_cache.font_metrics();
    compute_cell_size(config, &metrics)
}
/// Reset glyph cache.
///
/// Rebuilds the cached glyphs through the renderer's glyph loader.
fn reset_glyph_cache(&mut self) {
    let cache = &mut self.glyph_cache;
    self.renderer.with_loader(|mut api| {
        cache.reset_glyph_cache(&mut api);
    });
}
// XXX: this function must not call to any `OpenGL` related tasks. Renderer updates are
// performed in [`Self::process_renderer_update`] right before drawing.
//
/// Process update events.
///
/// Applies pending font and dimension changes, recomputes the viewport size
/// (reserving lines for the message bar and search bar), and resizes the PTY,
/// terminal, and damage tracker when the grid dimensions changed.
pub fn handle_update<T>(
    &mut self,
    terminal: &mut Term<T>,
    pty_resize_handle: &mut dyn OnResize,
    message_buffer: &MessageBuffer,
    search_state: &mut SearchState,
    config: &UiConfig,
) where
    T: EventListener,
{
    let pending_update = mem::take(&mut self.pending_update);
    let (mut cell_width, mut cell_height) =
        (self.size_info.cell_width(), self.size_info.cell_height());
    // Font and cursor changes both require rebuilding the font cache.
    if pending_update.font().is_some() || pending_update.cursor_dirty() {
        let renderer_update = self.pending_renderer_update.get_or_insert(Default::default());
        renderer_update.clear_font_cache = true
    }
    // Update font size and cell dimensions.
    if let Some(font) = pending_update.font() {
        let cell_dimensions = Self::update_font_size(&mut self.glyph_cache, config, font);
        cell_width = cell_dimensions.0;
        cell_height = cell_dimensions.1;
        info!("Cell size: {} x {}", cell_width, cell_height);
        // Mark entire terminal as damaged since glyph size could change without cell size
        // changes.
        self.damage_tracker.frame().mark_fully_damaged();
    }
    let (mut width, mut height) = (self.size_info.width(), self.size_info.height());
    if let Some(dimensions) = pending_update.dimensions() {
        width = dimensions.width as f32;
        height = dimensions.height as f32;
    }
    let padding = config.window.padding(self.window.scale_factor as f32);
    let mut new_size = SizeInfo::new(
        width,
        height,
        cell_width,
        cell_height,
        padding.0,
        padding.1,
        config.window.dynamic_padding,
    );
    // Update number of column/lines in the viewport.
    let search_active = search_state.history_index.is_some();
    let message_bar_lines = message_buffer.message().map_or(0, |m| m.text(&new_size).len());
    let search_lines = usize::from(search_active);
    new_size.reserve_lines(message_bar_lines + search_lines);
    // Update resize increments.
    if config.window.resize_increments {
        self.window.set_resize_increments(PhysicalSize::new(cell_width, cell_height));
    }
    // Resize the terminal when its dimensions have changed.
    if self.size_info.screen_lines() != new_size.screen_lines
        || self.size_info.columns() != new_size.columns()
    {
        // Resize PTY.
        pty_resize_handle.on_resize(new_size.into());
        // Resize terminal.
        terminal.resize(new_size);
        // Resize damage tracking.
        self.damage_tracker.resize(new_size.screen_lines(), new_size.columns());
    }
    // Check if dimensions have changed.
    if new_size != self.size_info {
        // Queue renderer update.
        let renderer_update = self.pending_renderer_update.get_or_insert(Default::default());
        renderer_update.resize = true;
        // Clear focused search match.
        search_state.clear_focused_match();
    }
    self.size_info = new_size;
}
// NOTE: Renderer updates are split off, since platforms like Wayland require resize and other
// OpenGL operations to be performed right before rendering. Otherwise they could lock the
// back buffer and render with the previous state. This also solves flickering during resizes.
//
/// Update the state of the renderer.
///
/// Applies the pending surface resize and font cache reset, if any, right
/// before drawing. Does nothing when no renderer update is queued.
pub fn process_renderer_update(&mut self) {
    let renderer_update = match self.pending_renderer_update.take() {
        Some(renderer_update) => renderer_update,
        _ => return,
    };
    // Resize renderer.
    if renderer_update.resize {
        let width = NonZeroU32::new(self.size_info.width() as u32).unwrap();
        let height = NonZeroU32::new(self.size_info.height() as u32).unwrap();
        self.surface.resize(&self.context, width, height);
    }
    // Ensure we're modifying the correct OpenGL context.
    self.make_current();
    if renderer_update.clear_font_cache {
        self.reset_glyph_cache();
    }
    self.renderer.resize(&self.size_info);
    info!("Padding: {} x {}", self.size_info.padding_x(), self.size_info.padding_y());
    info!("Width: {}, Height: {}", self.size_info.width(), self.size_info.height());
}
/// Draw the screen.
///
/// A reference to Term whose state is being drawn must be provided.
///
/// This call may block if vsync is enabled.
pub fn draw<T: EventListener>(
&mut self,
mut terminal: MutexGuard<'_, Term<T>>,
scheduler: &mut Scheduler,
message_buffer: &MessageBuffer,
config: &UiConfig,
search_state: &mut SearchState,
) {
// Collect renderable content before the terminal is dropped.
let mut content = RenderableContent::new(config, self, &terminal, search_state);
let mut grid_cells = Vec::new();
for cell in &mut content {
grid_cells.push(cell);
}
let selection_range = content.selection_range();
let foreground_color = content.color(NamedColor::Foreground as usize);
let background_color = content.color(NamedColor::Background as usize);
let display_offset = content.display_offset();
let cursor = content.cursor();
let cursor_point = terminal.grid().cursor.point;
let total_lines = terminal.grid().total_lines();
let metrics = self.glyph_cache.font_metrics();
let size_info = self.size_info;
let vi_mode = terminal.mode().contains(TermMode::VI);
let vi_cursor_point = if vi_mode { Some(terminal.vi_mode_cursor.point) } else { None };
// Add damage from the terminal.
match terminal.damage() {
TermDamage::Full => self.damage_tracker.frame().mark_fully_damaged(),
TermDamage::Partial(damaged_lines) => {
for damage in damaged_lines {
self.damage_tracker.frame().damage_line(damage);
}
},
}
terminal.reset_damage();
// Drop terminal as early as possible to free lock.
drop(terminal);
// Invalidate highlighted hints if grid has changed.
self.validate_hint_highlights(display_offset);
// Add damage from alacritty's UI elements overlapping terminal.
let requires_full_damage = self.visual_bell.intensity() != 0.
|| self.hint_state.active()
|| search_state.regex().is_some();
if requires_full_damage {
self.damage_tracker.frame().mark_fully_damaged();
self.damage_tracker.next_frame().mark_fully_damaged();
}
let vi_cursor_viewport_point =
vi_cursor_point.and_then(|cursor| term::point_to_viewport(display_offset, cursor));
self.damage_tracker.damage_vi_cursor(vi_cursor_viewport_point);
self.damage_tracker.damage_selection(selection_range, display_offset);
// Make sure this window's OpenGL context is active.
self.make_current();
self.renderer.clear(background_color, config.window_opacity());
let mut lines = RenderLines::new();
// Optimize loop hint comparator.
let has_highlighted_hint =
self.highlighted_hint.is_some() || self.vi_highlighted_hint.is_some();
// Draw grid.
{
let _sampler = self.meter.sampler();
// Ensure macOS hasn't reset our viewport.
#[cfg(target_os = "macos")]
self.renderer.set_viewport(&size_info);
let glyph_cache = &mut self.glyph_cache;
let highlighted_hint = &self.highlighted_hint;
let vi_highlighted_hint = &self.vi_highlighted_hint;
let damage_tracker = &mut self.damage_tracker;
let cells = grid_cells.into_iter().map(|mut cell| {
// Underline hints hovered by mouse or vi mode cursor.
if has_highlighted_hint {
let point = term::viewport_to_point(display_offset, cell.point);
let hyperlink = cell.extra.as_ref().and_then(|extra| extra.hyperlink.as_ref());
let should_highlight = |hint: &Option<HintMatch>| {
hint.as_ref().is_some_and(|hint| hint.should_highlight(point, hyperlink))
};
if should_highlight(highlighted_hint) || should_highlight(vi_highlighted_hint) {
damage_tracker.frame().damage_point(cell.point);
cell.flags.insert(Flags::UNDERLINE);
}
}
// Update underline/strikeout.
lines.update(&cell);
cell
});
self.renderer.draw_cells(&size_info, glyph_cache, cells);
}
let mut rects = lines.rects(&metrics, &size_info);
if let Some(vi_cursor_point) = vi_cursor_point {
// Indicate vi mode by showing the cursor's position in the top right corner.
let line = (-vi_cursor_point.line.0 + size_info.bottommost_line().0) as usize;
let obstructed_column = Some(vi_cursor_point)
.filter(|point| point.line == -(display_offset as i32))
.map(|point| point.column);
self.draw_line_indicator(config, total_lines, obstructed_column, line);
} else if search_state.regex().is_some() {
// Show current display offset in vi-less search to indicate match position.
self.draw_line_indicator(config, total_lines, None, display_offset);
};
// Draw cursor.
rects.extend(cursor.rects(&size_info, config.cursor.thickness()));
// Push visual bell after url/underline/strikeout rects.
let visual_bell_intensity = self.visual_bell.intensity();
if visual_bell_intensity != 0. {
let visual_bell_rect = RenderRect::new(
0.,
0.,
size_info.width(),
size_info.height(),
config.bell.color,
visual_bell_intensity as f32,
);
rects.push(visual_bell_rect);
}
// Handle IME positioning and search bar rendering.
let ime_position = match search_state.regex() {
Some(regex) => {
let search_label = match search_state.direction() {
Direction::Right => FORWARD_SEARCH_LABEL,
Direction::Left => BACKWARD_SEARCH_LABEL,
};
let search_text = Self::format_search(regex, search_label, size_info.columns());
// Render the search bar.
self.draw_search(config, &search_text);
// Draw search bar cursor.
let line = size_info.screen_lines();
let column = Column(search_text.chars().count() - 1);
// Add cursor to search bar if IME is not active.
if self.ime.preedit().is_none() {
let fg = config.colors.footer_bar_foreground();
let shape = CursorShape::Underline;
let cursor_width = NonZeroU32::new(1).unwrap();
let cursor =
RenderableCursor::new(Point::new(line, column), shape, fg, cursor_width);
rects.extend(cursor.rects(&size_info, config.cursor.thickness()));
}
Some(Point::new(line, column))
},
None => {
let num_lines = self.size_info.screen_lines();
match vi_cursor_viewport_point {
None => term::point_to_viewport(display_offset, cursor_point)
.filter(|point| point.line < num_lines),
point => point,
}
},
};
// Handle IME.
if self.ime.is_enabled() {
if let Some(point) = ime_position {
let (fg, bg) = if search_state.regex().is_some() {
(config.colors.footer_bar_foreground(), config.colors.footer_bar_background())
} else {
(foreground_color, background_color)
};
self.draw_ime_preview(point, fg, bg, &mut rects, config);
}
}
if let Some(message) = message_buffer.message() {
let search_offset = usize::from(search_state.regex().is_some());
let text = message.text(&size_info);
// Create a new rectangle for the background.
let start_line = size_info.screen_lines() + search_offset;
let y = size_info.cell_height().mul_add(start_line as f32, size_info.padding_y());
let bg = match message.ty() {
MessageType::Error => config.colors.normal.red,
MessageType::Warning => config.colors.normal.yellow,
};
let x = 0;
let width = size_info.width() as i32;
let height = (size_info.height() - y) as i32;
let message_bar_rect =
RenderRect::new(x as f32, y, width as f32, height as f32, bg, 1.);
// Push message_bar in the end, so it'll be above all other content.
rects.push(message_bar_rect);
// Always damage message bar, since it could have messages of the same size in it.
self.damage_tracker.frame().add_viewport_rect(&size_info, x, y as i32, width, height);
// Draw rectangles.
self.renderer.draw_rects(&size_info, &metrics, rects);
// Relay messages to the user.
let glyph_cache = &mut self.glyph_cache;
let fg = config.colors.primary.background;
for (i, message_text) in text.iter().enumerate() {
let point = Point::new(start_line + i, Column(0));
self.renderer.draw_string(
point,
fg,
bg,
message_text.chars(),
&size_info,
glyph_cache,
);
}
} else {
// Draw rectangles.
self.renderer.draw_rects(&size_info, &metrics, rects);
}
self.draw_render_timer(config);
// Draw hyperlink uri preview.
if has_highlighted_hint {
let cursor_point = vi_cursor_point.or(Some(cursor_point));
self.draw_hyperlink_preview(config, cursor_point, display_offset);
}
// Notify winit that we're about to present.
self.window.pre_present_notify();
// Highlight damage for debugging.
if self.damage_tracker.debug {
let damage = self.damage_tracker.shape_frame_damage(self.size_info.into());
let mut rects = Vec::with_capacity(damage.len());
self.highlight_damage(&mut rects);
self.renderer.draw_rects(&self.size_info, &metrics, rects);
}
// Clearing debug highlights from the previous frame requires full redraw.
self.swap_buffers();
if matches!(self.raw_window_handle, RawWindowHandle::Xcb(_) | RawWindowHandle::Xlib(_)) {
// On X11 `swap_buffers` does not block for vsync. However the next OpenGl command
// will block to synchronize (this is `glClear` in Alacritty), which causes a
// permanent one frame delay.
self.renderer.finish();
}
// XXX: Request the new frame after swapping buffers, so the
// time to finish OpenGL operations is accounted for in the timeout.
if !matches!(self.raw_window_handle, RawWindowHandle::Wayland(_)) {
self.request_frame(scheduler);
}
self.damage_tracker.swap_damage();
}
/// Update to a new configuration.
pub fn update_config(&mut self, config: &UiConfig) {
    // Propagate the relevant configuration pieces to each display subsystem.
    self.visual_bell.update_config(&config.bell);
    self.colors = List::from(&config.colors);
    self.damage_tracker.debug = config.debug.highlight_damage;
}
/// Update the mouse/vi mode cursor hint highlighting.
///
/// This will return whether the highlighted hints changed.
pub fn update_highlighted_hints<T>(
    &mut self,
    term: &Term<T>,
    config: &UiConfig,
    mouse: &Mouse,
    modifiers: ModifiersState,
) -> bool {
    // Update vi mode cursor hint.
    let vi_highlighted_hint = if term.mode().contains(TermMode::VI) {
        // Vi-mode hints are looked up as if every modifier were held.
        let mods = ModifiersState::all();
        let point = term.vi_mode_cursor.point;
        hint::highlighted_at(term, config, point, mods)
    } else {
        None
    };
    let mut dirty = vi_highlighted_hint != self.vi_highlighted_hint;
    self.vi_highlighted_hint = vi_highlighted_hint;
    // Age is reset so `validate_hint_highlights` ignores this hint for one frame.
    self.vi_highlighted_hint_age = 0;
    // Force full redraw if the vi mode highlight was cleared.
    if dirty {
        self.damage_tracker.frame().mark_fully_damaged();
    }
    // Abort if mouse highlighting conditions are not met.
    if !mouse.inside_text_area || !term.selection.as_ref().map_or(true, Selection::is_empty) {
        // Drop any existing mouse highlight and report the change.
        if self.highlighted_hint.take().is_some() {
            self.damage_tracker.frame().mark_fully_damaged();
            dirty = true;
        }
        return dirty;
    }
    // Find highlighted hint at mouse position.
    let point = mouse.point(&self.size_info, term.grid().display_offset());
    let highlighted_hint = hint::highlighted_at(term, config, point, modifiers);
    // Update cursor shape.
    if highlighted_hint.is_some() {
        // If mouse changed the line, we should update the hyperlink preview, since the
        // highlighted hint could be disrupted by the old preview.
        // NOTE(review): this assignment overwrites the vi-hint dirty flag instead of
        // OR-ing into it; full damage was already recorded above if the vi hint changed,
        // but the returned `dirty` value loses that bit — confirm intended.
        dirty = self.hint_mouse_point.is_some_and(|p| p.line != point.line);
        self.hint_mouse_point = Some(point);
        self.window.set_mouse_cursor(CursorIcon::Pointer);
    } else if self.highlighted_hint.is_some() {
        // Hint no longer hovered: restore the cursor icon appropriate for the mode.
        self.hint_mouse_point = None;
        if term.mode().intersects(TermMode::MOUSE_MODE) && !term.mode().contains(TermMode::VI) {
            self.window.set_mouse_cursor(CursorIcon::Default);
        } else {
            self.window.set_mouse_cursor(CursorIcon::Text);
        }
    }
    let mouse_highlight_dirty = self.highlighted_hint != highlighted_hint;
    dirty |= mouse_highlight_dirty;
    self.highlighted_hint = highlighted_hint;
    self.highlighted_hint_age = 0;
    // Force full redraw if the mouse cursor highlight was changed.
    if mouse_highlight_dirty {
        self.damage_tracker.frame().mark_fully_damaged();
    }
    dirty
}
/// Draw the IME preedit overlay at `point` and reposition the IME popup.
///
/// With no preedit active this only updates the popup position. Otherwise the visible
/// portion of the preedit text is drawn, underlined, damaged for two frames, and a
/// cursor is rendered inside it when the preedit reports cursor offsets.
#[inline(never)]
fn draw_ime_preview(
    &mut self,
    point: Point<usize>,
    fg: Rgb,
    bg: Rgb,
    rects: &mut Vec<RenderRect>,
    config: &UiConfig,
) {
    let preedit = match self.ime.preedit() {
        Some(preedit) => preedit,
        None => {
            // In case we don't have preedit, just set the popup point.
            self.window.update_ime_position(point, &self.size_info);
            return;
        },
    };
    let num_cols = self.size_info.columns();
    // Get the visible preedit.
    let visible_text: String = match (preedit.cursor_byte_offset, preedit.cursor_end_offset) {
        // Cursor is further than a full terminal width from the preedit end: clip from
        // the cursor's byte offset instead, shortening on the right.
        (Some(byte_offset), Some(end_offset)) if end_offset.0 > num_cols => StrShortener::new(
            &preedit.text[byte_offset.0..],
            num_cols,
            ShortenDirection::Right,
            Some(SHORTENER),
        ),
        _ => {
            StrShortener::new(&preedit.text, num_cols, ShortenDirection::Left, Some(SHORTENER))
        },
    }
    .collect();
    // Right-align the preedit at `point`, clamped to the terminal width.
    let visible_len = visible_text.chars().count();
    let end = cmp::min(point.column.0 + visible_len, num_cols);
    let start = end.saturating_sub(visible_len);
    let start = Point::new(point.line, Column(start));
    let end = Point::new(point.line, Column(end - 1));
    let glyph_cache = &mut self.glyph_cache;
    let metrics = glyph_cache.font_metrics();
    self.renderer.draw_string(
        start,
        fg,
        bg,
        visible_text.chars(),
        &self.size_info,
        glyph_cache,
    );
    // Damage preedit inside the terminal viewport.
    // Damaged on the next frame as well, so the overlay is cleaned up once it moves.
    if point.line < self.size_info.screen_lines() {
        let damage = LineDamageBounds::new(start.line, 0, num_cols);
        self.damage_tracker.frame().damage_line(damage);
        self.damage_tracker.next_frame().damage_line(damage);
    }
    // Add underline for preedit text.
    let underline = RenderLine { start, end, color: fg };
    rects.extend(underline.rects(Flags::UNDERLINE, &metrics, &self.size_info));
    let ime_popup_point = match preedit.cursor_end_offset {
        Some(cursor_end_offset) => {
            // Use hollow block when multiple characters are changed at once.
            let (shape, width) = if let Some(width) =
                NonZeroU32::new((cursor_end_offset.0 - cursor_end_offset.1) as u32)
            {
                (CursorShape::HollowBlock, width)
            } else {
                (CursorShape::Beam, NonZeroU32::new(1).unwrap())
            };
            // Offsets are measured from the end of the preedit, so count back from `end`.
            let cursor_column = Column(
                (end.column.0 as isize - cursor_end_offset.0 as isize + 1).max(0) as usize,
            );
            let cursor_point = Point::new(point.line, cursor_column);
            let cursor = RenderableCursor::new(cursor_point, shape, fg, width);
            rects.extend(cursor.rects(&self.size_info, config.cursor.thickness()));
            cursor_point
        },
        _ => end,
    };
    self.window.update_ime_position(ime_popup_point, &self.size_info);
}
/// Format search regex to account for the cursor and fullwidth characters.
///
/// Produces `search_label` + (shortened) `search_regex` + one trailing space
/// that acts as the cursor cell.
fn format_search(search_regex: &str, search_label: &str, max_width: usize) -> String {
    // When the label alone overflows the bar, truncate it and show nothing else.
    let label_len = search_label.len();
    if label_len > max_width {
        return search_label[..max_width].to_owned();
    }

    // Reserve one column for the cursor and shorten the regex from the left.
    let regex_budget = max_width.wrapping_sub(label_len + 1);
    let shortened =
        StrShortener::new(search_regex, regex_budget, ShortenDirection::Left, Some(SHORTENER));

    let mut bar_text = String::with_capacity(max_width);
    bar_text.push_str(search_label);
    bar_text.extend(shortened);
    // Trailing space is the cursor's cell.
    bar_text.push(' ');
    bar_text
}
/// Draw preview for the currently highlighted `Hyperlink`.
///
/// One preview line is drawn per highlighted hyperlink URI, starting from the bottom
/// of the viewport, skipping lines that would obscure the hint or the cursor.
#[inline(never)]
fn draw_hyperlink_preview(
    &mut self,
    config: &UiConfig,
    cursor_point: Option<Point>,
    display_offset: usize,
) {
    let num_cols = self.size_info.columns();
    // Collect the (shortened) URIs of both the mouse and vi highlighted hints.
    let uris: Vec<_> = self
        .highlighted_hint
        .iter()
        .chain(&self.vi_highlighted_hint)
        .filter_map(|hint| hint.hyperlink().map(|hyperlink| hyperlink.uri()))
        .map(|uri| StrShortener::new(uri, num_cols, ShortenDirection::Right, Some(SHORTENER)))
        .collect();
    if uris.is_empty() {
        return;
    }
    // The maximum amount of protected lines including the ones we'll show preview on.
    let max_protected_lines = uris.len() * 2;
    // Lines we shouldn't show preview on, because it'll obscure the highlighted hint.
    let mut protected_lines = Vec::with_capacity(max_protected_lines);
    if self.size_info.screen_lines() > max_protected_lines {
        // Prefer to show preview even when it'll likely obscure the highlighted hint, when
        // there's no place left for it.
        protected_lines.push(self.hint_mouse_point.map(|point| point.line));
        protected_lines.push(cursor_point.map(|point| point.line));
    }
    // Find the line in viewport we can draw preview on without obscuring protected lines.
    let viewport_bottom = self.size_info.bottommost_line() - Line(display_offset as i32);
    let viewport_top = viewport_bottom - (self.size_info.screen_lines() - 1);
    // Walk from the bottom up; each chosen line also becomes protected so two URIs
    // never share a line.
    let uri_lines = (viewport_top.0..=viewport_bottom.0)
        .rev()
        .map(|line| Some(Line(line)))
        .filter_map(|line| {
            if protected_lines.contains(&line) {
                None
            } else {
                protected_lines.push(line);
                line
            }
        })
        .take(uris.len())
        .flat_map(|line| term::point_to_viewport(display_offset, Point::new(line, Column(0))));
    let fg = config.colors.footer_bar_foreground();
    let bg = config.colors.footer_bar_background();
    for (uri, point) in uris.into_iter().zip(uri_lines) {
        // Damage the uri preview.
        let damage = LineDamageBounds::new(point.line, point.column.0, num_cols);
        self.damage_tracker.frame().damage_line(damage);
        // Damage the uri preview for the next frame as well.
        self.damage_tracker.next_frame().damage_line(damage);
        self.renderer.draw_string(point, fg, bg, uri, &self.size_info, &mut self.glyph_cache);
    }
}
/// Draw current search regex.
#[inline(never)]
fn draw_search(&mut self, config: &UiConfig, text: &str) {
    // Pad with spaces so the footer bar spans the full terminal width.
    let columns = self.size_info.columns();
    let padded = format!("{text:<columns$}");

    // The search bar occupies the line right below the terminal viewport.
    let origin = Point::new(self.size_info.screen_lines(), Column(0));
    let fg = config.colors.footer_bar_foreground();
    let bg = config.colors.footer_bar_background();

    self.renderer.draw_string(
        origin,
        fg,
        bg,
        padded.chars(),
        &self.size_info,
        &mut self.glyph_cache,
    );
}
/// Draw render timer.
#[inline(never)]
fn draw_render_timer(&mut self, config: &UiConfig) {
    // The render timer is a debug-only overlay.
    if !config.debug.render_timer {
        return;
    }

    let text = format!("{:.3} usec", self.meter.average());
    let point = Point::new(self.size_info.screen_lines().saturating_sub(2), Column(0));

    // Damage the overlay for this frame and the next, so it is redrawn and cleaned up.
    let damage = LineDamageBounds::new(point.line, point.column.0, text.len());
    self.damage_tracker.frame().damage_line(damage);
    self.damage_tracker.next_frame().damage_line(damage);

    let fg = config.colors.primary.background;
    let bg = config.colors.normal.red;
    self.renderer.draw_string(point, fg, bg, text.chars(), &self.size_info, &mut self.glyph_cache);
}
/// Draw an indicator for the position of a line in history.
#[inline(never)]
fn draw_line_indicator(
    &mut self,
    config: &UiConfig,
    total_lines: usize,
    obstructed_column: Option<Column>,
    line: usize,
) {
    // Right-align the `[line/total]` text in the top row.
    let num_cols = self.size_info.columns();
    let indicator = format!("[{}/{}]", line, total_lines - 1);
    let start_column = Column(num_cols.saturating_sub(indicator.len()));
    let point = Point::new(0, start_column);

    // Damage the indicator for current and next frame, regardless of whether it's drawn.
    let damage = LineDamageBounds::new(point.line, point.column.0, num_cols - 1);
    self.damage_tracker.frame().damage_line(damage);
    self.damage_tracker.next_frame().damage_line(damage);

    let colors = &config.colors;
    let fg = colors.line_indicator.foreground.unwrap_or(colors.primary.background);
    let bg = colors.line_indicator.background.unwrap_or(colors.primary.foreground);

    // Skip rendering entirely if it would obscure the vi mode cursor.
    let obstructed = obstructed_column.is_some_and(|column| column >= start_column);
    if !obstructed {
        self.renderer.draw_string(
            point,
            fg,
            bg,
            indicator.chars(),
            &self.size_info,
            &mut self.glyph_cache,
        );
    }
}
/// Highlight damaged rects.
///
/// This function is for debug purposes only.
fn highlight_damage(&self, render_rects: &mut Vec<RenderRect>) {
    let damage = self.damage_tracker.shape_frame_damage(self.size_info.into());

    // Overlay each damaged region with a translucent debug rectangle.
    render_rects.extend(damage.iter().map(|damage_rect| {
        RenderRect::new(
            damage_rect.x as f32,
            damage_y_to_viewport_y(&self.size_info, damage_rect) as f32,
            damage_rect.width as f32,
            damage_rect.height as f32,
            DAMAGE_RECT_COLOR,
            0.5,
        )
    }));
}
/// Check whether a hint highlight needs to be cleared.
///
/// A highlighted hint (mouse or vi) is dropped when the frame damage intersects its
/// bounds, since the underlying grid content may have changed.
fn validate_hint_highlights(&mut self, display_offset: usize) {
    let frame = self.damage_tracker.frame();
    // Pair each hint with its age counter; only the mouse hint resets the cursor icon.
    let hints = [
        (&mut self.highlighted_hint, &mut self.highlighted_hint_age, true),
        (&mut self.vi_highlighted_hint, &mut self.vi_highlighted_hint_age, false),
    ];
    let num_lines = self.size_info.screen_lines();
    for (hint, hint_age, reset_mouse) in hints {
        let (start, end) = match hint {
            Some(hint) => (*hint.bounds().start(), *hint.bounds().end()),
            None => continue,
        };
        // Ignore hints that were created this frame.
        *hint_age += 1;
        if *hint_age == 1 {
            continue;
        }
        // Convert hint bounds to viewport coordinates.
        // A start above the viewport clamps to the viewport origin; an end below it
        // clamps to the last visible cell.
        let start = term::point_to_viewport(display_offset, start)
            .filter(|point| point.line < num_lines)
            .unwrap_or_default();
        let end = term::point_to_viewport(display_offset, end)
            .filter(|point| point.line < num_lines)
            .unwrap_or_else(|| Point::new(num_lines - 1, self.size_info.last_column()));
        // Clear invalidated hints.
        if frame.intersects(start, end) {
            if reset_mouse {
                self.window.set_mouse_cursor(CursorIcon::Default);
            }
            frame.mark_fully_damaged();
            *hint = None;
        }
    }
}
/// Request a new frame for a window on Wayland.
fn request_frame(&mut self, scheduler: &mut Scheduler) {
    // Mark this window's frame budget as consumed.
    self.window.has_frame = false;

    // Derive the vblank interval from the monitor's refresh rate, defaulting to 60 Hz.
    let refresh_millihertz = self
        .window
        .current_monitor()
        .and_then(|monitor| monitor.refresh_rate_millihertz())
        .unwrap_or(60_000) as f64;
    let monitor_vblank_interval = 1_000_000. / refresh_millihertz;
    // Now convert it to micro seconds.
    let monitor_vblank_interval = Duration::from_micros((1000. * monitor_vblank_interval) as u64);

    let swap_timeout = self.frame_timer.compute_timeout(monitor_vblank_interval);

    // Schedule the frame event on the shared scheduler.
    let window_id = self.window.id();
    let timer_id = TimerId::new(Topic::Frame, window_id);
    let event = Event::new(EventType::Frame, window_id);
    scheduler.schedule(event, swap_timeout, false, timer_id);
}
}
impl Drop for Display {
    fn drop(&mut self) {
        // Switch OpenGL context before dropping, otherwise objects (like programs) from other
        // contexts might be deleted when dropping renderer.
        self.make_current();
        // SAFETY: each `ManuallyDrop` field is dropped exactly once here and never touched
        // again. The order matters: the renderer is dropped while its context and surface
        // are still alive, then the context, then the surface.
        unsafe {
            ManuallyDrop::drop(&mut self.renderer);
            ManuallyDrop::drop(&mut self.context);
            ManuallyDrop::drop(&mut self.surface);
        }
    }
}
/// Input method state.
#[derive(Debug, Default)]
pub struct Ime {
    /// Whether the IME is enabled.
    enabled: bool,
    /// Current IME preedit, if any.
    preedit: Option<Preedit>,
}
impl Ime {
    /// Enable or disable the IME.
    ///
    /// Disabling also drops any pending preedit state.
    #[inline]
    pub fn set_enabled(&mut self, is_enabled: bool) {
        if is_enabled {
            self.enabled = true;
        } else {
            // Clear state when disabling IME.
            *self = Self::default();
        }
    }

    /// Whether the IME is currently enabled.
    #[inline]
    pub fn is_enabled(&self) -> bool {
        self.enabled
    }

    /// Replace the current preedit.
    #[inline]
    pub fn set_preedit(&mut self, preedit: Option<Preedit>) {
        self.preedit = preedit;
    }

    /// Borrow the current preedit, if any.
    #[inline]
    pub fn preedit(&self) -> Option<&Preedit> {
        self.preedit.as_ref()
    }
}
#[derive(Debug, Default, PartialEq, Eq)]
pub struct Preedit {
    /// The preedit text.
    text: String,
    /// Byte offsets of the cursor's (start, end) into the preedit text.
    ///
    /// `None` means that the cursor is invisible.
    cursor_byte_offset: Option<(usize, usize)>,
    /// Char-width distances from the cursor's (start, end) byte offsets to the end of
    /// the preedit text, computed in [`Preedit::new`].
    cursor_end_offset: Option<(usize, usize)>,
}
impl Preedit {
    /// Build a preedit, deriving the char-width cursor offsets from the byte offsets.
    pub fn new(text: String, cursor_byte_offset: Option<(usize, usize)>) -> Self {
        let cursor_end_offset = cursor_byte_offset.map(|(start, end)| {
            // Display width (in cells) from a byte offset to the end of the text.
            let width_to_end = |byte_offset: usize| -> usize {
                text[byte_offset..].chars().map(|ch| ch.width().unwrap_or(1)).sum()
            };
            (width_to_end(start), width_to_end(end))
        });

        Self { text, cursor_byte_offset, cursor_end_offset }
    }
}
/// Pending renderer updates.
///
/// All renderer updates are cached to be applied just before rendering, to avoid platform-specific
/// rendering issues.
#[derive(Debug, Default, Copy, Clone)]
pub struct RendererUpdate {
    /// Should resize the window.
    resize: bool,
    /// Clear font caches before the next draw.
    clear_font_cache: bool,
}
/// The frame timer state.
pub struct FrameTimer {
    /// Base timestamp used to compute sync points.
    base: Instant,
    /// The last timestamp we synced to.
    last_synced_timestamp: Instant,
    /// The refresh rate we've used to compute sync timestamps.
    refresh_interval: Duration,
}

impl FrameTimer {
    /// Create a timer anchored at the current instant with an unset refresh interval.
    pub fn new() -> Self {
        let now = Instant::now();
        Self { base: now, last_synced_timestamp: now, refresh_interval: Duration::ZERO }
    }

    /// Compute the delay that we should use to achieve the target frame
    /// rate.
    ///
    /// Returns the full `refresh_interval` when the rate changed,
    /// [`Duration::ZERO`] when the next frame is already overdue, and
    /// otherwise the time remaining until the next interval tick.
    pub fn compute_timeout(&mut self, refresh_interval: Duration) -> Duration {
        let now = Instant::now();

        // Handle refresh rate change by re-anchoring the timer.
        if self.refresh_interval != refresh_interval {
            self.base = now;
            self.last_synced_timestamp = now;
            self.refresh_interval = refresh_interval;
            return refresh_interval;
        }

        let next_frame = self.last_synced_timestamp + self.refresh_interval;

        if next_frame < now {
            // Redraw immediately if we haven't drawn in over `refresh_interval` microseconds.
            let elapsed_micros = (now - self.base).as_micros() as u64;
            let refresh_micros = self.refresh_interval.as_micros() as u64;
            // Guard against a zero interval, which would otherwise panic with a
            // division by zero on the `%` below.
            self.last_synced_timestamp = if refresh_micros == 0 {
                now
            } else {
                now - Duration::from_micros(elapsed_micros % refresh_micros)
            };
            Duration::ZERO
        } else {
            // Redraw on the next `refresh_interval` clock tick.
            self.last_synced_timestamp = next_frame;
            next_frame - now
        }
    }
}
/// Calculate the cell dimensions based on font metrics.
///
/// This will return a tuple of the cell width and height.
#[inline]
fn compute_cell_size(config: &UiConfig, metrics: &crossfont::Metrics) -> (f32, f32) {
    // Apply the configured per-cell offsets before rounding down; cells are at least 1px.
    let offset_x = f64::from(config.font.offset.x);
    let offset_y = f64::from(config.font.offset.y);
    let cell_width = (metrics.average_advance + offset_x).floor().max(1.) as f32;
    let cell_height = (metrics.line_height + offset_y).floor().max(1.) as f32;
    (cell_width, cell_height)
}
/// Calculate the size of the window given padding, terminal dimensions and cell size.
fn window_size(
    config: &UiConfig,
    dimensions: Dimensions,
    cell_width: f32,
    cell_height: f32,
    scale_factor: f32,
) -> PhysicalSize<u32> {
    let padding = config.window.padding(scale_factor);

    // Clamp the grid to the minimum supported dimensions.
    let columns = dimensions.columns.max(MIN_COLUMNS) as f32;
    let lines = dimensions.lines.max(MIN_SCREEN_LINES) as f32;

    // Pad the grid on both sides and round down to whole device pixels.
    let width = padding.0.mul_add(2., cell_width * columns).floor();
    let height = padding.1.mul_add(2., cell_height * lines).floor();

    PhysicalSize::new(width as u32, height as u32)
}
| rust | {
"argument_definitions": [
{
"definitions": [
"pub struct Duration {\n secs: u64,\n nanos: Nanoseconds, // Always 0 <= nanos < NANOS_PER_SEC\n}"
],
"name": "refresh_interval",
"type": "Duration"
}
],
"end_line": 1598,
"name": "compute_timeout",
"signature": "pub fn compute_timeout(&mut self, refresh_interval: Duration) -> Duration",
"start_line": 1573
} | {
"class_name": "impl FrameTimer {\n pub fn new() -> Self {\n let now = Instant::now();\n Self { base: now, last_synced_timestamp: now, refresh_interval: Duration::ZERO }\n }\n\n /// Compute the delay that we should use to achieve the target frame\n /// rate.\n pub fn compute_timeout(&mut self, refresh_interval: Duration) -> Duration {\n let now = Instant::now();\n\n // Handle refresh rate change.\n if self.refresh_interval != refresh_interval {\n self.base = now;\n self.last_synced_timestamp = now;\n self.refresh_interval = refresh_interval;\n return refresh_interval;\n }\n\n let next_frame = self.last_synced_timestamp + self.refresh_interval;\n\n if next_frame < now {\n // Redraw immediately if we haven't drawn in over `refresh_interval` microseconds.\n let elapsed_micros = (now - self.base).as_micros() as u64;\n let refresh_micros = self.refresh_interval.as_micros() as u64;\n self.last_synced_timestamp =\n now - Duration::from_micros(elapsed_micros % refresh_micros);\n Duration::ZERO\n } else {\n // Redraw on the next `refresh_interval` clock tick.\n self.last_synced_timestamp = next_frame;\n next_frame - now\n }\n }\n}",
"class_signature": "impl FrameTimer"
} |
new | alacritty-master/alacritty/src/display/content.rs | fn new(content: &mut RenderableContent<'_>, cell: Indexed<&Cell>) -> Self {
// Lookup RGB values.
let mut fg = Self::compute_fg_rgb(content, cell.fg, cell.flags);
let mut bg = Self::compute_bg_rgb(content, cell.bg);
let mut bg_alpha = if cell.flags.contains(Flags::INVERSE) {
mem::swap(&mut fg, &mut bg);
1.0
} else {
Self::compute_bg_alpha(content.config, cell.bg)
};
let is_selected = content.terminal_content.selection.is_some_and(|selection| {
selection.contains_cell(
&cell,
content.terminal_content.cursor.point,
content.cursor_shape,
)
});
let display_offset = content.terminal_content.display_offset;
let viewport_start = Point::new(Line(-(display_offset as i32)), Column(0));
let colors = &content.config.colors;
let mut character = cell.c;
let mut flags = cell.flags;
let num_cols = content.size.columns();
if let Some((c, is_first)) = content
.hint
.as_mut()
.and_then(|hint| hint.advance(viewport_start, num_cols, cell.point))
{
if is_first {
let (config_fg, config_bg) =
(colors.hints.start.foreground, colors.hints.start.background);
Self::compute_cell_rgb(&mut fg, &mut bg, &mut bg_alpha, config_fg, config_bg);
} else if c.is_some() {
let (config_fg, config_bg) =
(colors.hints.end.foreground, colors.hints.end.background);
Self::compute_cell_rgb(&mut fg, &mut bg, &mut bg_alpha, config_fg, config_bg);
} else {
flags.insert(Flags::UNDERLINE);
}
character = c.unwrap_or(character);
} else if is_selected {
let config_fg = colors.selection.foreground;
let config_bg = colors.selection.background;
Self::compute_cell_rgb(&mut fg, &mut bg, &mut bg_alpha, config_fg, config_bg);
if fg == bg && !cell.flags.contains(Flags::HIDDEN) {
// Reveal inversed text when fg/bg is the same.
fg = content.color(NamedColor::Background as usize);
bg = content.color(NamedColor::Foreground as usize);
bg_alpha = 1.0;
}
} else if content.search.as_mut().is_some_and(|search| search.advance(cell.point)) {
let focused = content.focused_match.is_some_and(|fm| fm.contains(&cell.point));
let (config_fg, config_bg) = if focused {
(colors.search.focused_match.foreground, colors.search.focused_match.background)
} else {
(colors.search.matches.foreground, colors.search.matches.background)
};
Self::compute_cell_rgb(&mut fg, &mut bg, &mut bg_alpha, config_fg, config_bg);
}
// Apply transparency to all renderable cells if `transparent_background_colors` is set
if bg_alpha > 0. && content.config.colors.transparent_background_colors {
bg_alpha = content.config.window_opacity();
}
// Convert cell point to viewport position.
let cell_point = cell.point;
let point = term::point_to_viewport(display_offset, cell_point).unwrap();
let underline = cell
.underline_color()
.map_or(fg, |underline| Self::compute_fg_rgb(content, underline, flags));
let zerowidth = cell.zerowidth();
let hyperlink = cell.hyperlink();
let extra = (zerowidth.is_some() || hyperlink.is_some()).then(|| {
Box::new(RenderableCellExtra {
zerowidth: zerowidth.map(|zerowidth| zerowidth.to_vec()),
hyperlink,
})
});
RenderableCell { flags, character, bg_alpha, point, fg, bg, underline, extra }
} | use std::borrow::Cow;
use std::num::NonZeroU32;
use std::ops::Deref;
use std::{cmp, mem};
use alacritty_terminal::event::EventListener;
use alacritty_terminal::grid::{Dimensions, Indexed};
use alacritty_terminal::index::{Column, Line, Point};
use alacritty_terminal::selection::SelectionRange;
use alacritty_terminal::term::cell::{Cell, Flags, Hyperlink};
use alacritty_terminal::term::search::{Match, RegexSearch};
use alacritty_terminal::term::{self, RenderableContent as TerminalContent, Term, TermMode};
use alacritty_terminal::vte::ansi::{Color, CursorShape, NamedColor};
use crate::config::UiConfig;
use crate::display::color::{CellRgb, List, Rgb, DIM_FACTOR};
use crate::display::hint::{self, HintState};
use crate::display::{Display, SizeInfo};
use crate::event::SearchState;
/// Minimum contrast between a fixed cursor color and the cell's background.
pub const MIN_CURSOR_CONTRAST: f64 = 1.5;
/// Renderable terminal content.
///
/// This provides the terminal cursor and an iterator over all non-empty cells.
pub struct RenderableContent<'a> {
    /// Raw renderable content from the terminal grid.
    terminal_content: TerminalContent<'a>,
    /// Cursor assembled while the iterator is drained (see the `Iterator` impl).
    cursor: RenderableCursor,
    /// Shape the cursor should be rendered with.
    cursor_shape: CursorShape,
    /// Cursor position in viewport coordinates.
    cursor_point: Point<usize>,
    /// Visible search matches, when a search is active.
    search: Option<HintMatches<'a>>,
    /// Hint labeling state, when hint mode is active.
    hint: Option<Hint<'a>>,
    config: &'a UiConfig,
    colors: &'a List,
    /// Currently focused search match, if any.
    focused_match: Option<&'a Match>,
    size: &'a SizeInfo,
}
impl<'a> RenderableContent<'a> {
    /// Collect everything required to render the terminal for this frame.
    pub fn new<T: EventListener>(
        config: &'a UiConfig,
        display: &'a mut Display,
        term: &'a Term<T>,
        search_state: &'a mut SearchState,
    ) -> Self {
        // Compute visible search matches only when a search is active.
        let search = search_state.dfas().map(|dfas| HintMatches::visible_regex_matches(term, dfas));
        let focused_match = search_state.focused_match();
        let terminal_content = term.renderable_content();
        // Find terminal cursor shape.
        // Hidden when explicitly requested, during search, or while an IME preedit is shown.
        let cursor_shape = if terminal_content.cursor.shape == CursorShape::Hidden
            || display.cursor_hidden
            || search_state.regex().is_some()
            || display.ime.preedit().is_some()
        {
            CursorShape::Hidden
        } else if !term.is_focused && config.cursor.unfocused_hollow {
            CursorShape::HollowBlock
        } else {
            terminal_content.cursor.shape
        };
        // Convert terminal cursor point to viewport position.
        let cursor_point = terminal_content.cursor.point;
        let display_offset = terminal_content.display_offset;
        let cursor_point = term::point_to_viewport(display_offset, cursor_point).unwrap();
        // Refresh hint-mode matches before rendering, if hint mode is active.
        let hint = if display.hint_state.active() {
            display.hint_state.update_matches(term);
            Some(Hint::from(&display.hint_state))
        } else {
            None
        };
        Self {
            colors: &display.colors,
            size: &display.size_info,
            // Placeholder; the real cursor is assembled while draining the iterator.
            cursor: RenderableCursor::new_hidden(),
            terminal_content,
            focused_match,
            cursor_shape,
            cursor_point,
            search,
            config,
            hint,
        }
    }
    /// Viewport offset.
    pub fn display_offset(&self) -> usize {
        self.terminal_content.display_offset
    }
    /// Get the terminal cursor.
    pub fn cursor(mut self) -> RenderableCursor {
        // Assure this function is only called after the iterator has been drained.
        debug_assert!(self.next().is_none());
        self.cursor
    }
    /// Get the RGB value for a color index.
    pub fn color(&self, color: usize) -> Rgb {
        // Prefer the terminal's runtime color; fall back to the configured palette.
        self.terminal_content.colors[color].map(Rgb).unwrap_or(self.colors[color])
    }
    /// Current selection range, if any.
    pub fn selection_range(&self) -> Option<SelectionRange> {
        self.terminal_content.selection
    }
    /// Assemble the information required to render the terminal cursor.
    fn renderable_cursor(&mut self, cell: &RenderableCell) -> RenderableCursor {
        // Cursor colors.
        let color = if self.terminal_content.mode.contains(TermMode::VI) {
            self.config.colors.vi_mode_cursor
        } else {
            self.config.colors.cursor
        };
        let cursor_color = self.terminal_content.colors[NamedColor::Cursor]
            .map_or(color.background, |c| CellRgb::Rgb(Rgb(c)));
        let text_color = color.foreground;
        // Contrast check applies when either color is derived from the cell colors.
        let insufficient_contrast = (!matches!(cursor_color, CellRgb::Rgb(_))
            || !matches!(text_color, CellRgb::Rgb(_)))
            && cell.fg.contrast(*cell.bg) < MIN_CURSOR_CONTRAST;
        // Convert from cell colors to RGB.
        let mut text_color = text_color.color(cell.fg, cell.bg);
        let mut cursor_color = cursor_color.color(cell.fg, cell.bg);
        // Invert cursor color with insufficient contrast to prevent invisible cursors.
        if insufficient_contrast {
            cursor_color = self.config.colors.primary.foreground;
            text_color = self.config.colors.primary.background;
        }
        // Wide characters get a double-width cursor.
        let width = if cell.flags.contains(Flags::WIDE_CHAR) {
            NonZeroU32::new(2).unwrap()
        } else {
            NonZeroU32::new(1).unwrap()
        };
        RenderableCursor {
            width,
            shape: self.cursor_shape,
            point: self.cursor_point,
            cursor_color,
            text_color,
        }
    }
}
impl Iterator for RenderableContent<'_> {
    type Item = RenderableCell;
    /// Gets the next renderable cell.
    ///
    /// Skips empty (background) cells and applies any flags to the cell state
    /// (eg. invert fg and bg colors).
    #[inline]
    fn next(&mut self) -> Option<Self::Item> {
        loop {
            let cell = self.terminal_content.display_iter.next()?;
            let mut cell = RenderableCell::new(self, cell);
            if self.cursor_point == cell.point {
                // Store the cursor which should be rendered.
                // This side effect is what `RenderableContent::cursor` relies on.
                self.cursor = self.renderable_cursor(&cell);
                if self.cursor.shape == CursorShape::Block {
                    cell.fg = self.cursor.text_color;
                    cell.bg = self.cursor.cursor_color;
                    // Since we draw Block cursor by drawing cell below it with a proper color,
                    // we must adjust alpha to make it visible.
                    cell.bg_alpha = 1.;
                }
                return Some(cell);
            } else if !cell.is_empty() && !cell.flags.contains(Flags::WIDE_CHAR_SPACER) {
                // Skip empty cells and wide char spacers.
                return Some(cell);
            }
        }
    }
}
/// Cell ready for rendering.
#[derive(Clone, Debug)]
pub struct RenderableCell {
    /// Character drawn in this cell.
    pub character: char,
    /// Cell position in viewport coordinates.
    pub point: Point<usize>,
    /// Foreground (text) color.
    pub fg: Rgb,
    /// Background color.
    pub bg: Rgb,
    /// Background opacity; `0.` marks a plain background cell.
    pub bg_alpha: f32,
    /// Color used for underline decorations.
    pub underline: Rgb,
    /// Cell attribute flags (bold, underline, wide char, ...).
    pub flags: Flags,
    /// Rarely present extra data, boxed to keep the cell itself small.
    pub extra: Option<Box<RenderableCellExtra>>,
}
/// Extra storage with rarely present fields for [`RenderableCell`], to reduce the cell size we
/// pass around.
#[derive(Clone, Debug)]
pub struct RenderableCellExtra {
    /// Zero-width characters stacked on this cell, if any.
    pub zerowidth: Option<Vec<char>>,
    /// Hyperlink attached to this cell, if any.
    pub hyperlink: Option<Hyperlink>,
}
impl RenderableCell {
    /// Build a renderable cell from a terminal grid cell.
    ///
    /// Resolves the cell's colors and applies hint, selection and search
    /// highlighting on top of them (in that priority order).
    fn new(content: &mut RenderableContent<'_>, cell: Indexed<&Cell>) -> Self {
        // Lookup RGB values.
        let mut fg = Self::compute_fg_rgb(content, cell.fg, cell.flags);
        let mut bg = Self::compute_bg_rgb(content, cell.bg);
        // Inverted cells swap fg/bg and always get an opaque background.
        let mut bg_alpha = if cell.flags.contains(Flags::INVERSE) {
            mem::swap(&mut fg, &mut bg);
            1.0
        } else {
            Self::compute_bg_alpha(content.config, cell.bg)
        };
        let is_selected = content.terminal_content.selection.is_some_and(|selection| {
            selection.contains_cell(
                &cell,
                content.terminal_content.cursor.point,
                content.cursor_shape,
            )
        });
        let display_offset = content.terminal_content.display_offset;
        let viewport_start = Point::new(Line(-(display_offset as i32)), Column(0));
        let colors = &content.config.colors;
        let mut character = cell.c;
        let mut flags = cell.flags;
        let num_cols = content.size.columns();
        // Hint highlighting takes priority over selection and search highlighting.
        if let Some((c, is_first)) = content
            .hint
            .as_mut()
            .and_then(|hint| hint.advance(viewport_start, num_cols, cell.point))
        {
            if is_first {
                // First cell of a hint label.
                let (config_fg, config_bg) =
                    (colors.hints.start.foreground, colors.hints.start.background);
                Self::compute_cell_rgb(&mut fg, &mut bg, &mut bg_alpha, config_fg, config_bg);
            } else if c.is_some() {
                // Remaining cells of the hint label.
                let (config_fg, config_bg) =
                    (colors.hints.end.foreground, colors.hints.end.background);
                Self::compute_cell_rgb(&mut fg, &mut bg, &mut bg_alpha, config_fg, config_bg);
            } else {
                // Part of the match but not of the label: only underline it.
                flags.insert(Flags::UNDERLINE);
            }
            // Replace the cell's character with the hint label character, if any.
            character = c.unwrap_or(character);
        } else if is_selected {
            let config_fg = colors.selection.foreground;
            let config_bg = colors.selection.background;
            Self::compute_cell_rgb(&mut fg, &mut bg, &mut bg_alpha, config_fg, config_bg);
            if fg == bg && !cell.flags.contains(Flags::HIDDEN) {
                // Reveal inversed text when fg/bg is the same.
                fg = content.color(NamedColor::Background as usize);
                bg = content.color(NamedColor::Foreground as usize);
                bg_alpha = 1.0;
            }
        } else if content.search.as_mut().is_some_and(|search| search.advance(cell.point)) {
            // Focused search match gets distinct colors from the other matches.
            let focused = content.focused_match.is_some_and(|fm| fm.contains(&cell.point));
            let (config_fg, config_bg) = if focused {
                (colors.search.focused_match.foreground, colors.search.focused_match.background)
            } else {
                (colors.search.matches.foreground, colors.search.matches.background)
            };
            Self::compute_cell_rgb(&mut fg, &mut bg, &mut bg_alpha, config_fg, config_bg);
        }
        // Apply transparency to all renderable cells if `transparent_background_colors` is set
        if bg_alpha > 0. && content.config.colors.transparent_background_colors {
            bg_alpha = content.config.window_opacity();
        }
        // Convert cell point to viewport position.
        let cell_point = cell.point;
        let point = term::point_to_viewport(display_offset, cell_point).unwrap();
        // Fall back to the foreground color when no explicit underline color is set.
        let underline = cell
            .underline_color()
            .map_or(fg, |underline| Self::compute_fg_rgb(content, underline, flags));
        let zerowidth = cell.zerowidth();
        let hyperlink = cell.hyperlink();
        // Only allocate the extra storage when it is actually needed.
        let extra = (zerowidth.is_some() || hyperlink.is_some()).then(|| {
            Box::new(RenderableCellExtra {
                zerowidth: zerowidth.map(|zerowidth| zerowidth.to_vec()),
                hyperlink,
            })
        });
        RenderableCell { flags, character, bg_alpha, point, fg, bg, underline, extra }
    }
    /// Check if cell contains any renderable content.
    fn is_empty(&self) -> bool {
        self.bg_alpha == 0.
            && self.character == ' '
            && self.extra.is_none()
            && !self.flags.intersects(Flags::ALL_UNDERLINES | Flags::STRIKEOUT)
    }
    /// Apply [`CellRgb`] colors to the cell's colors.
    fn compute_cell_rgb(
        cell_fg: &mut Rgb,
        cell_bg: &mut Rgb,
        bg_alpha: &mut f32,
        fg: CellRgb,
        bg: CellRgb,
    ) {
        // Resolve the background against the original foreground, not the
        // freshly computed one, hence the `mem::replace`.
        let old_fg = mem::replace(cell_fg, fg.color(*cell_fg, *cell_bg));
        *cell_bg = bg.color(old_fg, *cell_bg);
        // Any background other than the cell's own must be fully opaque.
        if bg != CellRgb::CellBackground {
            *bg_alpha = 1.0;
        }
    }
    /// Get the RGB color from a cell's foreground color.
    fn compute_fg_rgb(content: &RenderableContent<'_>, fg: Color, flags: Flags) -> Rgb {
        let config = &content.config;
        match fg {
            // Direct RGB colors are dimmed by scaling them down.
            Color::Spec(rgb) => match flags & Flags::DIM {
                Flags::DIM => {
                    let rgb: Rgb = rgb.into();
                    rgb * DIM_FACTOR
                },
                _ => rgb.into(),
            },
            Color::Named(ansi) => {
                match (config.colors.draw_bold_text_with_bright_colors, flags & Flags::DIM_BOLD) {
                    // If no bright foreground is set, treat it like the BOLD flag doesn't exist.
                    (_, Flags::DIM_BOLD)
                        if ansi == NamedColor::Foreground
                            && config.colors.primary.bright_foreground.is_none() =>
                    {
                        content.color(NamedColor::DimForeground as usize)
                    },
                    // Draw bold text in bright colors *and* contains bold flag.
                    (true, Flags::BOLD) => content.color(ansi.to_bright() as usize),
                    // Cell is marked as dim and not bold.
                    (_, Flags::DIM) | (false, Flags::DIM_BOLD) => {
                        content.color(ansi.to_dim() as usize)
                    },
                    // None of the above, keep original color..
                    _ => content.color(ansi as usize),
                }
            },
            Color::Indexed(idx) => {
                let idx = match (
                    config.colors.draw_bold_text_with_bright_colors,
                    flags & Flags::DIM_BOLD,
                    idx,
                ) {
                    // Bold base colors (0-7) map to their bright variants (8-15).
                    (true, Flags::BOLD, 0..=7) => idx as usize + 8,
                    // Dim bright colors (8-15) map back to their base variants.
                    (false, Flags::DIM, 8..=15) => idx as usize - 8,
                    // Dim base colors map to the dedicated dim palette entries.
                    (false, Flags::DIM, 0..=7) => NamedColor::DimBlack as usize + idx as usize,
                    _ => idx as usize,
                };
                content.color(idx)
            },
        }
    }
    /// Get the RGB color from a cell's background color.
    #[inline]
    fn compute_bg_rgb(content: &RenderableContent<'_>, bg: Color) -> Rgb {
        match bg {
            Color::Spec(rgb) => rgb.into(),
            Color::Named(ansi) => content.color(ansi as usize),
            Color::Indexed(idx) => content.color(idx as usize),
        }
    }
    /// Compute background alpha based on cell's original color.
    ///
    /// Since an RGB color matching the background should not be transparent, this is computed
    /// using the named input color, rather than checking the RGB of the background after its color
    /// is computed.
    #[inline]
    fn compute_bg_alpha(config: &UiConfig, bg: Color) -> f32 {
        if bg == Color::Named(NamedColor::Background) {
            0.
        } else if config.colors.transparent_background_colors {
            config.window_opacity()
        } else {
            1.
        }
    }
}
/// Cursor storing all information relevant for rendering.
#[derive(Debug, Eq, PartialEq, Copy, Clone)]
pub struct RenderableCursor {
    // Shape to draw; `CursorShape::Hidden` disables rendering.
    shape: CursorShape,
    // Color of the cursor itself.
    cursor_color: Rgb,
    // Color of the text underneath a block cursor.
    text_color: Rgb,
    // Width in cells (2 for wide characters).
    width: NonZeroU32,
    // Cursor position in viewport coordinates.
    point: Point<usize>,
}
impl RenderableCursor {
    /// Create a cursor which is hidden and will not be rendered.
    fn new_hidden() -> Self {
        Self {
            shape: CursorShape::Hidden,
            cursor_color: Rgb::default(),
            text_color: Rgb::default(),
            width: NonZeroU32::new(1).unwrap(),
            point: Point::default(),
        }
    }

    /// Create a cursor at `point`, using the cursor color for the text as well.
    pub fn new(
        point: Point<usize>,
        shape: CursorShape,
        cursor_color: Rgb,
        width: NonZeroU32,
    ) -> Self {
        Self { shape, cursor_color, text_color: cursor_color, width, point }
    }

    /// Color used to draw the cursor.
    pub fn color(&self) -> Rgb {
        self.cursor_color
    }

    /// Shape of the cursor.
    pub fn shape(&self) -> CursorShape {
        self.shape
    }

    /// Width of the cursor in cells.
    pub fn width(&self) -> NonZeroU32 {
        self.width
    }

    /// Position of the cursor.
    pub fn point(&self) -> Point<usize> {
        self.point
    }
}
/// Regex hints for keyboard shortcuts.
struct Hint<'a> {
    /// Hint matches and position.
    matches: HintMatches<'a>,
    /// Label characters to display for each match.
    labels: &'a Vec<Vec<char>>,
}
impl Hint<'_> {
    /// Advance the hint iterator.
    ///
    /// If the point is within a hint, the keyboard shortcut character that should be displayed at
    /// this position will be returned.
    ///
    /// The tuple's [`bool`] will be `true` when the character is the first for this hint.
    ///
    /// The tuple's [`Option<char>`] will be [`None`] when the point is part of the match, but not
    /// part of the hint label.
    fn advance(
        &mut self,
        viewport_start: Point,
        num_cols: usize,
        point: Point,
    ) -> Option<(Option<char>, bool)> {
        // Check if we're within a match at all.
        if !self.matches.advance(point) {
            return None;
        }
        // Match starting position on this line; linebreaks interrupt the hint labels.
        let start = self
            .matches
            .get(self.matches.index)
            .map(|bounds| cmp::max(*bounds.start(), viewport_start))?;
        // Position within the hint label, counting cells row-major from the
        // label start across line boundaries.
        let line_delta = point.line.0 - start.line.0;
        let col_delta = point.column.0 as i32 - start.column.0 as i32;
        let label_position = usize::try_from(line_delta * num_cols as i32 + col_delta).unwrap_or(0);
        let is_first = label_position == 0;
        // Hint label character; `(None, false)` when the point lies inside the
        // match but past the end of its label.
        let hint_char = self.labels[self.matches.index]
            .get(label_position)
            .copied()
            .map(|c| (Some(c), is_first))
            .unwrap_or((None, false));
        Some(hint_char)
    }
}
impl<'a> From<&'a HintState> for Hint<'a> {
    /// Borrow the current hint labels and matches from the hint state.
    fn from(hint_state: &'a HintState) -> Self {
        Self {
            matches: HintMatches::new(hint_state.matches()),
            labels: hint_state.labels(),
        }
    }
}
/// Visible hint match tracking.
#[derive(Default)]
struct HintMatches<'a> {
    /// All visible matches.
    ///
    /// `advance` only ever moves forward, so these are assumed to be ordered
    /// by position — TODO confirm against the producers.
    matches: Cow<'a, [Match]>,
    /// Index of the last match checked.
    index: usize,
}
impl<'a> HintMatches<'a> {
    /// Build a match tracker from an owned or borrowed list of matches.
    fn new(matches: impl Into<Cow<'a, [Match]>>) -> Self {
        Self { index: 0, matches: matches.into() }
    }

    /// Build a match tracker from the regex matches on the terminal's visible part.
    fn visible_regex_matches<T>(term: &Term<T>, dfas: &mut RegexSearch) -> Self {
        let matches: Vec<_> = hint::visible_regex_match_iter(term, dfas).collect();
        Self::new(matches)
    }

    /// Advance the regex tracker to the next point.
    ///
    /// This will return `true` if the point passed is part of a regex match.
    fn advance(&mut self, point: Point) -> bool {
        while let Some(bounds) = self.get(self.index) {
            if bounds.start() > &point {
                // The next match starts past this point.
                break;
            }
            if bounds.end() < &point {
                // This match was passed already; check the next one.
                self.index += 1;
                continue;
            }
            // The point is covered by the current match.
            return true;
        }
        false
    }
}
impl Deref for HintMatches<'_> {
    type Target = [Match];

    /// Borrow the underlying slice of matches.
    fn deref(&self) -> &Self::Target {
        &self.matches
    }
}
| rust | {
"argument_definitions": [],
"end_line": 299,
"name": "new",
"signature": "fn new(content: &mut RenderableContent<'_>, cell: Indexed<&Cell>) -> Self",
"start_line": 209
} | {
"class_name": "impl RenderableCell {\n fn new(content: &mut RenderableContent<'_>, cell: Indexed<&Cell>) -> Self {\n // Lookup RGB values.\n let mut fg = Self::compute_fg_rgb(content, cell.fg, cell.flags);\n let mut bg = Self::compute_bg_rgb(content, cell.bg);\n\n let mut bg_alpha = if cell.flags.contains(Flags::INVERSE) {\n mem::swap(&mut fg, &mut bg);\n 1.0\n } else {\n Self::compute_bg_alpha(content.config, cell.bg)\n };\n\n let is_selected = content.terminal_content.selection.is_some_and(|selection| {\n selection.contains_cell(\n &cell,\n content.terminal_content.cursor.point,\n content.cursor_shape,\n )\n });\n\n let display_offset = content.terminal_content.display_offset;\n let viewport_start = Point::new(Line(-(display_offset as i32)), Column(0));\n let colors = &content.config.colors;\n let mut character = cell.c;\n let mut flags = cell.flags;\n\n let num_cols = content.size.columns();\n if let Some((c, is_first)) = content\n .hint\n .as_mut()\n .and_then(|hint| hint.advance(viewport_start, num_cols, cell.point))\n {\n if is_first {\n let (config_fg, config_bg) =\n (colors.hints.start.foreground, colors.hints.start.background);\n Self::compute_cell_rgb(&mut fg, &mut bg, &mut bg_alpha, config_fg, config_bg);\n } else if c.is_some() {\n let (config_fg, config_bg) =\n (colors.hints.end.foreground, colors.hints.end.background);\n Self::compute_cell_rgb(&mut fg, &mut bg, &mut bg_alpha, config_fg, config_bg);\n } else {\n flags.insert(Flags::UNDERLINE);\n }\n\n character = c.unwrap_or(character);\n } else if is_selected {\n let config_fg = colors.selection.foreground;\n let config_bg = colors.selection.background;\n Self::compute_cell_rgb(&mut fg, &mut bg, &mut bg_alpha, config_fg, config_bg);\n\n if fg == bg && !cell.flags.contains(Flags::HIDDEN) {\n // Reveal inversed text when fg/bg is the same.\n fg = content.color(NamedColor::Background as usize);\n bg = content.color(NamedColor::Foreground as usize);\n bg_alpha = 1.0;\n }\n } else if 
content.search.as_mut().is_some_and(|search| search.advance(cell.point)) {\n let focused = content.focused_match.is_some_and(|fm| fm.contains(&cell.point));\n let (config_fg, config_bg) = if focused {\n (colors.search.focused_match.foreground, colors.search.focused_match.background)\n } else {\n (colors.search.matches.foreground, colors.search.matches.background)\n };\n Self::compute_cell_rgb(&mut fg, &mut bg, &mut bg_alpha, config_fg, config_bg);\n }\n\n // Apply transparency to all renderable cells if `transparent_background_colors` is set\n if bg_alpha > 0. && content.config.colors.transparent_background_colors {\n bg_alpha = content.config.window_opacity();\n }\n\n // Convert cell point to viewport position.\n let cell_point = cell.point;\n let point = term::point_to_viewport(display_offset, cell_point).unwrap();\n\n let underline = cell\n .underline_color()\n .map_or(fg, |underline| Self::compute_fg_rgb(content, underline, flags));\n\n let zerowidth = cell.zerowidth();\n let hyperlink = cell.hyperlink();\n\n let extra = (zerowidth.is_some() || hyperlink.is_some()).then(|| {\n Box::new(RenderableCellExtra {\n zerowidth: zerowidth.map(|zerowidth| zerowidth.to_vec()),\n hyperlink,\n })\n });\n\n RenderableCell { flags, character, bg_alpha, point, fg, bg, underline, extra }\n }\n\n /// Check if cell contains any renderable content.\n fn is_empty(&self) -> bool {\n self.bg_alpha == 0.\n && self.character == ' '\n && self.extra.is_none()\n && !self.flags.intersects(Flags::ALL_UNDERLINES | Flags::STRIKEOUT)\n }\n\n /// Apply [`CellRgb`] colors to the cell's colors.\n fn compute_cell_rgb(\n cell_fg: &mut Rgb,\n cell_bg: &mut Rgb,\n bg_alpha: &mut f32,\n fg: CellRgb,\n bg: CellRgb,\n ) {\n let old_fg = mem::replace(cell_fg, fg.color(*cell_fg, *cell_bg));\n *cell_bg = bg.color(old_fg, *cell_bg);\n\n if bg != CellRgb::CellBackground {\n *bg_alpha = 1.0;\n }\n }\n\n /// Get the RGB color from a cell's foreground color.\n fn compute_fg_rgb(content: &RenderableContent<'_>, 
fg: Color, flags: Flags) -> Rgb {\n let config = &content.config;\n match fg {\n Color::Spec(rgb) => match flags & Flags::DIM {\n Flags::DIM => {\n let rgb: Rgb = rgb.into();\n rgb * DIM_FACTOR\n },\n _ => rgb.into(),\n },\n Color::Named(ansi) => {\n match (config.colors.draw_bold_text_with_bright_colors, flags & Flags::DIM_BOLD) {\n // If no bright foreground is set, treat it like the BOLD flag doesn't exist.\n (_, Flags::DIM_BOLD)\n if ansi == NamedColor::Foreground\n && config.colors.primary.bright_foreground.is_none() =>\n {\n content.color(NamedColor::DimForeground as usize)\n },\n // Draw bold text in bright colors *and* contains bold flag.\n (true, Flags::BOLD) => content.color(ansi.to_bright() as usize),\n // Cell is marked as dim and not bold.\n (_, Flags::DIM) | (false, Flags::DIM_BOLD) => {\n content.color(ansi.to_dim() as usize)\n },\n // None of the above, keep original color..\n _ => content.color(ansi as usize),\n }\n },\n Color::Indexed(idx) => {\n let idx = match (\n config.colors.draw_bold_text_with_bright_colors,\n flags & Flags::DIM_BOLD,\n idx,\n ) {\n (true, Flags::BOLD, 0..=7) => idx as usize + 8,\n (false, Flags::DIM, 8..=15) => idx as usize - 8,\n (false, Flags::DIM, 0..=7) => NamedColor::DimBlack as usize + idx as usize,\n _ => idx as usize,\n };\n\n content.color(idx)\n },\n }\n }\n\n /// Get the RGB color from a cell's background color.\n #[inline]\n fn compute_bg_rgb(content: &RenderableContent<'_>, bg: Color) -> Rgb {\n match bg {\n Color::Spec(rgb) => rgb.into(),\n Color::Named(ansi) => content.color(ansi as usize),\n Color::Indexed(idx) => content.color(idx as usize),\n }\n }\n\n /// Compute background alpha based on cell's original color.\n ///\n /// Since an RGB color matching the background should not be transparent, this is computed\n /// using the named input color, rather than checking the RGB of the background after its color\n /// is computed.\n #[inline]\n fn compute_bg_alpha(config: &UiConfig, bg: Color) -> f32 {\n if bg == 
Color::Named(NamedColor::Background) {\n 0.\n } else if config.colors.transparent_background_colors {\n config.window_opacity()\n } else {\n 1.\n }\n }\n}",
"class_signature": "impl RenderableCell"
} |
compute_fg_rgb | alacritty-master/alacritty/src/display/content.rs | fn compute_fg_rgb(content: &RenderableContent<'_>, fg: Color, flags: Flags) -> Rgb {
let config = &content.config;
match fg {
Color::Spec(rgb) => match flags & Flags::DIM {
Flags::DIM => {
let rgb: Rgb = rgb.into();
rgb * DIM_FACTOR
},
_ => rgb.into(),
},
Color::Named(ansi) => {
match (config.colors.draw_bold_text_with_bright_colors, flags & Flags::DIM_BOLD) {
// If no bright foreground is set, treat it like the BOLD flag doesn't exist.
(_, Flags::DIM_BOLD)
if ansi == NamedColor::Foreground
&& config.colors.primary.bright_foreground.is_none() =>
{
content.color(NamedColor::DimForeground as usize)
},
// Draw bold text in bright colors *and* contains bold flag.
(true, Flags::BOLD) => content.color(ansi.to_bright() as usize),
// Cell is marked as dim and not bold.
(_, Flags::DIM) | (false, Flags::DIM_BOLD) => {
content.color(ansi.to_dim() as usize)
},
// None of the above, keep original color..
_ => content.color(ansi as usize),
}
},
Color::Indexed(idx) => {
let idx = match (
config.colors.draw_bold_text_with_bright_colors,
flags & Flags::DIM_BOLD,
idx,
) {
(true, Flags::BOLD, 0..=7) => idx as usize + 8,
(false, Flags::DIM, 8..=15) => idx as usize - 8,
(false, Flags::DIM, 0..=7) => NamedColor::DimBlack as usize + idx as usize,
_ => idx as usize,
};
content.color(idx)
},
}
} | use std::borrow::Cow;
use std::num::NonZeroU32;
use std::ops::Deref;
use std::{cmp, mem};
use alacritty_terminal::event::EventListener;
use alacritty_terminal::grid::{Dimensions, Indexed};
use alacritty_terminal::index::{Column, Line, Point};
use alacritty_terminal::selection::SelectionRange;
use alacritty_terminal::term::cell::{Cell, Flags, Hyperlink};
use alacritty_terminal::term::search::{Match, RegexSearch};
use alacritty_terminal::term::{self, RenderableContent as TerminalContent, Term, TermMode};
use alacritty_terminal::vte::ansi::{Color, CursorShape, NamedColor};
use crate::config::UiConfig;
use crate::display::color::{CellRgb, List, Rgb, DIM_FACTOR};
use crate::display::hint::{self, HintState};
use crate::display::{Display, SizeInfo};
use crate::event::SearchState;
/// Minimum contrast between a fixed cursor color and the cell's background.
pub const MIN_CURSOR_CONTRAST: f64 = 1.5;
/// Renderable terminal content.
///
/// This provides the terminal cursor and an iterator over all non-empty cells.
pub struct RenderableContent<'a> {
terminal_content: TerminalContent<'a>,
cursor: RenderableCursor,
cursor_shape: CursorShape,
cursor_point: Point<usize>,
search: Option<HintMatches<'a>>,
hint: Option<Hint<'a>>,
config: &'a UiConfig,
colors: &'a List,
focused_match: Option<&'a Match>,
size: &'a SizeInfo,
}
impl<'a> RenderableContent<'a> {
pub fn new<T: EventListener>(
config: &'a UiConfig,
display: &'a mut Display,
term: &'a Term<T>,
search_state: &'a mut SearchState,
) -> Self {
let search = search_state.dfas().map(|dfas| HintMatches::visible_regex_matches(term, dfas));
let focused_match = search_state.focused_match();
let terminal_content = term.renderable_content();
// Find terminal cursor shape.
let cursor_shape = if terminal_content.cursor.shape == CursorShape::Hidden
|| display.cursor_hidden
|| search_state.regex().is_some()
|| display.ime.preedit().is_some()
{
CursorShape::Hidden
} else if !term.is_focused && config.cursor.unfocused_hollow {
CursorShape::HollowBlock
} else {
terminal_content.cursor.shape
};
// Convert terminal cursor point to viewport position.
let cursor_point = terminal_content.cursor.point;
let display_offset = terminal_content.display_offset;
let cursor_point = term::point_to_viewport(display_offset, cursor_point).unwrap();
let hint = if display.hint_state.active() {
display.hint_state.update_matches(term);
Some(Hint::from(&display.hint_state))
} else {
None
};
Self {
colors: &display.colors,
size: &display.size_info,
cursor: RenderableCursor::new_hidden(),
terminal_content,
focused_match,
cursor_shape,
cursor_point,
search,
config,
hint,
}
}
/// Viewport offset.
pub fn display_offset(&self) -> usize {
self.terminal_content.display_offset
}
/// Get the terminal cursor.
pub fn cursor(mut self) -> RenderableCursor {
// Assure this function is only called after the iterator has been drained.
debug_assert!(self.next().is_none());
self.cursor
}
/// Get the RGB value for a color index.
pub fn color(&self, color: usize) -> Rgb {
self.terminal_content.colors[color].map(Rgb).unwrap_or(self.colors[color])
}
pub fn selection_range(&self) -> Option<SelectionRange> {
self.terminal_content.selection
}
/// Assemble the information required to render the terminal cursor.
fn renderable_cursor(&mut self, cell: &RenderableCell) -> RenderableCursor {
// Cursor colors.
let color = if self.terminal_content.mode.contains(TermMode::VI) {
self.config.colors.vi_mode_cursor
} else {
self.config.colors.cursor
};
let cursor_color = self.terminal_content.colors[NamedColor::Cursor]
.map_or(color.background, |c| CellRgb::Rgb(Rgb(c)));
let text_color = color.foreground;
let insufficient_contrast = (!matches!(cursor_color, CellRgb::Rgb(_))
|| !matches!(text_color, CellRgb::Rgb(_)))
&& cell.fg.contrast(*cell.bg) < MIN_CURSOR_CONTRAST;
// Convert from cell colors to RGB.
let mut text_color = text_color.color(cell.fg, cell.bg);
let mut cursor_color = cursor_color.color(cell.fg, cell.bg);
// Invert cursor color with insufficient contrast to prevent invisible cursors.
if insufficient_contrast {
cursor_color = self.config.colors.primary.foreground;
text_color = self.config.colors.primary.background;
}
let width = if cell.flags.contains(Flags::WIDE_CHAR) {
NonZeroU32::new(2).unwrap()
} else {
NonZeroU32::new(1).unwrap()
};
RenderableCursor {
width,
shape: self.cursor_shape,
point: self.cursor_point,
cursor_color,
text_color,
}
}
}
impl Iterator for RenderableContent<'_> {
type Item = RenderableCell;
/// Gets the next renderable cell.
///
/// Skips empty (background) cells and applies any flags to the cell state
/// (eg. invert fg and bg colors).
#[inline]
fn next(&mut self) -> Option<Self::Item> {
loop {
let cell = self.terminal_content.display_iter.next()?;
let mut cell = RenderableCell::new(self, cell);
if self.cursor_point == cell.point {
// Store the cursor which should be rendered.
self.cursor = self.renderable_cursor(&cell);
if self.cursor.shape == CursorShape::Block {
cell.fg = self.cursor.text_color;
cell.bg = self.cursor.cursor_color;
// Since we draw Block cursor by drawing cell below it with a proper color,
// we must adjust alpha to make it visible.
cell.bg_alpha = 1.;
}
return Some(cell);
} else if !cell.is_empty() && !cell.flags.contains(Flags::WIDE_CHAR_SPACER) {
// Skip empty cells and wide char spacers.
return Some(cell);
}
}
}
}
/// Cell ready for rendering.
#[derive(Clone, Debug)]
pub struct RenderableCell {
pub character: char,
pub point: Point<usize>,
pub fg: Rgb,
pub bg: Rgb,
pub bg_alpha: f32,
pub underline: Rgb,
pub flags: Flags,
pub extra: Option<Box<RenderableCellExtra>>,
}
/// Extra storage with rarely present fields for [`RenderableCell`], to reduce the cell size we
/// pass around.
#[derive(Clone, Debug)]
pub struct RenderableCellExtra {
pub zerowidth: Option<Vec<char>>,
pub hyperlink: Option<Hyperlink>,
}
impl RenderableCell {
fn new(content: &mut RenderableContent<'_>, cell: Indexed<&Cell>) -> Self {
// Lookup RGB values.
let mut fg = Self::compute_fg_rgb(content, cell.fg, cell.flags);
let mut bg = Self::compute_bg_rgb(content, cell.bg);
let mut bg_alpha = if cell.flags.contains(Flags::INVERSE) {
mem::swap(&mut fg, &mut bg);
1.0
} else {
Self::compute_bg_alpha(content.config, cell.bg)
};
let is_selected = content.terminal_content.selection.is_some_and(|selection| {
selection.contains_cell(
&cell,
content.terminal_content.cursor.point,
content.cursor_shape,
)
});
let display_offset = content.terminal_content.display_offset;
let viewport_start = Point::new(Line(-(display_offset as i32)), Column(0));
let colors = &content.config.colors;
let mut character = cell.c;
let mut flags = cell.flags;
let num_cols = content.size.columns();
if let Some((c, is_first)) = content
.hint
.as_mut()
.and_then(|hint| hint.advance(viewport_start, num_cols, cell.point))
{
if is_first {
let (config_fg, config_bg) =
(colors.hints.start.foreground, colors.hints.start.background);
Self::compute_cell_rgb(&mut fg, &mut bg, &mut bg_alpha, config_fg, config_bg);
} else if c.is_some() {
let (config_fg, config_bg) =
(colors.hints.end.foreground, colors.hints.end.background);
Self::compute_cell_rgb(&mut fg, &mut bg, &mut bg_alpha, config_fg, config_bg);
} else {
flags.insert(Flags::UNDERLINE);
}
character = c.unwrap_or(character);
} else if is_selected {
let config_fg = colors.selection.foreground;
let config_bg = colors.selection.background;
Self::compute_cell_rgb(&mut fg, &mut bg, &mut bg_alpha, config_fg, config_bg);
if fg == bg && !cell.flags.contains(Flags::HIDDEN) {
// Reveal inversed text when fg/bg is the same.
fg = content.color(NamedColor::Background as usize);
bg = content.color(NamedColor::Foreground as usize);
bg_alpha = 1.0;
}
} else if content.search.as_mut().is_some_and(|search| search.advance(cell.point)) {
let focused = content.focused_match.is_some_and(|fm| fm.contains(&cell.point));
let (config_fg, config_bg) = if focused {
(colors.search.focused_match.foreground, colors.search.focused_match.background)
} else {
(colors.search.matches.foreground, colors.search.matches.background)
};
Self::compute_cell_rgb(&mut fg, &mut bg, &mut bg_alpha, config_fg, config_bg);
}
// Apply transparency to all renderable cells if `transparent_background_colors` is set
if bg_alpha > 0. && content.config.colors.transparent_background_colors {
bg_alpha = content.config.window_opacity();
}
// Convert cell point to viewport position.
let cell_point = cell.point;
let point = term::point_to_viewport(display_offset, cell_point).unwrap();
let underline = cell
.underline_color()
.map_or(fg, |underline| Self::compute_fg_rgb(content, underline, flags));
let zerowidth = cell.zerowidth();
let hyperlink = cell.hyperlink();
let extra = (zerowidth.is_some() || hyperlink.is_some()).then(|| {
Box::new(RenderableCellExtra {
zerowidth: zerowidth.map(|zerowidth| zerowidth.to_vec()),
hyperlink,
})
});
RenderableCell { flags, character, bg_alpha, point, fg, bg, underline, extra }
}
/// Check if cell contains any renderable content.
fn is_empty(&self) -> bool {
self.bg_alpha == 0.
&& self.character == ' '
&& self.extra.is_none()
&& !self.flags.intersects(Flags::ALL_UNDERLINES | Flags::STRIKEOUT)
}
/// Apply [`CellRgb`] colors to the cell's colors.
fn compute_cell_rgb(
cell_fg: &mut Rgb,
cell_bg: &mut Rgb,
bg_alpha: &mut f32,
fg: CellRgb,
bg: CellRgb,
) {
let old_fg = mem::replace(cell_fg, fg.color(*cell_fg, *cell_bg));
*cell_bg = bg.color(old_fg, *cell_bg);
if bg != CellRgb::CellBackground {
*bg_alpha = 1.0;
}
}
/// Get the RGB color from a cell's foreground color.
fn compute_fg_rgb(content: &RenderableContent<'_>, fg: Color, flags: Flags) -> Rgb {
let config = &content.config;
match fg {
Color::Spec(rgb) => match flags & Flags::DIM {
Flags::DIM => {
let rgb: Rgb = rgb.into();
rgb * DIM_FACTOR
},
_ => rgb.into(),
},
Color::Named(ansi) => {
match (config.colors.draw_bold_text_with_bright_colors, flags & Flags::DIM_BOLD) {
// If no bright foreground is set, treat it like the BOLD flag doesn't exist.
(_, Flags::DIM_BOLD)
if ansi == NamedColor::Foreground
&& config.colors.primary.bright_foreground.is_none() =>
{
content.color(NamedColor::DimForeground as usize)
},
// Draw bold text in bright colors *and* contains bold flag.
(true, Flags::BOLD) => content.color(ansi.to_bright() as usize),
// Cell is marked as dim and not bold.
(_, Flags::DIM) | (false, Flags::DIM_BOLD) => {
content.color(ansi.to_dim() as usize)
},
// None of the above, keep original color..
_ => content.color(ansi as usize),
}
},
Color::Indexed(idx) => {
let idx = match (
config.colors.draw_bold_text_with_bright_colors,
flags & Flags::DIM_BOLD,
idx,
) {
(true, Flags::BOLD, 0..=7) => idx as usize + 8,
(false, Flags::DIM, 8..=15) => idx as usize - 8,
(false, Flags::DIM, 0..=7) => NamedColor::DimBlack as usize + idx as usize,
_ => idx as usize,
};
content.color(idx)
},
}
}
/// Get the RGB color from a cell's background color.
#[inline]
fn compute_bg_rgb(content: &RenderableContent<'_>, bg: Color) -> Rgb {
match bg {
Color::Spec(rgb) => rgb.into(),
Color::Named(ansi) => content.color(ansi as usize),
Color::Indexed(idx) => content.color(idx as usize),
}
}
/// Compute background alpha based on cell's original color.
///
/// Since an RGB color matching the background should not be transparent, this is computed
/// using the named input color, rather than checking the RGB of the background after its color
/// is computed.
#[inline]
fn compute_bg_alpha(config: &UiConfig, bg: Color) -> f32 {
if bg == Color::Named(NamedColor::Background) {
0.
} else if config.colors.transparent_background_colors {
config.window_opacity()
} else {
1.
}
}
}
/// Cursor storing all information relevant for rendering.
#[derive(Debug, Eq, PartialEq, Copy, Clone)]
pub struct RenderableCursor {
shape: CursorShape,
cursor_color: Rgb,
text_color: Rgb,
width: NonZeroU32,
point: Point<usize>,
}
impl RenderableCursor {
fn new_hidden() -> Self {
let shape = CursorShape::Hidden;
let cursor_color = Rgb::default();
let text_color = Rgb::default();
let width = NonZeroU32::new(1).unwrap();
let point = Point::default();
Self { shape, cursor_color, text_color, width, point }
}
}
impl RenderableCursor {
pub fn new(
point: Point<usize>,
shape: CursorShape,
cursor_color: Rgb,
width: NonZeroU32,
) -> Self {
Self { shape, cursor_color, text_color: cursor_color, width, point }
}
pub fn color(&self) -> Rgb {
self.cursor_color
}
pub fn shape(&self) -> CursorShape {
self.shape
}
pub fn width(&self) -> NonZeroU32 {
self.width
}
pub fn point(&self) -> Point<usize> {
self.point
}
}
/// Regex hints for keyboard shortcuts.
struct Hint<'a> {
/// Hint matches and position.
matches: HintMatches<'a>,
/// Last match checked against current cell position.
labels: &'a Vec<Vec<char>>,
}
impl Hint<'_> {
    /// Advance the hint iterator.
    ///
    /// If the point is within a hint, the keyboard shortcut character that should be displayed at
    /// this position will be returned.
    ///
    /// The tuple's [`bool`] will be `true` when the character is the first for this hint.
    ///
    /// The tuple's [`Option<char>`] will be [`None`] when the point is part of the match, but not
    /// part of the hint label.
    fn advance(
        &mut self,
        viewport_start: Point,
        num_cols: usize,
        point: Point,
    ) -> Option<(Option<char>, bool)> {
        // Check if we're within a match at all.
        if !self.matches.advance(point) {
            return None;
        }
        // Match starting position on this line; linebreaks interrupt the hint labels.
        //
        // Clamping to `viewport_start` anchors the label of a match that begins
        // above the viewport to the first visible cell.
        let start = self
            .matches
            .get(self.matches.index)
            .map(|bounds| cmp::max(*bounds.start(), viewport_start))?;
        // Position within the hint label.
        //
        // The (line, column) offset from the match start is flattened into a single
        // index. Since `advance` succeeded, `point` should not precede `start`;
        // `unwrap_or(0)` only guards against a negative intermediate.
        let line_delta = point.line.0 - start.line.0;
        let col_delta = point.column.0 as i32 - start.column.0 as i32;
        let label_position = usize::try_from(line_delta * num_cols as i32 + col_delta).unwrap_or(0);
        let is_first = label_position == 0;
        // Hint label character.
        //
        // Cells past the end of the label are still part of the match, which is
        // reported as `(None, false)`.
        let hint_char = self.labels[self.matches.index]
            .get(label_position)
            .copied()
            .map(|c| (Some(c), is_first))
            .unwrap_or((None, false));
        Some(hint_char)
    }
}
impl<'a> From<&'a HintState> for Hint<'a> {
    /// Snapshot the active hint state for rendering.
    fn from(hint_state: &'a HintState) -> Self {
        Self {
            matches: HintMatches::new(hint_state.matches()),
            labels: hint_state.labels(),
        }
    }
}
/// Visible hint match tracking.
#[derive(Default)]
struct HintMatches<'a> {
    /// All visible matches.
    matches: Cow<'a, [Match]>,
    /// Index of the last match checked.
    ///
    /// Only ever advanced while iterating over cells, so earlier matches are
    /// never rescanned.
    index: usize,
}
impl<'a> HintMatches<'a> {
    /// Create a new renderable matches iterator.
    fn new(matches: impl Into<Cow<'a, [Match]>>) -> Self {
        Self { index: 0, matches: matches.into() }
    }

    /// Create from regex matches on the terminal's visible part.
    fn visible_regex_matches<T>(term: &Term<T>, dfas: &mut RegexSearch) -> Self {
        let visible_matches: Vec<_> = hint::visible_regex_match_iter(term, dfas).collect();
        Self::new(visible_matches)
    }

    /// Advance the regex tracker to the next point.
    ///
    /// This will return `true` if the point passed is part of a regex match.
    fn advance(&mut self, point: Point) -> bool {
        loop {
            let bounds = match self.get(self.index) {
                Some(bounds) => bounds,
                None => return false,
            };

            if bounds.start() > &point {
                // The next match starts after this point; not inside any match.
                return false;
            } else if bounds.end() < &point {
                // This match ended before the point; try the next one.
                self.index += 1;
            } else {
                return true;
            }
        }
    }
}
impl Deref for HintMatches<'_> {
    type Target = [Match];

    /// Expose the underlying matches as a slice.
    fn deref(&self) -> &Self::Target {
        &self.matches
    }
}
| rust | {
"argument_definitions": [
{
"definitions": [
"pub struct RenderableContent<'a> {\n terminal_content: TerminalContent<'a>,\n cursor: RenderableCursor,\n cursor_shape: CursorShape,\n cursor_point: Point<usize>,\n search: Option<HintMatches<'a>>,\n hint: Option<Hint<'a>>,\n config: &'a UiConfig,\n colors: &'a List,\n focused_match: Option<&'a Match>,\n size: &'a SizeInfo,\n}"
],
"name": "content",
"type": "&RenderableContent<'_>"
},
{
"definitions": [
"pub enum Color {\n Named(NamedColor),\n Spec(Rgb),\n Indexed(u8),\n}"
],
"name": "fg",
"type": "Color"
},
{
"definitions": [
" pub struct Flags: u16 {\n const INVERSE = 0b0000_0000_0000_0001;\n const BOLD = 0b0000_0000_0000_0010;\n const ITALIC = 0b0000_0000_0000_0100;\n const BOLD_ITALIC = 0b0000_0000_0000_0110;\n const UNDERLINE = 0b0000_0000_0000_1000;\n const WRAPLINE = 0b0000_0000_0001_0000;\n const WIDE_CHAR = 0b0000_0000_0010_0000;\n const WIDE_CHAR_SPACER = 0b0000_0000_0100_0000;\n const DIM = 0b0000_0000_1000_0000;\n const DIM_BOLD = 0b0000_0000_1000_0010;\n const HIDDEN = 0b0000_0001_0000_0000;\n const STRIKEOUT = 0b0000_0010_0000_0000;\n const LEADING_WIDE_CHAR_SPACER = 0b0000_0100_0000_0000;\n const DOUBLE_UNDERLINE = 0b0000_1000_0000_0000;\n const UNDERCURL = 0b0001_0000_0000_0000;\n const DOTTED_UNDERLINE = 0b0010_0000_0000_0000;\n const DASHED_UNDERLINE = 0b0100_0000_0000_0000;\n const ALL_UNDERLINES = Self::UNDERLINE.bits() | Self::DOUBLE_UNDERLINE.bits()\n | Self::UNDERCURL.bits() | Self::DOTTED_UNDERLINE.bits()\n | Self::DASHED_UNDERLINE.bits();\n }"
],
"name": "flags",
"type": "Flags"
}
],
"end_line": 370,
"name": "compute_fg_rgb",
"signature": "fn compute_fg_rgb(content: &RenderableContent<'_>, fg: Color, flags: Flags) -> Rgb",
"start_line": 326
} | {
"class_name": "impl RenderableCell {\n fn new(content: &mut RenderableContent<'_>, cell: Indexed<&Cell>) -> Self {\n // Lookup RGB values.\n let mut fg = Self::compute_fg_rgb(content, cell.fg, cell.flags);\n let mut bg = Self::compute_bg_rgb(content, cell.bg);\n\n let mut bg_alpha = if cell.flags.contains(Flags::INVERSE) {\n mem::swap(&mut fg, &mut bg);\n 1.0\n } else {\n Self::compute_bg_alpha(content.config, cell.bg)\n };\n\n let is_selected = content.terminal_content.selection.is_some_and(|selection| {\n selection.contains_cell(\n &cell,\n content.terminal_content.cursor.point,\n content.cursor_shape,\n )\n });\n\n let display_offset = content.terminal_content.display_offset;\n let viewport_start = Point::new(Line(-(display_offset as i32)), Column(0));\n let colors = &content.config.colors;\n let mut character = cell.c;\n let mut flags = cell.flags;\n\n let num_cols = content.size.columns();\n if let Some((c, is_first)) = content\n .hint\n .as_mut()\n .and_then(|hint| hint.advance(viewport_start, num_cols, cell.point))\n {\n if is_first {\n let (config_fg, config_bg) =\n (colors.hints.start.foreground, colors.hints.start.background);\n Self::compute_cell_rgb(&mut fg, &mut bg, &mut bg_alpha, config_fg, config_bg);\n } else if c.is_some() {\n let (config_fg, config_bg) =\n (colors.hints.end.foreground, colors.hints.end.background);\n Self::compute_cell_rgb(&mut fg, &mut bg, &mut bg_alpha, config_fg, config_bg);\n } else {\n flags.insert(Flags::UNDERLINE);\n }\n\n character = c.unwrap_or(character);\n } else if is_selected {\n let config_fg = colors.selection.foreground;\n let config_bg = colors.selection.background;\n Self::compute_cell_rgb(&mut fg, &mut bg, &mut bg_alpha, config_fg, config_bg);\n\n if fg == bg && !cell.flags.contains(Flags::HIDDEN) {\n // Reveal inversed text when fg/bg is the same.\n fg = content.color(NamedColor::Background as usize);\n bg = content.color(NamedColor::Foreground as usize);\n bg_alpha = 1.0;\n }\n } else if 
content.search.as_mut().is_some_and(|search| search.advance(cell.point)) {\n let focused = content.focused_match.is_some_and(|fm| fm.contains(&cell.point));\n let (config_fg, config_bg) = if focused {\n (colors.search.focused_match.foreground, colors.search.focused_match.background)\n } else {\n (colors.search.matches.foreground, colors.search.matches.background)\n };\n Self::compute_cell_rgb(&mut fg, &mut bg, &mut bg_alpha, config_fg, config_bg);\n }\n\n // Apply transparency to all renderable cells if `transparent_background_colors` is set\n if bg_alpha > 0. && content.config.colors.transparent_background_colors {\n bg_alpha = content.config.window_opacity();\n }\n\n // Convert cell point to viewport position.\n let cell_point = cell.point;\n let point = term::point_to_viewport(display_offset, cell_point).unwrap();\n\n let underline = cell\n .underline_color()\n .map_or(fg, |underline| Self::compute_fg_rgb(content, underline, flags));\n\n let zerowidth = cell.zerowidth();\n let hyperlink = cell.hyperlink();\n\n let extra = (zerowidth.is_some() || hyperlink.is_some()).then(|| {\n Box::new(RenderableCellExtra {\n zerowidth: zerowidth.map(|zerowidth| zerowidth.to_vec()),\n hyperlink,\n })\n });\n\n RenderableCell { flags, character, bg_alpha, point, fg, bg, underline, extra }\n }\n\n /// Check if cell contains any renderable content.\n fn is_empty(&self) -> bool {\n self.bg_alpha == 0.\n && self.character == ' '\n && self.extra.is_none()\n && !self.flags.intersects(Flags::ALL_UNDERLINES | Flags::STRIKEOUT)\n }\n\n /// Apply [`CellRgb`] colors to the cell's colors.\n fn compute_cell_rgb(\n cell_fg: &mut Rgb,\n cell_bg: &mut Rgb,\n bg_alpha: &mut f32,\n fg: CellRgb,\n bg: CellRgb,\n ) {\n let old_fg = mem::replace(cell_fg, fg.color(*cell_fg, *cell_bg));\n *cell_bg = bg.color(old_fg, *cell_bg);\n\n if bg != CellRgb::CellBackground {\n *bg_alpha = 1.0;\n }\n }\n\n /// Get the RGB color from a cell's foreground color.\n fn compute_fg_rgb(content: &RenderableContent<'_>, 
fg: Color, flags: Flags) -> Rgb {\n let config = &content.config;\n match fg {\n Color::Spec(rgb) => match flags & Flags::DIM {\n Flags::DIM => {\n let rgb: Rgb = rgb.into();\n rgb * DIM_FACTOR\n },\n _ => rgb.into(),\n },\n Color::Named(ansi) => {\n match (config.colors.draw_bold_text_with_bright_colors, flags & Flags::DIM_BOLD) {\n // If no bright foreground is set, treat it like the BOLD flag doesn't exist.\n (_, Flags::DIM_BOLD)\n if ansi == NamedColor::Foreground\n && config.colors.primary.bright_foreground.is_none() =>\n {\n content.color(NamedColor::DimForeground as usize)\n },\n // Draw bold text in bright colors *and* contains bold flag.\n (true, Flags::BOLD) => content.color(ansi.to_bright() as usize),\n // Cell is marked as dim and not bold.\n (_, Flags::DIM) | (false, Flags::DIM_BOLD) => {\n content.color(ansi.to_dim() as usize)\n },\n // None of the above, keep original color..\n _ => content.color(ansi as usize),\n }\n },\n Color::Indexed(idx) => {\n let idx = match (\n config.colors.draw_bold_text_with_bright_colors,\n flags & Flags::DIM_BOLD,\n idx,\n ) {\n (true, Flags::BOLD, 0..=7) => idx as usize + 8,\n (false, Flags::DIM, 8..=15) => idx as usize - 8,\n (false, Flags::DIM, 0..=7) => NamedColor::DimBlack as usize + idx as usize,\n _ => idx as usize,\n };\n\n content.color(idx)\n },\n }\n }\n\n /// Get the RGB color from a cell's background color.\n #[inline]\n fn compute_bg_rgb(content: &RenderableContent<'_>, bg: Color) -> Rgb {\n match bg {\n Color::Spec(rgb) => rgb.into(),\n Color::Named(ansi) => content.color(ansi as usize),\n Color::Indexed(idx) => content.color(idx as usize),\n }\n }\n\n /// Compute background alpha based on cell's original color.\n ///\n /// Since an RGB color matching the background should not be transparent, this is computed\n /// using the named input color, rather than checking the RGB of the background after its color\n /// is computed.\n #[inline]\n fn compute_bg_alpha(config: &UiConfig, bg: Color) -> f32 {\n if bg == 
Color::Named(NamedColor::Background) {\n 0.\n } else if config.colors.transparent_background_colors {\n config.window_opacity()\n } else {\n 1.\n }\n }\n}",
"class_signature": "impl RenderableCell"
} |
advance | alacritty-master/alacritty/src/display/content.rs | fn advance(
&mut self,
viewport_start: Point,
num_cols: usize,
point: Point,
) -> Option<(Option<char>, bool)> {
// Check if we're within a match at all.
if !self.matches.advance(point) {
return None;
}
// Match starting position on this line; linebreaks interrupt the hint labels.
let start = self
.matches
.get(self.matches.index)
.map(|bounds| cmp::max(*bounds.start(), viewport_start))?;
// Position within the hint label.
let line_delta = point.line.0 - start.line.0;
let col_delta = point.column.0 as i32 - start.column.0 as i32;
let label_position = usize::try_from(line_delta * num_cols as i32 + col_delta).unwrap_or(0);
let is_first = label_position == 0;
// Hint label character.
let hint_char = self.labels[self.matches.index]
.get(label_position)
.copied()
.map(|c| (Some(c), is_first))
.unwrap_or((None, false));
Some(hint_char)
} | use std::borrow::Cow;
use std::num::NonZeroU32;
use std::ops::Deref;
use std::{cmp, mem};
use alacritty_terminal::event::EventListener;
use alacritty_terminal::grid::{Dimensions, Indexed};
use alacritty_terminal::index::{Column, Line, Point};
use alacritty_terminal::selection::SelectionRange;
use alacritty_terminal::term::cell::{Cell, Flags, Hyperlink};
use alacritty_terminal::term::search::{Match, RegexSearch};
use alacritty_terminal::term::{self, RenderableContent as TerminalContent, Term, TermMode};
use alacritty_terminal::vte::ansi::{Color, CursorShape, NamedColor};
use crate::config::UiConfig;
use crate::display::color::{CellRgb, List, Rgb, DIM_FACTOR};
use crate::display::hint::{self, HintState};
use crate::display::{Display, SizeInfo};
use crate::event::SearchState;
/// Minimum contrast between a fixed cursor color and the cell's background.
pub const MIN_CURSOR_CONTRAST: f64 = 1.5;
/// Renderable terminal content.
///
/// This provides the terminal cursor and an iterator over all non-empty cells.
pub struct RenderableContent<'a> {
    /// Content snapshot taken from the terminal grid.
    terminal_content: TerminalContent<'a>,
    /// Cursor prepared for rendering; filled in while iterating over the cells.
    cursor: RenderableCursor,
    /// Shape the cursor should be drawn with.
    cursor_shape: CursorShape,
    /// Cursor position in viewport coordinates.
    cursor_point: Point<usize>,
    /// Visible search matches, present only while a search is active.
    search: Option<HintMatches<'a>>,
    /// Active regex hints, present only while hint mode is active.
    hint: Option<Hint<'a>>,
    /// UI configuration used for color and cursor decisions.
    config: &'a UiConfig,
    /// Configured color palette, used when the terminal has not set a color.
    colors: &'a List,
    /// Currently focused search match, if any.
    focused_match: Option<&'a Match>,
    /// Terminal size information.
    size: &'a SizeInfo,
}
impl<'a> RenderableContent<'a> {
    /// Assemble the renderable state from the terminal, display, and search state.
    pub fn new<T: EventListener>(
        config: &'a UiConfig,
        display: &'a mut Display,
        term: &'a Term<T>,
        search_state: &'a mut SearchState,
    ) -> Self {
        // Collect visible search matches only while a search is active.
        let search = search_state.dfas().map(|dfas| HintMatches::visible_regex_matches(term, dfas));
        let focused_match = search_state.focused_match();
        let terminal_content = term.renderable_content();
        // Find terminal cursor shape.
        //
        // The cursor is hidden while searching or preediting, and hollow when the
        // terminal is unfocused (if configured).
        let cursor_shape = if terminal_content.cursor.shape == CursorShape::Hidden
            || display.cursor_hidden
            || search_state.regex().is_some()
            || display.ime.preedit().is_some()
        {
            CursorShape::Hidden
        } else if !term.is_focused && config.cursor.unfocused_hollow {
            CursorShape::HollowBlock
        } else {
            terminal_content.cursor.shape
        };
        // Convert terminal cursor point to viewport position.
        let cursor_point = terminal_content.cursor.point;
        let display_offset = terminal_content.display_offset;
        let cursor_point = term::point_to_viewport(display_offset, cursor_point).unwrap();
        // Refresh and snapshot hint matches only while hint mode is active.
        let hint = if display.hint_state.active() {
            display.hint_state.update_matches(term);
            Some(Hint::from(&display.hint_state))
        } else {
            None
        };
        Self {
            colors: &display.colors,
            size: &display.size_info,
            // Placeholder; the real cursor is produced while iterating the cells.
            cursor: RenderableCursor::new_hidden(),
            terminal_content,
            focused_match,
            cursor_shape,
            cursor_point,
            search,
            config,
            hint,
        }
    }
    /// Viewport offset.
    pub fn display_offset(&self) -> usize {
        self.terminal_content.display_offset
    }
    /// Get the terminal cursor.
    pub fn cursor(mut self) -> RenderableCursor {
        // Assure this function is only called after the iterator has been drained.
        debug_assert!(self.next().is_none());
        self.cursor
    }
    /// Get the RGB value for a color index.
    ///
    /// Colors set by the terminal take precedence over the configured palette.
    pub fn color(&self, color: usize) -> Rgb {
        self.terminal_content.colors[color].map(Rgb).unwrap_or(self.colors[color])
    }
    /// Selection range in the visible grid, if any.
    pub fn selection_range(&self) -> Option<SelectionRange> {
        self.terminal_content.selection
    }
    /// Assemble the information required to render the terminal cursor.
    fn renderable_cursor(&mut self, cell: &RenderableCell) -> RenderableCursor {
        // Cursor colors.
        let color = if self.terminal_content.mode.contains(TermMode::VI) {
            self.config.colors.vi_mode_cursor
        } else {
            self.config.colors.cursor
        };
        let cursor_color = self.terminal_content.colors[NamedColor::Cursor]
            .map_or(color.background, |c| CellRgb::Rgb(Rgb(c)));
        let text_color = color.foreground;
        // Cell-derived cursor colors with too little fg/bg contrast would make the
        // cursor hard to see.
        let insufficient_contrast = (!matches!(cursor_color, CellRgb::Rgb(_))
            || !matches!(text_color, CellRgb::Rgb(_)))
            && cell.fg.contrast(*cell.bg) < MIN_CURSOR_CONTRAST;
        // Convert from cell colors to RGB.
        let mut text_color = text_color.color(cell.fg, cell.bg);
        let mut cursor_color = cursor_color.color(cell.fg, cell.bg);
        // Invert cursor color with insufficient contrast to prevent invisible cursors.
        if insufficient_contrast {
            cursor_color = self.config.colors.primary.foreground;
            text_color = self.config.colors.primary.background;
        }
        // Wide characters get a double-width cursor.
        let width = if cell.flags.contains(Flags::WIDE_CHAR) {
            NonZeroU32::new(2).unwrap()
        } else {
            NonZeroU32::new(1).unwrap()
        };
        RenderableCursor {
            width,
            shape: self.cursor_shape,
            point: self.cursor_point,
            cursor_color,
            text_color,
        }
    }
}
impl Iterator for RenderableContent<'_> {
    type Item = RenderableCell;
    /// Gets the next renderable cell.
    ///
    /// Skips empty (background) cells and applies any flags to the cell state
    /// (eg. invert fg and bg colors).
    #[inline]
    fn next(&mut self) -> Option<Self::Item> {
        loop {
            let cell = self.terminal_content.display_iter.next()?;
            let mut cell = RenderableCell::new(self, cell);
            if self.cursor_point == cell.point {
                // Store the cursor which should be rendered.
                self.cursor = self.renderable_cursor(&cell);
                if self.cursor.shape == CursorShape::Block {
                    cell.fg = self.cursor.text_color;
                    cell.bg = self.cursor.cursor_color;
                    // Since we draw Block cursor by drawing cell below it with a proper color,
                    // we must adjust alpha to make it visible.
                    cell.bg_alpha = 1.;
                }
                // The cursor cell is returned even when it would otherwise be
                // skipped as empty.
                return Some(cell);
            } else if !cell.is_empty() && !cell.flags.contains(Flags::WIDE_CHAR_SPACER) {
                // Skip empty cells and wide char spacers.
                return Some(cell);
            }
        }
    }
}
/// Cell ready for rendering.
#[derive(Clone, Debug)]
pub struct RenderableCell {
    /// Character to draw in the cell.
    pub character: char,
    /// Cell position in viewport coordinates.
    pub point: Point<usize>,
    /// Foreground color.
    pub fg: Rgb,
    /// Background color.
    pub bg: Rgb,
    /// Background color opacity; `0.` for default-background cells.
    pub bg_alpha: f32,
    /// Underline color.
    pub underline: Rgb,
    /// Cell attribute flags.
    pub flags: Flags,
    /// Rarely present extra data; boxed to keep the cell itself small.
    pub extra: Option<Box<RenderableCellExtra>>,
}
/// Extra storage with rarely present fields for [`RenderableCell`], to reduce the cell size we
/// pass around.
#[derive(Clone, Debug)]
pub struct RenderableCellExtra {
    /// Zero-width characters stacked on this cell, if any.
    pub zerowidth: Option<Vec<char>>,
    /// Hyperlink attached to this cell, if any.
    pub hyperlink: Option<Hyperlink>,
}
impl RenderableCell {
    /// Build a renderable cell from a grid cell, resolving its colors and
    /// applying hint, selection, and search highlighting.
    fn new(content: &mut RenderableContent<'_>, cell: Indexed<&Cell>) -> Self {
        // Lookup RGB values.
        let mut fg = Self::compute_fg_rgb(content, cell.fg, cell.flags);
        let mut bg = Self::compute_bg_rgb(content, cell.bg);
        // Inverse video swaps fg/bg and forces the background opaque.
        let mut bg_alpha = if cell.flags.contains(Flags::INVERSE) {
            mem::swap(&mut fg, &mut bg);
            1.0
        } else {
            Self::compute_bg_alpha(content.config, cell.bg)
        };
        let is_selected = content.terminal_content.selection.is_some_and(|selection| {
            selection.contains_cell(
                &cell,
                content.terminal_content.cursor.point,
                content.cursor_shape,
            )
        });
        let display_offset = content.terminal_content.display_offset;
        let viewport_start = Point::new(Line(-(display_offset as i32)), Column(0));
        let colors = &content.config.colors;
        let mut character = cell.c;
        let mut flags = cell.flags;
        let num_cols = content.size.columns();
        // Hint highlighting takes precedence over selection and search.
        if let Some((c, is_first)) = content
            .hint
            .as_mut()
            .and_then(|hint| hint.advance(viewport_start, num_cols, cell.point))
        {
            if is_first {
                let (config_fg, config_bg) =
                    (colors.hints.start.foreground, colors.hints.start.background);
                Self::compute_cell_rgb(&mut fg, &mut bg, &mut bg_alpha, config_fg, config_bg);
            } else if c.is_some() {
                let (config_fg, config_bg) =
                    (colors.hints.end.foreground, colors.hints.end.background);
                Self::compute_cell_rgb(&mut fg, &mut bg, &mut bg_alpha, config_fg, config_bg);
            } else {
                // Inside the match but past the label; underline instead of recoloring.
                flags.insert(Flags::UNDERLINE);
            }
            // Replace the cell's character with the hint label character, if any.
            character = c.unwrap_or(character);
        } else if is_selected {
            let config_fg = colors.selection.foreground;
            let config_bg = colors.selection.background;
            Self::compute_cell_rgb(&mut fg, &mut bg, &mut bg_alpha, config_fg, config_bg);
            if fg == bg && !cell.flags.contains(Flags::HIDDEN) {
                // Reveal inversed text when fg/bg is the same.
                fg = content.color(NamedColor::Background as usize);
                bg = content.color(NamedColor::Foreground as usize);
                bg_alpha = 1.0;
            }
        } else if content.search.as_mut().is_some_and(|search| search.advance(cell.point)) {
            // Cell is part of a search match; highlight the focused match differently.
            let focused = content.focused_match.is_some_and(|fm| fm.contains(&cell.point));
            let (config_fg, config_bg) = if focused {
                (colors.search.focused_match.foreground, colors.search.focused_match.background)
            } else {
                (colors.search.matches.foreground, colors.search.matches.background)
            };
            Self::compute_cell_rgb(&mut fg, &mut bg, &mut bg_alpha, config_fg, config_bg);
        }
        // Apply transparency to all renderable cells if `transparent_background_colors` is set
        if bg_alpha > 0. && content.config.colors.transparent_background_colors {
            bg_alpha = content.config.window_opacity();
        }
        // Convert cell point to viewport position.
        let cell_point = cell.point;
        let point = term::point_to_viewport(display_offset, cell_point).unwrap();
        // An underline without an explicit color falls back to the foreground.
        let underline = cell
            .underline_color()
            .map_or(fg, |underline| Self::compute_fg_rgb(content, underline, flags));
        let zerowidth = cell.zerowidth();
        let hyperlink = cell.hyperlink();
        // Only allocate the extra storage when the cell actually carries it.
        let extra = (zerowidth.is_some() || hyperlink.is_some()).then(|| {
            Box::new(RenderableCellExtra {
                zerowidth: zerowidth.map(|zerowidth| zerowidth.to_vec()),
                hyperlink,
            })
        });
        RenderableCell { flags, character, bg_alpha, point, fg, bg, underline, extra }
    }
    /// Check if cell contains any renderable content.
    fn is_empty(&self) -> bool {
        self.bg_alpha == 0.
            && self.character == ' '
            && self.extra.is_none()
            && !self.flags.intersects(Flags::ALL_UNDERLINES | Flags::STRIKEOUT)
    }
    /// Apply [`CellRgb`] colors to the cell's colors.
    fn compute_cell_rgb(
        cell_fg: &mut Rgb,
        cell_bg: &mut Rgb,
        bg_alpha: &mut f32,
        fg: CellRgb,
        bg: CellRgb,
    ) {
        // Resolve the foreground first; the background is resolved against the
        // pre-update foreground.
        let old_fg = mem::replace(cell_fg, fg.color(*cell_fg, *cell_bg));
        *cell_bg = bg.color(old_fg, *cell_bg);
        if bg != CellRgb::CellBackground {
            *bg_alpha = 1.0;
        }
    }
    /// Get the RGB color from a cell's foreground color.
    ///
    /// Applies DIM/BOLD handling based on the configured palette.
    fn compute_fg_rgb(content: &RenderableContent<'_>, fg: Color, flags: Flags) -> Rgb {
        let config = &content.config;
        match fg {
            Color::Spec(rgb) => match flags & Flags::DIM {
                Flags::DIM => {
                    let rgb: Rgb = rgb.into();
                    rgb * DIM_FACTOR
                },
                _ => rgb.into(),
            },
            Color::Named(ansi) => {
                match (config.colors.draw_bold_text_with_bright_colors, flags & Flags::DIM_BOLD) {
                    // If no bright foreground is set, treat it like the BOLD flag doesn't exist.
                    (_, Flags::DIM_BOLD)
                        if ansi == NamedColor::Foreground
                            && config.colors.primary.bright_foreground.is_none() =>
                    {
                        content.color(NamedColor::DimForeground as usize)
                    },
                    // Draw bold text in bright colors *and* contains bold flag.
                    (true, Flags::BOLD) => content.color(ansi.to_bright() as usize),
                    // Cell is marked as dim and not bold.
                    (_, Flags::DIM) | (false, Flags::DIM_BOLD) => {
                        content.color(ansi.to_dim() as usize)
                    },
                    // None of the above, keep original color.
                    _ => content.color(ansi as usize),
                }
            },
            Color::Indexed(idx) => {
                // Shift indexed colors to their bright/dim palette variant.
                let idx = match (
                    config.colors.draw_bold_text_with_bright_colors,
                    flags & Flags::DIM_BOLD,
                    idx,
                ) {
                    (true, Flags::BOLD, 0..=7) => idx as usize + 8,
                    (false, Flags::DIM, 8..=15) => idx as usize - 8,
                    (false, Flags::DIM, 0..=7) => NamedColor::DimBlack as usize + idx as usize,
                    _ => idx as usize,
                };
                content.color(idx)
            },
        }
    }
    /// Get the RGB color from a cell's background color.
    #[inline]
    fn compute_bg_rgb(content: &RenderableContent<'_>, bg: Color) -> Rgb {
        match bg {
            Color::Spec(rgb) => rgb.into(),
            Color::Named(ansi) => content.color(ansi as usize),
            Color::Indexed(idx) => content.color(idx as usize),
        }
    }
    /// Compute background alpha based on cell's original color.
    ///
    /// Since an RGB color matching the background should not be transparent, this is computed
    /// using the named input color, rather than checking the RGB of the background after its color
    /// is computed.
    #[inline]
    fn compute_bg_alpha(config: &UiConfig, bg: Color) -> f32 {
        if bg == Color::Named(NamedColor::Background) {
            0.
        } else if config.colors.transparent_background_colors {
            config.window_opacity()
        } else {
            1.
        }
    }
}
/// Cursor storing all information relevant for rendering.
#[derive(Debug, Eq, PartialEq, Copy, Clone)]
pub struct RenderableCursor {
    /// Shape the cursor is drawn with.
    shape: CursorShape,
    /// Color of the cursor itself.
    cursor_color: Rgb,
    /// Color of the text under a block cursor.
    text_color: Rgb,
    /// Cursor width in cells.
    width: NonZeroU32,
    /// Cursor position in viewport coordinates.
    point: Point<usize>,
}
impl RenderableCursor {
    /// Create a cursor that will not be drawn.
    ///
    /// Used as a placeholder until the real cursor cell is encountered while
    /// iterating over the renderable content.
    fn new_hidden() -> Self {
        Self {
            shape: CursorShape::Hidden,
            cursor_color: Rgb::default(),
            text_color: Rgb::default(),
            width: NonZeroU32::new(1).unwrap(),
            point: Point::default(),
        }
    }

    /// Create a cursor for rendering.
    ///
    /// The text color is initialized to the cursor color.
    pub fn new(
        point: Point<usize>,
        shape: CursorShape,
        cursor_color: Rgb,
        width: NonZeroU32,
    ) -> Self {
        Self { shape, cursor_color, text_color: cursor_color, width, point }
    }

    /// Color the cursor is drawn with.
    pub fn color(&self) -> Rgb {
        self.cursor_color
    }

    /// Shape the cursor is drawn with.
    pub fn shape(&self) -> CursorShape {
        self.shape
    }

    /// Cursor width in cells.
    pub fn width(&self) -> NonZeroU32 {
        self.width
    }

    /// Cursor position in viewport coordinates.
    pub fn point(&self) -> Point<usize> {
        self.point
    }
}
/// Regex hints for keyboard shortcuts.
struct Hint<'a> {
    /// Hint matches and position.
    matches: HintMatches<'a>,
    /// Hint label characters for each match, indexed in parallel with
    /// `matches`.
    labels: &'a Vec<Vec<char>>,
}
impl Hint<'_> {
    /// Advance the hint iterator.
    ///
    /// If the point is within a hint, the keyboard shortcut character that should be displayed at
    /// this position will be returned.
    ///
    /// The tuple's [`bool`] will be `true` when the character is the first for this hint.
    ///
    /// The tuple's [`Option<char>`] will be [`None`] when the point is part of the match, but not
    /// part of the hint label.
    fn advance(
        &mut self,
        viewport_start: Point,
        num_cols: usize,
        point: Point,
    ) -> Option<(Option<char>, bool)> {
        // Check if we're within a match at all.
        if !self.matches.advance(point) {
            return None;
        }
        // Match starting position on this line; linebreaks interrupt the hint labels.
        //
        // Clamping to `viewport_start` anchors the label of a match that begins
        // above the viewport to the first visible cell.
        let start = self
            .matches
            .get(self.matches.index)
            .map(|bounds| cmp::max(*bounds.start(), viewport_start))?;
        // Position within the hint label.
        //
        // The (line, column) offset from the match start is flattened into a single
        // index. Since `advance` succeeded, `point` should not precede `start`;
        // `unwrap_or(0)` only guards against a negative intermediate.
        let line_delta = point.line.0 - start.line.0;
        let col_delta = point.column.0 as i32 - start.column.0 as i32;
        let label_position = usize::try_from(line_delta * num_cols as i32 + col_delta).unwrap_or(0);
        let is_first = label_position == 0;
        // Hint label character.
        //
        // Cells past the end of the label are still part of the match, which is
        // reported as `(None, false)`.
        let hint_char = self.labels[self.matches.index]
            .get(label_position)
            .copied()
            .map(|c| (Some(c), is_first))
            .unwrap_or((None, false));
        Some(hint_char)
    }
}
impl<'a> From<&'a HintState> for Hint<'a> {
    /// Snapshot the active hint state for rendering.
    fn from(hint_state: &'a HintState) -> Self {
        Self {
            matches: HintMatches::new(hint_state.matches()),
            labels: hint_state.labels(),
        }
    }
}
/// Visible hint match tracking.
#[derive(Default)]
struct HintMatches<'a> {
    /// All visible matches.
    matches: Cow<'a, [Match]>,
    /// Index of the last match checked.
    ///
    /// Only ever advanced while iterating over cells, so earlier matches are
    /// never rescanned.
    index: usize,
}
impl<'a> HintMatches<'a> {
    /// Create a new renderable matches iterator.
    fn new(matches: impl Into<Cow<'a, [Match]>>) -> Self {
        Self { index: 0, matches: matches.into() }
    }

    /// Create from regex matches on the terminal's visible part.
    fn visible_regex_matches<T>(term: &Term<T>, dfas: &mut RegexSearch) -> Self {
        let visible_matches: Vec<_> = hint::visible_regex_match_iter(term, dfas).collect();
        Self::new(visible_matches)
    }

    /// Advance the regex tracker to the next point.
    ///
    /// This will return `true` if the point passed is part of a regex match.
    fn advance(&mut self, point: Point) -> bool {
        loop {
            let bounds = match self.get(self.index) {
                Some(bounds) => bounds,
                None => return false,
            };

            if bounds.start() > &point {
                // The next match starts after this point; not inside any match.
                return false;
            } else if bounds.end() < &point {
                // This match ended before the point; try the next one.
                self.index += 1;
            } else {
                return true;
            }
        }
    }
}
impl Deref for HintMatches<'_> {
    type Target = [Match];

    /// Expose the underlying matches as a slice.
    fn deref(&self) -> &Self::Target {
        &self.matches
    }
}
| rust | {
"argument_definitions": [
{
"definitions": [
"pub struct Point<L = Line, C = Column> {\n pub line: L,\n pub column: C,\n}"
],
"name": "viewport_start",
"type": "Point"
},
{
"definitions": [
"pub struct Point<L = Line, C = Column> {\n pub line: L,\n pub column: C,\n}"
],
"name": "point",
"type": "Point"
}
],
"end_line": 497,
"name": "advance",
"signature": "fn advance(\n &mut self,\n viewport_start: Point,\n num_cols: usize,\n point: Point,\n ) -> Option<(Option<char>, bool)>",
"start_line": 466
} | {
"class_name": "impl Hint<'_> {\n /// Advance the hint iterator.\n ///\n /// If the point is within a hint, the keyboard shortcut character that should be displayed at\n /// this position will be returned.\n ///\n /// The tuple's [`bool`] will be `true` when the character is the first for this hint.\n ///\n /// The tuple's [`Option<char>`] will be [`None`] when the point is part of the match, but not\n /// part of the hint label.\n fn advance(\n &mut self,\n viewport_start: Point,\n num_cols: usize,\n point: Point,\n ) -> Option<(Option<char>, bool)> {\n // Check if we're within a match at all.\n if !self.matches.advance(point) {\n return None;\n }\n\n // Match starting position on this line; linebreaks interrupt the hint labels.\n let start = self\n .matches\n .get(self.matches.index)\n .map(|bounds| cmp::max(*bounds.start(), viewport_start))?;\n\n // Position within the hint label.\n let line_delta = point.line.0 - start.line.0;\n let col_delta = point.column.0 as i32 - start.column.0 as i32;\n let label_position = usize::try_from(line_delta * num_cols as i32 + col_delta).unwrap_or(0);\n let is_first = label_position == 0;\n\n // Hint label character.\n let hint_char = self.labels[self.matches.index]\n .get(label_position)\n .copied()\n .map(|c| (Some(c), is_first))\n .unwrap_or((None, false));\n\n Some(hint_char)\n }\n}",
"class_signature": "impl Hint<'_>"
} |
get_platform_window | alacritty-master/alacritty/src/display/window.rs | pub fn get_platform_window(
identity: &Identity,
window_config: &WindowConfig,
#[cfg(all(feature = "x11", not(any(target_os = "macos", windows))))] x11_visual: Option<
X11VisualInfo,
>,
) -> WindowAttributes {
#[cfg(feature = "x11")]
let icon = {
let mut decoder = Decoder::new(Cursor::new(WINDOW_ICON));
decoder.set_transformations(png::Transformations::normalize_to_color8());
let mut reader = decoder.read_info().expect("invalid embedded icon");
let mut buf = vec![0; reader.output_buffer_size()];
let _ = reader.next_frame(&mut buf);
Icon::from_rgba(buf, reader.info().width, reader.info().height)
.expect("invalid embedded icon format")
};
let builder = WinitWindow::default_attributes()
.with_name(&identity.class.general, &identity.class.instance)
.with_decorations(window_config.decorations != Decorations::None);
#[cfg(feature = "x11")]
let builder = builder.with_window_icon(Some(icon));
#[cfg(feature = "x11")]
let builder = match x11_visual {
Some(visual) => builder.with_x11_visual(visual.visual_id() as u32),
None => builder,
};
builder
} | #[cfg(not(any(target_os = "macos", windows)))]
use winit::platform::startup_notify::{
self, EventLoopExtStartupNotify, WindowAttributesExtStartupNotify,
};
#[cfg(not(any(target_os = "macos", windows)))]
use winit::window::ActivationToken;
#[cfg(all(not(feature = "x11"), not(any(target_os = "macos", windows))))]
use winit::platform::wayland::WindowAttributesExtWayland;
#[rustfmt::skip]
#[cfg(all(feature = "x11", not(any(target_os = "macos", windows))))]
use {
std::io::Cursor,
winit::platform::x11::{WindowAttributesExtX11, ActiveEventLoopExtX11},
glutin::platform::x11::X11VisualInfo,
winit::window::Icon,
png::Decoder,
};
use std::fmt::{self, Display, Formatter};
#[cfg(target_os = "macos")]
use {
objc2::MainThreadMarker,
objc2_app_kit::{NSColorSpace, NSView},
winit::platform::macos::{OptionAsAlt, WindowAttributesExtMacOS, WindowExtMacOS},
};
use winit::dpi::{PhysicalPosition, PhysicalSize};
use winit::event_loop::ActiveEventLoop;
use winit::monitor::MonitorHandle;
#[cfg(windows)]
use winit::platform::windows::{IconExtWindows, WindowAttributesExtWindows};
use winit::raw_window_handle::{HasWindowHandle, RawWindowHandle};
use winit::window::{
CursorIcon, Fullscreen, ImePurpose, Theme, UserAttentionType, Window as WinitWindow,
WindowAttributes, WindowId,
};
use alacritty_terminal::index::Point;
use crate::cli::WindowOptions;
use crate::config::window::{Decorations, Identity, WindowConfig};
use crate::config::UiConfig;
use crate::display::SizeInfo;
/// Window icon for `_NET_WM_ICON` property.
#[cfg(all(feature = "x11", not(any(target_os = "macos", windows))))]
const WINDOW_ICON: &[u8] = include_bytes!("../../extra/logo/compat/alacritty-term.png");
/// This should match the definition of IDI_ICON from `alacritty.rc`.
#[cfg(windows)]
const IDI_ICON: u16 = 0x101;
/// Window errors.
#[derive(Debug)]
pub enum Error {
    /// Error creating the window.
    WindowCreation(winit::error::OsError),
    /// Error dealing with fonts.
    Font(crossfont::Error),
}
/// Result of fallible operations concerning a Window.
type Result<T> = std::result::Result<T, Error>;
impl std::error::Error for Error {
    fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
        // Forward to the wrapped error's own source; the wrapped error itself is
        // already surfaced through this type's `Display` implementation.
        match self {
            Error::WindowCreation(err) => err.source(),
            Error::Font(err) => err.source(),
        }
    }
}
impl Display for Error {
    /// Human-readable description, embedding the underlying error.
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        match self {
            Error::Font(err) => err.fmt(f),
            Error::WindowCreation(err) => write!(f, "Error creating GL context; {err}"),
        }
    }
}
impl From<winit::error::OsError> for Error {
fn from(val: winit::error::OsError) -> Self {
Error::WindowCreation(val)
}
}
impl From<crossfont::Error> for Error {
fn from(val: crossfont::Error) -> Self {
Error::Font(val)
}
}
/// A window which can be used for displaying the terminal.
///
/// Wraps the underlying windowing library to provide a stable API in Alacritty.
pub struct Window {
    /// Flag tracking that we have a frame we can draw.
    pub has_frame: bool,
    /// Cached scale factor for quickly scaling pixel sizes.
    pub scale_factor: f64,
    /// Flag indicating whether redraw was requested.
    pub requested_redraw: bool,
    /// Hold the window when terminal exits.
    pub hold: bool,
    /// Underlying winit window.
    window: WinitWindow,
    /// Current window title.
    title: String,
    /// Whether the window is backed by X11.
    is_x11: bool,
    /// Mouse cursor icon currently applied to the window.
    current_mouse_cursor: CursorIcon,
    /// Whether the mouse cursor is currently visible.
    mouse_visible: bool,
}
impl Window {
    /// Create a new window.
    ///
    /// This creates a window and fully initializes a window.
    pub fn new(
        event_loop: &ActiveEventLoop,
        config: &UiConfig,
        identity: &Identity,
        options: &mut WindowOptions,
        #[rustfmt::skip]
        #[cfg(all(feature = "x11", not(any(target_os = "macos", windows))))]
        x11_visual: Option<X11VisualInfo>,
    ) -> Result<Window> {
        let identity = identity.clone();
        let mut window_attributes = Window::get_platform_window(
            &identity,
            &config.window,
            #[cfg(all(feature = "x11", not(any(target_os = "macos", windows))))]
            x11_visual,
            #[cfg(target_os = "macos")]
            &options.window_tabbing_id.take(),
        );

        if let Some(position) = config.window.position {
            window_attributes = window_attributes
                .with_position(PhysicalPosition::<i32>::from((position.x, position.y)));
        }

        #[cfg(not(any(target_os = "macos", windows)))]
        if let Some(token) = options
            .activation_token
            .take()
            .map(ActivationToken::from_raw)
            .or_else(|| event_loop.read_token_from_env())
        {
            log::debug!("Activating window with token: {token:?}");
            window_attributes = window_attributes.with_activation_token(token);

            // Remove the token from the env.
            startup_notify::reset_activation_token_env();
        }

        // On X11, embed the window inside another if the parent ID has been set.
        #[cfg(all(feature = "x11", not(any(target_os = "macos", windows))))]
        if let Some(parent_window_id) = event_loop.is_x11().then_some(config.window.embed).flatten()
        {
            window_attributes = window_attributes.with_embed_parent_window(parent_window_id);
        }

        window_attributes = window_attributes
            .with_title(&identity.title)
            .with_theme(config.window.theme())
            .with_visible(false)
            .with_transparent(true)
            .with_blur(config.window.blur)
            .with_maximized(config.window.maximized())
            .with_fullscreen(config.window.fullscreen())
            .with_window_level(config.window.level.into());

        let window = event_loop.create_window(window_attributes)?;

        // Text cursor.
        let current_mouse_cursor = CursorIcon::Text;
        window.set_cursor(current_mouse_cursor);

        // Enable IME.
        window.set_ime_allowed(true);
        window.set_ime_purpose(ImePurpose::Terminal);

        // Set initial transparency hint.
        window.set_transparent(config.window_opacity() < 1.);

        #[cfg(target_os = "macos")]
        use_srgb_color_space(&window);

        let scale_factor = window.scale_factor();
        log::info!("Window scale factor: {}", scale_factor);

        let is_x11 = matches!(window.window_handle().unwrap().as_raw(), RawWindowHandle::Xlib(_));

        Ok(Self {
            hold: options.terminal_options.hold,
            requested_redraw: false,
            title: identity.title,
            current_mouse_cursor,
            mouse_visible: true,
            has_frame: true,
            scale_factor,
            window,
            is_x11,
        })
    }

    /// Raw handle of the underlying winit window.
    #[inline]
    pub fn raw_window_handle(&self) -> RawWindowHandle {
        self.window.window_handle().unwrap().as_raw()
    }

    /// Request a new inner (client area) size; the result is ignored.
    #[inline]
    pub fn request_inner_size(&self, size: PhysicalSize<u32>) {
        let _ = self.window.request_inner_size(size);
    }

    /// Current inner (client area) size in physical pixels.
    #[inline]
    pub fn inner_size(&self) -> PhysicalSize<u32> {
        self.window.inner_size()
    }

    /// Show or hide the window.
    #[inline]
    pub fn set_visible(&self, visibility: bool) {
        self.window.set_visible(visibility);
    }

    /// Bring the window into focus.
    #[cfg(target_os = "macos")]
    #[inline]
    pub fn focus_window(&self) {
        self.window.focus_window();
    }

    /// Set the window title.
    #[inline]
    pub fn set_title(&mut self, title: String) {
        self.title = title;
        self.window.set_title(&self.title);
    }

    /// Get the window title.
    #[inline]
    pub fn title(&self) -> &str {
        &self.title
    }

    /// Request a redraw, coalescing repeated requests until the flag is reset.
    #[inline]
    pub fn request_redraw(&mut self) {
        if !self.requested_redraw {
            self.requested_redraw = true;
            self.window.request_redraw();
        }
    }

    /// Set the mouse cursor icon, skipping the call if it is already active.
    #[inline]
    pub fn set_mouse_cursor(&mut self, cursor: CursorIcon) {
        if cursor != self.current_mouse_cursor {
            self.current_mouse_cursor = cursor;
            self.window.set_cursor(cursor);
        }
    }

    /// Set mouse cursor visible.
    pub fn set_mouse_visible(&mut self, visible: bool) {
        if visible != self.mouse_visible {
            self.mouse_visible = visible;
            self.window.set_cursor_visible(visible);
        }
    }

    /// Build platform-specific window attributes (Linux/BSD variant).
    #[cfg(not(any(target_os = "macos", windows)))]
    pub fn get_platform_window(
        identity: &Identity,
        window_config: &WindowConfig,
        #[cfg(all(feature = "x11", not(any(target_os = "macos", windows))))] x11_visual: Option<
            X11VisualInfo,
        >,
    ) -> WindowAttributes {
        // Decode the embedded PNG into an RGBA window icon.
        #[cfg(feature = "x11")]
        let icon = {
            let mut decoder = Decoder::new(Cursor::new(WINDOW_ICON));
            decoder.set_transformations(png::Transformations::normalize_to_color8());
            let mut reader = decoder.read_info().expect("invalid embedded icon");
            let mut buf = vec![0; reader.output_buffer_size()];
            // Best-effort decode; `from_rgba` below validates the result.
            let _ = reader.next_frame(&mut buf);
            Icon::from_rgba(buf, reader.info().width, reader.info().height)
                .expect("invalid embedded icon format")
        };

        let builder = WinitWindow::default_attributes()
            .with_name(&identity.class.general, &identity.class.instance)
            .with_decorations(window_config.decorations != Decorations::None);

        #[cfg(feature = "x11")]
        let builder = builder.with_window_icon(Some(icon));

        #[cfg(feature = "x11")]
        let builder = match x11_visual {
            Some(visual) => builder.with_x11_visual(visual.visual_id() as u32),
            None => builder,
        };

        builder
    }

    /// Build platform-specific window attributes (Windows variant).
    #[cfg(windows)]
    pub fn get_platform_window(_: &Identity, window_config: &WindowConfig) -> WindowAttributes {
        let icon = winit::window::Icon::from_resource(IDI_ICON, None);

        WinitWindow::default_attributes()
            .with_decorations(window_config.decorations != Decorations::None)
            .with_window_icon(icon.as_ref().ok().cloned())
            .with_taskbar_icon(icon.ok())
    }

    /// Build platform-specific window attributes (macOS variant).
    #[cfg(target_os = "macos")]
    pub fn get_platform_window(
        _: &Identity,
        window_config: &WindowConfig,
        tabbing_id: &Option<String>,
    ) -> WindowAttributes {
        let mut window =
            WinitWindow::default_attributes().with_option_as_alt(window_config.option_as_alt());

        if let Some(tabbing_id) = tabbing_id {
            window = window.with_tabbing_identifier(tabbing_id);
        }

        match window_config.decorations {
            Decorations::Full => window,
            Decorations::Transparent => window
                .with_title_hidden(true)
                .with_titlebar_transparent(true)
                .with_fullsize_content_view(true),
            Decorations::Buttonless => window
                .with_title_hidden(true)
                .with_titlebar_buttons_hidden(true)
                .with_titlebar_transparent(true)
                .with_fullsize_content_view(true),
            Decorations::None => window.with_titlebar_hidden(true),
        }
    }

    /// Request (or clear) user attention for the window.
    pub fn set_urgent(&self, is_urgent: bool) {
        let attention = if is_urgent { Some(UserAttentionType::Critical) } else { None };

        self.window.request_user_attention(attention);
    }

    /// Identifier of the underlying winit window.
    pub fn id(&self) -> WindowId {
        self.window.id()
    }

    /// Set the transparency hint on the window.
    pub fn set_transparent(&self, transparent: bool) {
        self.window.set_transparent(transparent);
    }

    /// Request background blur behind the window.
    pub fn set_blur(&self, blur: bool) {
        self.window.set_blur(blur);
    }

    /// Maximize or restore the window.
    pub fn set_maximized(&self, maximized: bool) {
        self.window.set_maximized(maximized);
    }

    /// Minimize or restore the window.
    pub fn set_minimized(&self, minimized: bool) {
        self.window.set_minimized(minimized);
    }

    /// Set the step sizes used when resizing the window.
    pub fn set_resize_increments(&self, increments: PhysicalSize<f32>) {
        self.window.set_resize_increments(Some(increments));
    }

    /// Toggle the window's fullscreen state.
    pub fn toggle_fullscreen(&self) {
        self.set_fullscreen(self.window.fullscreen().is_none());
    }

    /// Toggle the window's maximized state.
    pub fn toggle_maximized(&self) {
        self.set_maximized(!self.window.is_maximized());
    }

    /// Inform windowing system about presenting to the window.
    ///
    /// Should be called right before presenting to the window with e.g. `eglSwapBuffers`.
    pub fn pre_present_notify(&self) {
        self.window.pre_present_notify();
    }

    /// Set the system decorations theme variant.
    pub fn set_theme(&self, theme: Option<Theme>) {
        self.window.set_theme(theme);
    }

    /// Toggle macOS "simple" (non-native) fullscreen.
    #[cfg(target_os = "macos")]
    pub fn toggle_simple_fullscreen(&self) {
        self.set_simple_fullscreen(!self.window.simple_fullscreen());
    }

    /// Control which `Option` key is treated as `Alt`.
    #[cfg(target_os = "macos")]
    pub fn set_option_as_alt(&self, option_as_alt: OptionAsAlt) {
        self.window.set_option_as_alt(option_as_alt);
    }

    /// Enter or leave borderless fullscreen on the current monitor.
    pub fn set_fullscreen(&self, fullscreen: bool) {
        if fullscreen {
            self.window.set_fullscreen(Some(Fullscreen::Borderless(None)));
        } else {
            self.window.set_fullscreen(None);
        }
    }

    /// Monitor the window currently resides on, if any.
    pub fn current_monitor(&self) -> Option<MonitorHandle> {
        self.window.current_monitor()
    }

    /// Enter or leave macOS "simple" (non-native) fullscreen.
    #[cfg(target_os = "macos")]
    pub fn set_simple_fullscreen(&self, simple_fullscreen: bool) {
        self.window.set_simple_fullscreen(simple_fullscreen);
    }

    /// Enable or disable IME handling for the window.
    pub fn set_ime_allowed(&self, allowed: bool) {
        // Skip runtime IME manipulation on X11 since it breaks some IMEs.
        if !self.is_x11 {
            self.window.set_ime_allowed(allowed);
        }
    }

    /// Adjust the IME editor position according to the new location of the cursor.
    pub fn update_ime_position(&self, point: Point<usize>, size: &SizeInfo) {
        // NOTE: X11 doesn't support cursor area, so we need to offset manually to not obscure
        // the text.
        let offset = if self.is_x11 { 1 } else { 0 };
        let nspot_x = f64::from(size.padding_x() + point.column.0 as f32 * size.cell_width());
        let nspot_y =
            f64::from(size.padding_y() + (point.line + offset) as f32 * size.cell_height());

        // NOTE: some compositors don't like excluding too much and try to render popup at the
        // bottom right corner of the provided area, so exclude just the full-width char to not
        // obscure the cursor and not render popup at the end of the window.
        let width = size.cell_width() as f64 * 2.;
        // Fix: `cell_height` is an accessor method, just like `cell_width()` above and the
        // `size.cell_height()` call earlier in this function; the bare field access was a bug.
        let height = size.cell_height() as f64;

        self.window.set_ime_cursor_area(
            PhysicalPosition::new(nspot_x, nspot_y),
            PhysicalSize::new(width, height),
        );
    }

    /// Disable macOS window shadows.
    ///
    /// This prevents rendering artifacts from showing up when the window is transparent.
    #[cfg(target_os = "macos")]
    pub fn set_has_shadow(&self, has_shadows: bool) {
        let view = match self.raw_window_handle() {
            RawWindowHandle::AppKit(handle) => {
                assert!(MainThreadMarker::new().is_some());
                unsafe { handle.ns_view.cast::<NSView>().as_ref() }
            },
            _ => return,
        };

        view.window().unwrap().setHasShadow(has_shadows);
    }

    /// Select tab at the given `index`.
    #[cfg(target_os = "macos")]
    pub fn select_tab_at_index(&self, index: usize) {
        self.window.select_tab_at_index(index);
    }

    /// Select the last tab.
    #[cfg(target_os = "macos")]
    pub fn select_last_tab(&self) {
        self.window.select_tab_at_index(self.window.num_tabs() - 1);
    }

    /// Select next tab.
    #[cfg(target_os = "macos")]
    pub fn select_next_tab(&self) {
        self.window.select_next_tab();
    }

    /// Select previous tab.
    #[cfg(target_os = "macos")]
    pub fn select_previous_tab(&self) {
        self.window.select_previous_tab();
    }

    /// Identifier grouping windows into a macOS tab set.
    #[cfg(target_os = "macos")]
    pub fn tabbing_id(&self) -> String {
        self.window.tabbing_identifier()
    }
}
/// Assign the sRGB color space to the window's backing `NSWindow` on macOS.
#[cfg(target_os = "macos")]
fn use_srgb_color_space(window: &WinitWindow) {
    // Only AppKit handles carry an NSView; silently ignore any other handle type.
    let view = match window.window_handle().unwrap().as_raw() {
        RawWindowHandle::AppKit(handle) => {
            // AppKit may only be touched from the main thread.
            assert!(MainThreadMarker::new().is_some());
            // SAFETY(review): assumes `ns_view` is a valid NSView pointer for the duration of
            // this call — TODO confirm winit guarantees this for AppKit window handles.
            unsafe { handle.ns_view.cast::<NSView>().as_ref() }
        },
        _ => return,
    };
    unsafe {
        view.window().unwrap().setColorSpace(Some(&NSColorSpace::sRGBColorSpace()));
    }
}
| rust | {
"argument_definitions": [
{
"definitions": [
"pub struct Identity {\n /// Window title.\n pub title: String,\n\n /// Window class.\n pub class: Class,\n}"
],
"name": "identity",
"type": "&Identity"
},
{
"definitions": [
"pub struct WindowConfig {\n /// Initial position.\n pub position: Option<Delta<i32>>,\n\n /// Draw the window with title bar / borders.\n pub decorations: Decorations,\n\n /// Startup mode.\n pub startup_mode: StartupMode,\n\n /// XEmbed parent.\n #[config(skip)]\n pub embed: Option<u32>,\n\n /// Spread out additional padding evenly.\n pub dynamic_padding: bool,\n\n /// Use dynamic title.\n pub dynamic_title: bool,\n\n /// Information to identify a particular window.\n #[config(flatten)]\n pub identity: Identity,\n\n /// Background opacity from 0.0 to 1.0.\n pub opacity: Percentage,\n\n /// Request blur behind the window.\n pub blur: bool,\n\n /// Controls which `Option` key should be treated as `Alt`.\n option_as_alt: OptionAsAlt,\n\n /// Resize increments.\n pub resize_increments: bool,\n\n /// Pixel padding.\n padding: Delta<u16>,\n\n /// Initial dimensions.\n dimensions: Dimensions,\n\n /// System decorations theme variant.\n decorations_theme_variant: Option<Theme>,\n\n /// Window level.\n pub level: WindowLevel,\n}"
],
"name": "window_config",
"type": "&WindowConfig"
},
{
"definitions": [
"pub enum Option<T> {\n /// No value.\n #[lang = \"None\"]\n #[stable(feature = \"rust1\", since = \"1.0.0\")]\n None,\n /// Some value of type `T`.\n #[lang = \"Some\"]\n #[stable(feature = \"rust1\", since = \"1.0.0\")]\n Some(#[stable(feature = \"rust1\", since = \"1.0.0\")] T),\n}",
"pub struct X11VisualInfo {\n raw: *const XVisualInfo,\n transparency: bool,\n}"
],
"name": "x11_visual",
"type": "Option< X11VisualInfo, >"
}
],
"end_line": 313,
"name": "get_platform_window",
"signature": "pub fn get_platform_window(\n identity: &Identity,\n window_config: &WindowConfig,\n #[cfg(all(feature = \"x11\", not(any(target_os = \"macos\", windows))))] x11_visual: Option<\n X11VisualInfo,\n >,\n ) -> WindowAttributes",
"start_line": 281
} | {
"class_name": "impl Window {\n /// Create a new window.\n ///\n /// This creates a window and fully initializes a window.\n pub fn new(\n event_loop: &ActiveEventLoop,\n config: &UiConfig,\n identity: &Identity,\n options: &mut WindowOptions,\n #[rustfmt::skip]\n #[cfg(all(feature = \"x11\", not(any(target_os = \"macos\", windows))))]\n x11_visual: Option<X11VisualInfo>,\n ) -> Result<Window> {\n let identity = identity.clone();\n let mut window_attributes = Window::get_platform_window(\n &identity,\n &config.window,\n #[cfg(all(feature = \"x11\", not(any(target_os = \"macos\", windows))))]\n x11_visual,\n #[cfg(target_os = \"macos\")]\n &options.window_tabbing_id.take(),\n );\n\n if let Some(position) = config.window.position {\n window_attributes = window_attributes\n .with_position(PhysicalPosition::<i32>::from((position.x, position.y)));\n }\n\n #[cfg(not(any(target_os = \"macos\", windows)))]\n if let Some(token) = options\n .activation_token\n .take()\n .map(ActivationToken::from_raw)\n .or_else(|| event_loop.read_token_from_env())\n {\n log::debug!(\"Activating window with token: {token:?}\");\n window_attributes = window_attributes.with_activation_token(token);\n\n // Remove the token from the env.\n startup_notify::reset_activation_token_env();\n }\n\n // On X11, embed the window inside another if the parent ID has been set.\n #[cfg(all(feature = \"x11\", not(any(target_os = \"macos\", windows))))]\n if let Some(parent_window_id) = event_loop.is_x11().then_some(config.window.embed).flatten()\n {\n window_attributes = window_attributes.with_embed_parent_window(parent_window_id);\n }\n\n window_attributes = window_attributes\n .with_title(&identity.title)\n .with_theme(config.window.theme())\n .with_visible(false)\n .with_transparent(true)\n .with_blur(config.window.blur)\n .with_maximized(config.window.maximized())\n .with_fullscreen(config.window.fullscreen())\n .with_window_level(config.window.level.into());\n\n let window = 
event_loop.create_window(window_attributes)?;\n\n // Text cursor.\n let current_mouse_cursor = CursorIcon::Text;\n window.set_cursor(current_mouse_cursor);\n\n // Enable IME.\n window.set_ime_allowed(true);\n window.set_ime_purpose(ImePurpose::Terminal);\n\n // Set initial transparency hint.\n window.set_transparent(config.window_opacity() < 1.);\n\n #[cfg(target_os = \"macos\")]\n use_srgb_color_space(&window);\n\n let scale_factor = window.scale_factor();\n log::info!(\"Window scale factor: {}\", scale_factor);\n let is_x11 = matches!(window.window_handle().unwrap().as_raw(), RawWindowHandle::Xlib(_));\n\n Ok(Self {\n hold: options.terminal_options.hold,\n requested_redraw: false,\n title: identity.title,\n current_mouse_cursor,\n mouse_visible: true,\n has_frame: true,\n scale_factor,\n window,\n is_x11,\n })\n }\n\n #[inline]\n pub fn raw_window_handle(&self) -> RawWindowHandle {\n self.window.window_handle().unwrap().as_raw()\n }\n\n #[inline]\n pub fn request_inner_size(&self, size: PhysicalSize<u32>) {\n let _ = self.window.request_inner_size(size);\n }\n\n #[inline]\n pub fn inner_size(&self) -> PhysicalSize<u32> {\n self.window.inner_size()\n }\n\n #[inline]\n pub fn set_visible(&self, visibility: bool) {\n self.window.set_visible(visibility);\n }\n\n #[cfg(target_os = \"macos\")]\n #[inline]\n pub fn focus_window(&self) {\n self.window.focus_window();\n }\n\n /// Set the window title.\n #[inline]\n pub fn set_title(&mut self, title: String) {\n self.title = title;\n self.window.set_title(&self.title);\n }\n\n /// Get the window title.\n #[inline]\n pub fn title(&self) -> &str {\n &self.title\n }\n\n #[inline]\n pub fn request_redraw(&mut self) {\n if !self.requested_redraw {\n self.requested_redraw = true;\n self.window.request_redraw();\n }\n }\n\n #[inline]\n pub fn set_mouse_cursor(&mut self, cursor: CursorIcon) {\n if cursor != self.current_mouse_cursor {\n self.current_mouse_cursor = cursor;\n self.window.set_cursor(cursor);\n }\n }\n\n /// Set mouse 
cursor visible.\n pub fn set_mouse_visible(&mut self, visible: bool) {\n if visible != self.mouse_visible {\n self.mouse_visible = visible;\n self.window.set_cursor_visible(visible);\n }\n }\n\n #[cfg(not(any(target_os = \"macos\", windows)))]\n pub fn get_platform_window(\n identity: &Identity,\n window_config: &WindowConfig,\n #[cfg(all(feature = \"x11\", not(any(target_os = \"macos\", windows))))] x11_visual: Option<\n X11VisualInfo,\n >,\n ) -> WindowAttributes {\n #[cfg(feature = \"x11\")]\n let icon = {\n let mut decoder = Decoder::new(Cursor::new(WINDOW_ICON));\n decoder.set_transformations(png::Transformations::normalize_to_color8());\n let mut reader = decoder.read_info().expect(\"invalid embedded icon\");\n let mut buf = vec![0; reader.output_buffer_size()];\n let _ = reader.next_frame(&mut buf);\n Icon::from_rgba(buf, reader.info().width, reader.info().height)\n .expect(\"invalid embedded icon format\")\n };\n\n let builder = WinitWindow::default_attributes()\n .with_name(&identity.class.general, &identity.class.instance)\n .with_decorations(window_config.decorations != Decorations::None);\n\n #[cfg(feature = \"x11\")]\n let builder = builder.with_window_icon(Some(icon));\n\n #[cfg(feature = \"x11\")]\n let builder = match x11_visual {\n Some(visual) => builder.with_x11_visual(visual.visual_id() as u32),\n None => builder,\n };\n\n builder\n }\n\n #[cfg(windows)]\n pub fn get_platform_window(_: &Identity, window_config: &WindowConfig) -> WindowAttributes {\n let icon = winit::window::Icon::from_resource(IDI_ICON, None);\n\n WinitWindow::default_attributes()\n .with_decorations(window_config.decorations != Decorations::None)\n .with_window_icon(icon.as_ref().ok().cloned())\n .with_taskbar_icon(icon.ok())\n }\n\n #[cfg(target_os = \"macos\")]\n pub fn get_platform_window(\n _: &Identity,\n window_config: &WindowConfig,\n tabbing_id: &Option<String>,\n ) -> WindowAttributes {\n let mut window =\n 
WinitWindow::default_attributes().with_option_as_alt(window_config.option_as_alt());\n\n if let Some(tabbing_id) = tabbing_id {\n window = window.with_tabbing_identifier(tabbing_id);\n }\n\n match window_config.decorations {\n Decorations::Full => window,\n Decorations::Transparent => window\n .with_title_hidden(true)\n .with_titlebar_transparent(true)\n .with_fullsize_content_view(true),\n Decorations::Buttonless => window\n .with_title_hidden(true)\n .with_titlebar_buttons_hidden(true)\n .with_titlebar_transparent(true)\n .with_fullsize_content_view(true),\n Decorations::None => window.with_titlebar_hidden(true),\n }\n }\n\n pub fn set_urgent(&self, is_urgent: bool) {\n let attention = if is_urgent { Some(UserAttentionType::Critical) } else { None };\n\n self.window.request_user_attention(attention);\n }\n\n pub fn id(&self) -> WindowId {\n self.window.id()\n }\n\n pub fn set_transparent(&self, transparent: bool) {\n self.window.set_transparent(transparent);\n }\n\n pub fn set_blur(&self, blur: bool) {\n self.window.set_blur(blur);\n }\n\n pub fn set_maximized(&self, maximized: bool) {\n self.window.set_maximized(maximized);\n }\n\n pub fn set_minimized(&self, minimized: bool) {\n self.window.set_minimized(minimized);\n }\n\n pub fn set_resize_increments(&self, increments: PhysicalSize<f32>) {\n self.window.set_resize_increments(Some(increments));\n }\n\n /// Toggle the window's fullscreen state.\n pub fn toggle_fullscreen(&self) {\n self.set_fullscreen(self.window.fullscreen().is_none());\n }\n\n /// Toggle the window's maximized state.\n pub fn toggle_maximized(&self) {\n self.set_maximized(!self.window.is_maximized());\n }\n\n /// Inform windowing system about presenting to the window.\n ///\n /// Should be called right before presenting to the window with e.g. 
`eglSwapBuffers`.\n pub fn pre_present_notify(&self) {\n self.window.pre_present_notify();\n }\n\n pub fn set_theme(&self, theme: Option<Theme>) {\n self.window.set_theme(theme);\n }\n\n #[cfg(target_os = \"macos\")]\n pub fn toggle_simple_fullscreen(&self) {\n self.set_simple_fullscreen(!self.window.simple_fullscreen());\n }\n\n #[cfg(target_os = \"macos\")]\n pub fn set_option_as_alt(&self, option_as_alt: OptionAsAlt) {\n self.window.set_option_as_alt(option_as_alt);\n }\n\n pub fn set_fullscreen(&self, fullscreen: bool) {\n if fullscreen {\n self.window.set_fullscreen(Some(Fullscreen::Borderless(None)));\n } else {\n self.window.set_fullscreen(None);\n }\n }\n\n pub fn current_monitor(&self) -> Option<MonitorHandle> {\n self.window.current_monitor()\n }\n\n #[cfg(target_os = \"macos\")]\n pub fn set_simple_fullscreen(&self, simple_fullscreen: bool) {\n self.window.set_simple_fullscreen(simple_fullscreen);\n }\n\n pub fn set_ime_allowed(&self, allowed: bool) {\n // Skip runtime IME manipulation on X11 since it breaks some IMEs.\n if !self.is_x11 {\n self.window.set_ime_allowed(allowed);\n }\n }\n\n /// Adjust the IME editor position according to the new location of the cursor.\n pub fn update_ime_position(&self, point: Point<usize>, size: &SizeInfo) {\n // NOTE: X11 doesn't support cursor area, so we need to offset manually to not obscure\n // the text.\n let offset = if self.is_x11 { 1 } else { 0 };\n let nspot_x = f64::from(size.padding_x() + point.column.0 as f32 * size.cell_width());\n let nspot_y =\n f64::from(size.padding_y() + (point.line + offset) as f32 * size.cell_height());\n\n // NOTE: some compositors don't like excluding too much and try to render popup at the\n // bottom right corner of the provided area, so exclude just the full-width char to not\n // obscure the cursor and not render popup at the end of the window.\n let width = size.cell_width() as f64 * 2.;\n let height = size.cell_height as f64;\n\n self.window.set_ime_cursor_area(\n 
PhysicalPosition::new(nspot_x, nspot_y),\n PhysicalSize::new(width, height),\n );\n }\n\n /// Disable macOS window shadows.\n ///\n /// This prevents rendering artifacts from showing up when the window is transparent.\n #[cfg(target_os = \"macos\")]\n pub fn set_has_shadow(&self, has_shadows: bool) {\n let view = match self.raw_window_handle() {\n RawWindowHandle::AppKit(handle) => {\n assert!(MainThreadMarker::new().is_some());\n unsafe { handle.ns_view.cast::<NSView>().as_ref() }\n },\n _ => return,\n };\n\n view.window().unwrap().setHasShadow(has_shadows);\n }\n\n /// Select tab at the given `index`.\n #[cfg(target_os = \"macos\")]\n pub fn select_tab_at_index(&self, index: usize) {\n self.window.select_tab_at_index(index);\n }\n\n /// Select the last tab.\n #[cfg(target_os = \"macos\")]\n pub fn select_last_tab(&self) {\n self.window.select_tab_at_index(self.window.num_tabs() - 1);\n }\n\n /// Select next tab.\n #[cfg(target_os = \"macos\")]\n pub fn select_next_tab(&self) {\n self.window.select_next_tab();\n }\n\n /// Select previous tab.\n #[cfg(target_os = \"macos\")]\n pub fn select_previous_tab(&self) {\n self.window.select_previous_tab();\n }\n\n #[cfg(target_os = \"macos\")]\n pub fn tabbing_id(&self) -> String {\n self.window.tabbing_identifier()\n }\n}",
"class_signature": "impl Window"
} |
should_build_sequence | alacritty-master/alacritty/src/input/keyboard.rs | fn should_build_sequence(
key: &KeyEvent,
text: &str,
mode: TermMode,
mods: ModifiersState,
) -> bool {
if mode.contains(TermMode::REPORT_ALL_KEYS_AS_ESC) {
return true;
}
let disambiguate = mode.contains(TermMode::DISAMBIGUATE_ESC_CODES)
&& (key.logical_key == Key::Named(NamedKey::Escape)
|| key.location == KeyLocation::Numpad
|| (!mods.is_empty()
&& (mods != ModifiersState::SHIFT
|| matches!(
key.logical_key,
Key::Named(NamedKey::Tab)
| Key::Named(NamedKey::Enter)
| Key::Named(NamedKey::Backspace)
))));
match key.logical_key {
_ if disambiguate => true,
// Exclude all the named keys unless they have textual representation.
Key::Named(named) => named.to_text().is_none(),
_ => text.is_empty(),
}
} | use std::borrow::Cow;
use winit::event::{ElementState, KeyEvent};
#[cfg(target_os = "macos")]
use winit::keyboard::ModifiersKeyState;
use winit::keyboard::{Key, KeyLocation, ModifiersState, NamedKey};
#[cfg(target_os = "macos")]
use winit::platform::macos::OptionAsAlt;
use alacritty_terminal::event::EventListener;
use alacritty_terminal::term::TermMode;
use winit::platform::modifier_supplement::KeyEventExtModifierSupplement;
use crate::config::{Action, BindingKey, BindingMode, KeyBinding};
use crate::event::TYPING_SEARCH_DELAY;
use crate::input::{ActionContext, Execute, Processor};
use crate::scheduler::{TimerId, Topic};
impl<T: EventListener, A: ActionContext<T>> Processor<T, A> {
    /// Process key input.
    ///
    /// Dispatches a winit key event through, in order: IME preedit suppression, key-release
    /// handling, hint selection, inline search, key bindings, search input, and finally
    /// translation into bytes written to the PTY.
    pub fn key_input(&mut self, key: KeyEvent) {
        // IME input will be applied on commit and shouldn't trigger key bindings.
        if self.ctx.display().ime.preedit().is_some() {
            return;
        }
        let mode = *self.ctx.terminal().mode();
        let mods = self.ctx.modifiers().state();
        if key.state == ElementState::Released {
            // Re-enable IME, which is disabled while an inline-search character is pending.
            if self.ctx.inline_search_state().char_pending {
                self.ctx.window().set_ime_allowed(true);
            }
            self.key_release(key, mode, mods);
            return;
        }
        let text = key.text_with_all_modifiers().unwrap_or_default();
        // All key bindings are disabled while a hint is being selected.
        if self.ctx.display().hint_state.active() {
            for character in text.chars() {
                self.ctx.hint_input(character);
            }
            return;
        }
        // First key after inline search is captured.
        let inline_state = self.ctx.inline_search_state();
        if inline_state.char_pending {
            self.ctx.inline_search_input(text);
            return;
        }
        // Reset search delay when the user is still typing.
        self.reset_search_delay();
        // Key bindings suppress the character input.
        if self.process_key_bindings(&key) {
            return;
        }
        if self.ctx.search_active() {
            for character in text.chars() {
                self.ctx.search_input(character);
            }
            return;
        }
        // Vi mode on its own doesn't have any input, the search input was done before.
        if mode.contains(TermMode::VI) {
            return;
        }
        // Mask `Alt` modifier from input when we won't send esc.
        let mods = if self.alt_send_esc(&key, text) { mods } else { mods & !ModifiersState::ALT };
        let build_key_sequence = Self::should_build_sequence(&key, text, mode, mods);
        let is_modifier_key = Self::is_modifier_key(&key);
        let bytes = if build_key_sequence {
            build_sequence(key, mods, mode)
        } else {
            // Plain input: optional ESC prefix for `Alt`, followed by the key's text.
            let mut bytes = Vec::with_capacity(text.len() + 1);
            if mods.alt_key() {
                bytes.push(b'\x1b');
            }
            bytes.extend_from_slice(text.as_bytes());
            bytes
        };
        // Write only if we have something to write.
        if !bytes.is_empty() {
            // Don't clear selection/scroll down when writing escaped modifier keys.
            if !is_modifier_key {
                self.ctx.on_terminal_input_start();
            }
            self.ctx.write_to_pty(bytes);
        }
    }
    /// Whether a held `Alt` should be sent as an ESC prefix for this key event.
    ///
    /// On macOS this additionally depends on the `option_as_alt` configuration and on which
    /// physical `Option` key is pressed.
    fn alt_send_esc(&mut self, key: &KeyEvent, text: &str) -> bool {
        #[cfg(not(target_os = "macos"))]
        let alt_send_esc = self.ctx.modifiers().state().alt_key();
        #[cfg(target_os = "macos")]
        let alt_send_esc = {
            let option_as_alt = self.ctx.config().window.option_as_alt();
            self.ctx.modifiers().state().alt_key()
                && (option_as_alt == OptionAsAlt::Both
                    || (option_as_alt == OptionAsAlt::OnlyLeft
                        && self.ctx.modifiers().lalt_state() == ModifiersKeyState::Pressed)
                    || (option_as_alt == OptionAsAlt::OnlyRight
                        && self.ctx.modifiers().ralt_state() == ModifiersKeyState::Pressed))
        };
        match key.logical_key {
            Key::Named(named) => {
                if named.to_text().is_some() {
                    alt_send_esc
                } else {
                    // Treat `Alt` as modifier for named keys without text, like ArrowUp.
                    self.ctx.modifiers().state().alt_key()
                }
            },
            // Only single-character text input gets the ESC prefix.
            _ => alt_send_esc && text.chars().count() == 1,
        }
    }
    /// Whether the event is for a pure modifier key (Shift/Control/Alt/Super).
    fn is_modifier_key(key: &KeyEvent) -> bool {
        matches!(
            key.logical_key.as_ref(),
            Key::Named(NamedKey::Shift)
                | Key::Named(NamedKey::Control)
                | Key::Named(NamedKey::Alt)
                | Key::Named(NamedKey::Super)
        )
    }
    /// Check whether we should try to build escape sequence for the [`KeyEvent`].
    fn should_build_sequence(
        key: &KeyEvent,
        text: &str,
        mode: TermMode,
        mods: ModifiersState,
    ) -> bool {
        if mode.contains(TermMode::REPORT_ALL_KEYS_AS_ESC) {
            return true;
        }
        // Kitty "disambiguate" mode: encode Escape, numpad keys, and modified keys —
        // except plain Shift, unless it modifies Tab/Enter/Backspace.
        let disambiguate = mode.contains(TermMode::DISAMBIGUATE_ESC_CODES)
            && (key.logical_key == Key::Named(NamedKey::Escape)
                || key.location == KeyLocation::Numpad
                || (!mods.is_empty()
                    && (mods != ModifiersState::SHIFT
                        || matches!(
                            key.logical_key,
                            Key::Named(NamedKey::Tab)
                                | Key::Named(NamedKey::Enter)
                                | Key::Named(NamedKey::Backspace)
                        ))));
        match key.logical_key {
            _ if disambiguate => true,
            // Exclude all the named keys unless they have textual representation.
            Key::Named(named) => named.to_text().is_none(),
            _ => text.is_empty(),
        }
    }
    /// Attempt to find a binding and execute its action.
    ///
    /// The provided mode, mods, and key must match what is allowed by a binding
    /// for its action to be executed.
    fn process_key_bindings(&mut self, key: &KeyEvent) -> bool {
        let mode = BindingMode::new(self.ctx.terminal().mode(), self.ctx.search_active());
        let mods = self.ctx.modifiers().state();
        // Don't suppress char if no bindings were triggered.
        let mut suppress_chars = None;
        // We don't want the key without modifier, because it means something else most of
        // the time. However what we want is to manually lowercase the character to account
        // for both small and capital letters on regular characters at the same time.
        let logical_key = if let Key::Character(ch) = key.logical_key.as_ref() {
            // Match `Alt` bindings without `Alt` being applied, otherwise they use the
            // composed chars, which are not intuitive to bind.
            //
            // On Windows, the `Ctrl + Alt` mangles `logical_key` to unidentified values, thus
            // preventing them from being used in bindings
            //
            // For more see https://github.com/rust-windowing/winit/issues/2945.
            if (cfg!(target_os = "macos") || (cfg!(windows) && mods.control_key()))
                && mods.alt_key()
            {
                key.key_without_modifiers()
            } else {
                Key::Character(ch.to_lowercase().into())
            }
        } else {
            key.logical_key.clone()
        };
        // Get the action of a key binding.
        let mut binding_action = |binding: &KeyBinding| {
            let key = match (&binding.trigger, &logical_key) {
                (BindingKey::Scancode(_), _) => BindingKey::Scancode(key.physical_key),
                (_, code) => {
                    BindingKey::Keycode { key: code.clone(), location: key.location.into() }
                },
            };
            if binding.is_triggered_by(mode, mods, &key) {
                // Pass through the key if any of the bindings has the `ReceiveChar` action.
                *suppress_chars.get_or_insert(true) &= binding.action != Action::ReceiveChar;
                // Binding was triggered; run the action.
                Some(binding.action.clone())
            } else {
                None
            }
        };
        // Trigger matching key bindings.
        // NOTE: indexed loops avoid holding a borrow of the config across `execute`,
        // which takes `&mut self.ctx`.
        for i in 0..self.ctx.config().key_bindings().len() {
            let binding = &self.ctx.config().key_bindings()[i];
            if let Some(action) = binding_action(binding) {
                action.execute(&mut self.ctx);
            }
        }
        // Trigger key bindings for hints.
        for i in 0..self.ctx.config().hints.enabled.len() {
            let hint = &self.ctx.config().hints.enabled[i];
            let binding = match hint.binding.as_ref() {
                Some(binding) => binding.key_binding(hint),
                None => continue,
            };
            if let Some(action) = binding_action(binding) {
                action.execute(&mut self.ctx);
            }
        }
        suppress_chars.unwrap_or(false)
    }
    /// Handle key release.
    ///
    /// Only emits anything when the terminal requested event-type reporting and no UI mode
    /// (vi / search / hints) is intercepting input.
    fn key_release(&mut self, key: KeyEvent, mode: TermMode, mods: ModifiersState) {
        if !mode.contains(TermMode::REPORT_EVENT_TYPES)
            || mode.contains(TermMode::VI)
            || self.ctx.search_active()
            || self.ctx.display().hint_state.active()
        {
            return;
        }
        // Mask `Alt` modifier from input when we won't send esc.
        let text = key.text_with_all_modifiers().unwrap_or_default();
        let mods = if self.alt_send_esc(&key, text) { mods } else { mods & !ModifiersState::ALT };
        let bytes = match key.logical_key.as_ref() {
            // Enter/Tab/Backspace releases are only reported in "report all keys" mode.
            Key::Named(NamedKey::Enter)
            | Key::Named(NamedKey::Tab)
            | Key::Named(NamedKey::Backspace)
                if !mode.contains(TermMode::REPORT_ALL_KEYS_AS_ESC) =>
            {
                return
            },
            _ => build_sequence(key, mods, mode),
        };
        self.ctx.write_to_pty(bytes);
    }
    /// Reset search delay.
    fn reset_search_delay(&mut self) {
        if self.ctx.search_active() {
            let timer_id = TimerId::new(Topic::DelayedSearch, self.ctx.window().id());
            let scheduler = self.ctx.scheduler_mut();
            // Reschedule only if the timer is still pending.
            if let Some(timer) = scheduler.unschedule(timer_id) {
                scheduler.schedule(timer.event, TYPING_SEARCH_DELAY, false, timer.id);
            }
        }
    }
}
/// Build a key's keyboard escape sequence based on the given `key`, `mods`, and `mode`.
///
/// The key sequences for `APP_KEYPAD` and alike are handled inside the bindings.
///
/// Returns an empty `Vec` when the key produces no escape sequence under the
/// active terminal modes.
#[inline(never)]
fn build_sequence(key: KeyEvent, mods: ModifiersState, mode: TermMode) -> Vec<u8> {
    let mut modifiers = mods.into();

    // Whether any kitty keyboard protocol enhancement is active.
    let kitty_seq = mode.intersects(
        TermMode::REPORT_ALL_KEYS_AS_ESC
            | TermMode::DISAMBIGUATE_ESC_CODES
            | TermMode::REPORT_EVENT_TYPES,
    );
    let kitty_encode_all = mode.contains(TermMode::REPORT_ALL_KEYS_AS_ESC);
    // The default parameter is 1, so we can omit it.
    let kitty_event_type = mode.contains(TermMode::REPORT_EVENT_TYPES)
        && (key.repeat || key.state == ElementState::Released);

    let context =
        SequenceBuilder { mode, modifiers, kitty_seq, kitty_encode_all, kitty_event_type };

    // Associated text is only attached for non-empty, non-control text on key press.
    let associated_text = key.text_with_all_modifiers().filter(|text| {
        mode.contains(TermMode::REPORT_ASSOCIATED_TEXT)
            && key.state != ElementState::Released
            && !text.is_empty()
            && !is_control_character(text)
    });

    // Try the builders from most to least specific; the first to succeed wins.
    let sequence_base = context
        .try_build_numpad(&key)
        .or_else(|| context.try_build_named_kitty(&key))
        .or_else(|| context.try_build_named_normal(&key, associated_text.is_some()))
        .or_else(|| context.try_build_control_char_or_mod(&key, &mut modifiers))
        .or_else(|| context.try_build_textual(&key, associated_text));

    let (payload, terminator) = match sequence_base {
        Some(SequenceBase { payload, terminator }) => (payload, terminator),
        // No builder matched: the key emits nothing.
        _ => return Vec::new(),
    };

    let mut payload = format!("\x1b[{payload}");

    // Add modifiers information.
    if kitty_event_type || !modifiers.is_empty() || associated_text.is_some() {
        payload.push_str(&format!(";{}", modifiers.encode_esc_sequence()));
    }

    // Push event type.
    if kitty_event_type {
        payload.push(':');
        let event_type = match key.state {
            // Repeat takes priority over press/release state.
            _ if key.repeat => '2',
            ElementState::Pressed => '1',
            ElementState::Released => '3',
        };
        payload.push(event_type);
    }

    // Append associated text codepoints: `;first` then `:next` for each extra char.
    if let Some(text) = associated_text {
        let mut codepoints = text.chars().map(u32::from);
        if let Some(codepoint) = codepoints.next() {
            payload.push_str(&format!(";{codepoint}"));
        }
        for codepoint in codepoints {
            payload.push_str(&format!(":{codepoint}"));
        }
    }

    payload.push(terminator.encode_esc_sequence());

    payload.into_bytes()
}
/// Helper to build escape sequence payloads from [`KeyEvent`].
pub struct SequenceBuilder {
    /// Active terminal modes, used to gate protocol-specific encodings.
    mode: TermMode,
    /// The emitted sequence should follow the kitty keyboard protocol.
    kitty_seq: bool,
    /// Encode all the keys according to the protocol.
    kitty_encode_all: bool,
    /// Report event types.
    kitty_event_type: bool,
    /// Modifiers to encode into the produced sequence.
    modifiers: SequenceModifiers,
}
impl SequenceBuilder {
    /// Try building sequence from the event's emitting text.
    ///
    /// Only applies to `Key::Character` events while the kitty protocol is active;
    /// returns `None` otherwise.
    fn try_build_textual(
        &self,
        key: &KeyEvent,
        associated_text: Option<&str>,
    ) -> Option<SequenceBase> {
        let character = match key.logical_key.as_ref() {
            Key::Character(character) if self.kitty_seq => character,
            _ => return None,
        };

        if character.chars().count() == 1 {
            let shift = self.modifiers.contains(SequenceModifiers::SHIFT);

            let ch = character.chars().next().unwrap();
            // With `SHIFT` held the logical key carries the shifted character;
            // lowercase it to recover the unshifted key code.
            let unshifted_ch = if shift { ch.to_lowercase().next().unwrap() } else { ch };

            let alternate_key_code = u32::from(ch);
            let mut unicode_key_code = u32::from(unshifted_ch);

            // Try to get the base for keys which change based on modifier, like `1` for `!`.
            //
            // However it should only be performed when `SHIFT` is pressed.
            if shift && alternate_key_code == unicode_key_code {
                if let Key::Character(unmodded) = key.key_without_modifiers().as_ref() {
                    unicode_key_code = u32::from(unmodded.chars().next().unwrap_or(unshifted_ch));
                }
            }

            // NOTE: Base layouts are ignored, since winit doesn't expose this information
            // yet.
            let payload = if self.mode.contains(TermMode::REPORT_ALTERNATE_KEYS)
                && alternate_key_code != unicode_key_code
            {
                // Report both the base key code and the shifted alternate.
                format!("{unicode_key_code}:{alternate_key_code}")
            } else {
                unicode_key_code.to_string()
            };

            Some(SequenceBase::new(payload.into(), SequenceTerminator::Kitty))
        } else if self.kitty_encode_all && associated_text.is_some() {
            // Fallback when need to report text, but we don't have any key associated with this
            // text.
            Some(SequenceBase::new("0".into(), SequenceTerminator::Kitty))
        } else {
            None
        }
    }

    /// Try building from numpad key.
    ///
    /// `None` is returned when the key is neither known nor numpad.
    fn try_build_numpad(&self, key: &KeyEvent) -> Option<SequenceBase> {
        // Dedicated numpad key codes only exist in the kitty protocol.
        if !self.kitty_seq || key.location != KeyLocation::Numpad {
            return None;
        }

        // Keypad keys map to the kitty functional key code range.
        let base = match key.logical_key.as_ref() {
            Key::Character("0") => "57399",
            Key::Character("1") => "57400",
            Key::Character("2") => "57401",
            Key::Character("3") => "57402",
            Key::Character("4") => "57403",
            Key::Character("5") => "57404",
            Key::Character("6") => "57405",
            Key::Character("7") => "57406",
            Key::Character("8") => "57407",
            Key::Character("9") => "57408",
            Key::Character(".") => "57409",
            Key::Character("/") => "57410",
            Key::Character("*") => "57411",
            Key::Character("-") => "57412",
            Key::Character("+") => "57413",
            Key::Character("=") => "57415",
            Key::Named(named) => match named {
                NamedKey::Enter => "57414",
                NamedKey::ArrowLeft => "57417",
                NamedKey::ArrowRight => "57418",
                NamedKey::ArrowUp => "57419",
                NamedKey::ArrowDown => "57420",
                NamedKey::PageUp => "57421",
                NamedKey::PageDown => "57422",
                NamedKey::Home => "57423",
                NamedKey::End => "57424",
                NamedKey::Insert => "57425",
                NamedKey::Delete => "57426",
                _ => return None,
            },
            _ => return None,
        };

        Some(SequenceBase::new(base.into(), SequenceTerminator::Kitty))
    }

    /// Try building from [`NamedKey`] using the kitty keyboard protocol encoding
    /// for functional keys.
    fn try_build_named_kitty(&self, key: &KeyEvent) -> Option<SequenceBase> {
        let named = match key.logical_key {
            Key::Named(named) if self.kitty_seq => named,
            _ => return None,
        };

        let (base, terminator) = match named {
            // F3 in kitty protocol diverges from alacritty's terminfo.
            NamedKey::F3 => ("13", SequenceTerminator::Normal('~')),
            NamedKey::F13 => ("57376", SequenceTerminator::Kitty),
            NamedKey::F14 => ("57377", SequenceTerminator::Kitty),
            NamedKey::F15 => ("57378", SequenceTerminator::Kitty),
            NamedKey::F16 => ("57379", SequenceTerminator::Kitty),
            NamedKey::F17 => ("57380", SequenceTerminator::Kitty),
            NamedKey::F18 => ("57381", SequenceTerminator::Kitty),
            NamedKey::F19 => ("57382", SequenceTerminator::Kitty),
            NamedKey::F20 => ("57383", SequenceTerminator::Kitty),
            NamedKey::F21 => ("57384", SequenceTerminator::Kitty),
            NamedKey::F22 => ("57385", SequenceTerminator::Kitty),
            NamedKey::F23 => ("57386", SequenceTerminator::Kitty),
            NamedKey::F24 => ("57387", SequenceTerminator::Kitty),
            NamedKey::F25 => ("57388", SequenceTerminator::Kitty),
            NamedKey::F26 => ("57389", SequenceTerminator::Kitty),
            NamedKey::F27 => ("57390", SequenceTerminator::Kitty),
            NamedKey::F28 => ("57391", SequenceTerminator::Kitty),
            NamedKey::F29 => ("57392", SequenceTerminator::Kitty),
            NamedKey::F30 => ("57393", SequenceTerminator::Kitty),
            NamedKey::F31 => ("57394", SequenceTerminator::Kitty),
            NamedKey::F32 => ("57395", SequenceTerminator::Kitty),
            NamedKey::F33 => ("57396", SequenceTerminator::Kitty),
            NamedKey::F34 => ("57397", SequenceTerminator::Kitty),
            NamedKey::F35 => ("57398", SequenceTerminator::Kitty),
            NamedKey::ScrollLock => ("57359", SequenceTerminator::Kitty),
            NamedKey::PrintScreen => ("57361", SequenceTerminator::Kitty),
            NamedKey::Pause => ("57362", SequenceTerminator::Kitty),
            NamedKey::ContextMenu => ("57363", SequenceTerminator::Kitty),
            NamedKey::MediaPlay => ("57428", SequenceTerminator::Kitty),
            NamedKey::MediaPause => ("57429", SequenceTerminator::Kitty),
            NamedKey::MediaPlayPause => ("57430", SequenceTerminator::Kitty),
            NamedKey::MediaStop => ("57432", SequenceTerminator::Kitty),
            NamedKey::MediaFastForward => ("57433", SequenceTerminator::Kitty),
            NamedKey::MediaRewind => ("57434", SequenceTerminator::Kitty),
            NamedKey::MediaTrackNext => ("57435", SequenceTerminator::Kitty),
            NamedKey::MediaTrackPrevious => ("57436", SequenceTerminator::Kitty),
            NamedKey::MediaRecord => ("57437", SequenceTerminator::Kitty),
            NamedKey::AudioVolumeDown => ("57438", SequenceTerminator::Kitty),
            NamedKey::AudioVolumeUp => ("57439", SequenceTerminator::Kitty),
            NamedKey::AudioVolumeMute => ("57440", SequenceTerminator::Kitty),
            _ => return None,
        };

        Some(SequenceBase::new(base.into(), terminator))
    }

    /// Try building from [`NamedKey`].
    ///
    /// Produces the legacy xterm/dec-style CSI sequences for navigation and
    /// function keys.
    fn try_build_named_normal(
        &self,
        key: &KeyEvent,
        has_associated_text: bool,
    ) -> Option<SequenceBase> {
        let named = match key.logical_key {
            Key::Named(named) => named,
            _ => return None,
        };

        // The default parameter is 1, so we can omit it.
        let one_based =
            if self.modifiers.is_empty() && !self.kitty_event_type && !has_associated_text {
                ""
            } else {
                "1"
            };
        let (base, terminator) = match named {
            NamedKey::PageUp => ("5", SequenceTerminator::Normal('~')),
            NamedKey::PageDown => ("6", SequenceTerminator::Normal('~')),
            NamedKey::Insert => ("2", SequenceTerminator::Normal('~')),
            NamedKey::Delete => ("3", SequenceTerminator::Normal('~')),
            NamedKey::Home => (one_based, SequenceTerminator::Normal('H')),
            NamedKey::End => (one_based, SequenceTerminator::Normal('F')),
            NamedKey::ArrowLeft => (one_based, SequenceTerminator::Normal('D')),
            NamedKey::ArrowRight => (one_based, SequenceTerminator::Normal('C')),
            NamedKey::ArrowUp => (one_based, SequenceTerminator::Normal('A')),
            NamedKey::ArrowDown => (one_based, SequenceTerminator::Normal('B')),
            NamedKey::F1 => (one_based, SequenceTerminator::Normal('P')),
            NamedKey::F2 => (one_based, SequenceTerminator::Normal('Q')),
            NamedKey::F3 => (one_based, SequenceTerminator::Normal('R')),
            NamedKey::F4 => (one_based, SequenceTerminator::Normal('S')),
            NamedKey::F5 => ("15", SequenceTerminator::Normal('~')),
            NamedKey::F6 => ("17", SequenceTerminator::Normal('~')),
            NamedKey::F7 => ("18", SequenceTerminator::Normal('~')),
            NamedKey::F8 => ("19", SequenceTerminator::Normal('~')),
            NamedKey::F9 => ("20", SequenceTerminator::Normal('~')),
            NamedKey::F10 => ("21", SequenceTerminator::Normal('~')),
            NamedKey::F11 => ("23", SequenceTerminator::Normal('~')),
            NamedKey::F12 => ("24", SequenceTerminator::Normal('~')),
            NamedKey::F13 => ("25", SequenceTerminator::Normal('~')),
            NamedKey::F14 => ("26", SequenceTerminator::Normal('~')),
            NamedKey::F15 => ("28", SequenceTerminator::Normal('~')),
            NamedKey::F16 => ("29", SequenceTerminator::Normal('~')),
            NamedKey::F17 => ("31", SequenceTerminator::Normal('~')),
            NamedKey::F18 => ("32", SequenceTerminator::Normal('~')),
            NamedKey::F19 => ("33", SequenceTerminator::Normal('~')),
            NamedKey::F20 => ("34", SequenceTerminator::Normal('~')),
            _ => return None,
        };

        Some(SequenceBase::new(base.into(), terminator))
    }

    /// Try building escape from control characters (e.g. Enter) and modifiers.
    ///
    /// Also updates `mods` for modifier keysyms based on the key's press state.
    fn try_build_control_char_or_mod(
        &self,
        key: &KeyEvent,
        mods: &mut SequenceModifiers,
    ) -> Option<SequenceBase> {
        if !self.kitty_encode_all && !self.kitty_seq {
            return None;
        }

        let named = match key.logical_key {
            Key::Named(named) => named,
            _ => return None,
        };

        // Control characters are reported with their codepoint as base.
        let base = match named {
            NamedKey::Tab => "9",
            NamedKey::Enter => "13",
            NamedKey::Escape => "27",
            NamedKey::Space => "32",
            NamedKey::Backspace => "127",
            _ => "",
        };

        // Fail when the key is not a named control character and the active mode prohibits us
        // from encoding modifier keys.
        if !self.kitty_encode_all && base.is_empty() {
            return None;
        }

        // Modifier keysyms get dedicated codes; left-hand variants differ from the rest.
        let base = match (named, key.location) {
            (NamedKey::Shift, KeyLocation::Left) => "57441",
            (NamedKey::Control, KeyLocation::Left) => "57442",
            (NamedKey::Alt, KeyLocation::Left) => "57443",
            (NamedKey::Super, KeyLocation::Left) => "57444",
            (NamedKey::Hyper, KeyLocation::Left) => "57445",
            (NamedKey::Meta, KeyLocation::Left) => "57446",
            (NamedKey::Shift, _) => "57447",
            (NamedKey::Control, _) => "57448",
            (NamedKey::Alt, _) => "57449",
            (NamedKey::Super, _) => "57450",
            (NamedKey::Hyper, _) => "57451",
            (NamedKey::Meta, _) => "57452",
            (NamedKey::CapsLock, _) => "57358",
            (NamedKey::NumLock, _) => "57360",
            _ => base,
        };

        // NOTE: Kitty's protocol mandates that the modifier state is applied before
        // key press, however winit sends them after the key press, so for modifiers
        // itself apply the state based on keysyms and not the _actual_ modifiers
        // state, which is how kitty is doing so and what is suggested in such case.
        let press = key.state.is_pressed();
        match named {
            NamedKey::Shift => mods.set(SequenceModifiers::SHIFT, press),
            NamedKey::Control => mods.set(SequenceModifiers::CONTROL, press),
            NamedKey::Alt => mods.set(SequenceModifiers::ALT, press),
            NamedKey::Super => mods.set(SequenceModifiers::SUPER, press),
            _ => (),
        }

        if base.is_empty() {
            None
        } else {
            Some(SequenceBase::new(base.into(), SequenceTerminator::Kitty))
        }
    }
}
/// Partially-built escape sequence: the payload plus its terminator.
pub struct SequenceBase {
    /// The base of the payload, which is the `number` and optionally an alt base from the kitty
    /// spec.
    payload: Cow<'static, str>,
    /// Character which finalizes the escape sequence.
    terminator: SequenceTerminator,
}
impl SequenceBase {
fn new(payload: Cow<'static, str>, terminator: SequenceTerminator) -> Self {
Self { payload, terminator }
}
}
/// Final character of a keyboard escape sequence.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum SequenceTerminator {
    /// The normal key esc sequence terminator defined by xterm/dec.
    Normal(char),
    /// The terminator is for kitty escape sequence.
    Kitty,
}
impl SequenceTerminator {
    /// Get the character which closes the escape sequence.
    fn encode_esc_sequence(self) -> char {
        match self {
            // Kitty sequences are always terminated by `u`.
            SequenceTerminator::Kitty => 'u',
            SequenceTerminator::Normal(terminator) => terminator,
        }
    }
}
bitflags::bitflags! {
    /// The modifiers encoding for escape sequence.
    ///
    /// These bits are offset by one (`bits + 1`) when written into a sequence.
    #[derive(Debug, Clone, Copy, PartialEq, Eq)]
    struct SequenceModifiers : u8 {
        const SHIFT = 0b0000_0001;
        const ALT = 0b0000_0010;
        const CONTROL = 0b0000_0100;
        const SUPER = 0b0000_1000;
        // NOTE: Kitty protocol defines additional modifiers to what is present here, like
        // Capslock, but it's not a modifier as per winit.
    }
}
impl SequenceModifiers {
    /// Get the value which should be passed to escape sequence.
    ///
    /// The wire format encodes modifiers with an offset of one, so an empty
    /// modifier set is transmitted as `1`.
    pub fn encode_esc_sequence(self) -> u8 {
        1 + self.bits()
    }
}
impl From<ModifiersState> for SequenceModifiers {
    /// Translate winit's modifier state into the sequence modifier bitmask.
    fn from(mods: ModifiersState) -> Self {
        [
            (Self::SHIFT, mods.shift_key()),
            (Self::ALT, mods.alt_key()),
            (Self::CONTROL, mods.control_key()),
            (Self::SUPER, mods.super_key()),
        ]
        .iter()
        .fold(Self::empty(), |acc, &(flag, active)| if active { acc | flag } else { acc })
    }
}
/// Check whether the `text` is `0x7f`, `C0` or `C1` control code.
///
/// Returns `false` for empty or multi-character text.
fn is_control_character(text: &str) -> bool {
    // 0x7f (DEL) is included here since it has a dedicated control code (`^?`) which generally
    // does not match the reported text (`^H`), despite not technically being part of C0 or C1.
    let mut chars = text.chars();
    match (chars.next(), chars.next()) {
        // Compare codepoints instead of the first UTF-8 byte: C1 controls
        // (`0x80..=0x9f`) encode as two bytes, so a byte-length check could never
        // match them. This also avoids panicking on empty text.
        (Some(character), None) => {
            let codepoint = u32::from(character);
            codepoint < 0x20 || (0x7f..=0x9f).contains(&codepoint)
        },
        _ => false,
    }
}
| rust | {
"argument_definitions": [
{
"definitions": [
"pub struct KeyEvent {\n /// Represents the position of a key independent of the currently active layout.\n ///\n /// It also uniquely identifies the physical key (i.e. it's mostly synonymous with a scancode).\n /// The most prevalent use case for this is games. For example the default keys for the player\n /// to move around might be the W, A, S, and D keys on a US layout. The position of these keys\n /// is more important than their label, so they should map to Z, Q, S, and D on an \"AZERTY\"\n /// layout. (This value is `KeyCode::KeyW` for the Z key on an AZERTY layout.)\n ///\n /// ## Caveats\n ///\n /// - Certain niche hardware will shuffle around physical key positions, e.g. a keyboard that\n /// implements DVORAK in hardware (or firmware)\n /// - Your application will likely have to handle keyboards which are missing keys that your\n /// own keyboard has.\n /// - Certain `KeyCode`s will move between a couple of different positions depending on what\n /// layout the keyboard was manufactured to support.\n ///\n /// **Because of these caveats, it is important that you provide users with a way to configure\n /// most (if not all) keybinds in your application.**\n ///\n /// ## `Fn` and `FnLock`\n ///\n /// `Fn` and `FnLock` key events are *exceedingly unlikely* to be emitted by Winit. These keys\n /// are usually handled at the hardware or OS level, and aren't surfaced to applications. 
If\n /// you somehow see this in the wild, we'd like to know :)\n pub physical_key: keyboard::PhysicalKey,\n\n // Allowing `broken_intra_doc_links` for `logical_key`, because\n // `key_without_modifiers` is not available on all platforms\n #[cfg_attr(\n not(any(windows_platform, macos_platform, x11_platform, wayland_platform)),\n allow(rustdoc::broken_intra_doc_links)\n )]\n /// This value is affected by all modifiers except <kbd>Ctrl</kbd>.\n ///\n /// This has two use cases:\n /// - Allows querying whether the current input is a Dead key.\n /// - Allows handling key-bindings on platforms which don't support [`key_without_modifiers`].\n ///\n /// If you use this field (or [`key_without_modifiers`] for that matter) for keyboard\n /// shortcuts, **it is important that you provide users with a way to configure your\n /// application's shortcuts so you don't render your application unusable for users with an\n /// incompatible keyboard layout.**\n ///\n /// ## Platform-specific\n /// - **Web:** Dead keys might be reported as the real key instead of `Dead` depending on the\n /// browser/OS.\n ///\n /// [`key_without_modifiers`]: crate::platform::modifier_supplement::KeyEventExtModifierSupplement::key_without_modifiers\n pub logical_key: keyboard::Key,\n\n /// Contains the text produced by this keypress.\n ///\n /// In most cases this is identical to the content\n /// of the `Character` variant of `logical_key`.\n /// However, on Windows when a dead key was pressed earlier\n /// but cannot be combined with the character from this\n /// keypress, the produced text will consist of two characters:\n /// the dead-key-character followed by the character resulting\n /// from this keypress.\n ///\n /// An additional difference from `logical_key` is that\n /// this field stores the text representation of any key\n /// that has such a representation. 
For example when\n /// `logical_key` is `Key::Named(NamedKey::Enter)`, this field is `Some(\"\\r\")`.\n ///\n /// This is `None` if the current keypress cannot\n /// be interpreted as text.\n ///\n /// See also: `text_with_all_modifiers()`\n pub text: Option<SmolStr>,\n\n /// Contains the location of this key on the keyboard.\n ///\n /// Certain keys on the keyboard may appear in more than once place. For example, the \"Shift\"\n /// key appears on the left side of the QWERTY keyboard as well as the right side. However,\n /// both keys have the same symbolic value. Another example of this phenomenon is the \"1\"\n /// key, which appears both above the \"Q\" key and as the \"Keypad 1\" key.\n ///\n /// This field allows the user to differentiate between keys like this that have the same\n /// symbolic value but different locations on the keyboard.\n ///\n /// See the [`KeyLocation`] type for more details.\n ///\n /// [`KeyLocation`]: crate::keyboard::KeyLocation\n pub location: keyboard::KeyLocation,\n\n /// Whether the key is being pressed or released.\n ///\n /// See the [`ElementState`] type for more details.\n pub state: ElementState,\n\n /// Whether or not this key is a key repeat event.\n ///\n /// On some systems, holding down a key for some period of time causes that key to be repeated\n /// as though it were being pressed and released repeatedly. 
This field is `true` if and only\n /// if this event is the result of one of those repeats.\n ///\n /// # Example\n ///\n /// In games, you often want to ignore repated key events - this can be\n /// done by ignoring events where this property is set.\n ///\n /// ```\n /// use winit::event::{ElementState, KeyEvent, WindowEvent};\n /// use winit::keyboard::{KeyCode, PhysicalKey};\n /// # let window_event = WindowEvent::RedrawRequested; // To make the example compile\n /// match window_event {\n /// WindowEvent::KeyboardInput {\n /// event:\n /// KeyEvent {\n /// physical_key: PhysicalKey::Code(KeyCode::KeyW),\n /// state: ElementState::Pressed,\n /// repeat: false,\n /// ..\n /// },\n /// ..\n /// } => {\n /// // The physical key `W` was pressed, and it was not a repeat\n /// },\n /// _ => {}, // Handle other events\n /// }\n /// ```\n pub repeat: bool,\n\n /// Platform-specific key event information.\n ///\n /// On Windows, Linux and macOS, this type contains the key without modifiers and the text with\n /// all modifiers applied.\n ///\n /// On Android, iOS, Redox and Web, this type is a no-op.\n pub(crate) platform_specific: platform_impl::KeyEventExtra,\n}"
],
"name": "key",
"type": "&KeyEvent"
},
{
"definitions": [
" pub struct TermMode: u32 {\n const NONE = 0;\n const SHOW_CURSOR = 1;\n const APP_CURSOR = 1 << 1;\n const APP_KEYPAD = 1 << 2;\n const MOUSE_REPORT_CLICK = 1 << 3;\n const BRACKETED_PASTE = 1 << 4;\n const SGR_MOUSE = 1 << 5;\n const MOUSE_MOTION = 1 << 6;\n const LINE_WRAP = 1 << 7;\n const LINE_FEED_NEW_LINE = 1 << 8;\n const ORIGIN = 1 << 9;\n const INSERT = 1 << 10;\n const FOCUS_IN_OUT = 1 << 11;\n const ALT_SCREEN = 1 << 12;\n const MOUSE_DRAG = 1 << 13;\n const UTF8_MOUSE = 1 << 14;\n const ALTERNATE_SCROLL = 1 << 15;\n const VI = 1 << 16;\n const URGENCY_HINTS = 1 << 17;\n const DISAMBIGUATE_ESC_CODES = 1 << 18;\n const REPORT_EVENT_TYPES = 1 << 19;\n const REPORT_ALTERNATE_KEYS = 1 << 20;\n const REPORT_ALL_KEYS_AS_ESC = 1 << 21;\n const REPORT_ASSOCIATED_TEXT = 1 << 22;\n const MOUSE_MODE = Self::MOUSE_REPORT_CLICK.bits() | Self::MOUSE_MOTION.bits() | Self::MOUSE_DRAG.bits();\n const KITTY_KEYBOARD_PROTOCOL = Self::DISAMBIGUATE_ESC_CODES.bits()\n | Self::REPORT_EVENT_TYPES.bits()\n | Self::REPORT_ALTERNATE_KEYS.bits()\n | Self::REPORT_ALL_KEYS_AS_ESC.bits()\n | Self::REPORT_ASSOCIATED_TEXT.bits();\n const ANY = u32::MAX;\n }"
],
"name": "mode",
"type": "TermMode"
},
{
"definitions": [
" pub struct ModifiersState: u32 {\n /// The \"shift\" key.\n const SHIFT = 0b100;\n /// The \"control\" key.\n const CONTROL = 0b100 << 3;\n /// The \"alt\" key.\n const ALT = 0b100 << 6;\n /// This is the \"windows\" key on PC and \"command\" key on Mac.\n const SUPER = 0b100 << 9;\n }"
],
"name": "mods",
"type": "ModifiersState"
}
],
"end_line": 171,
"name": "should_build_sequence",
"signature": "fn should_build_sequence(\n key: &KeyEvent,\n text: &str,\n mode: TermMode,\n mods: ModifiersState,\n ) -> bool",
"start_line": 143
} | {
"class_name": "impl<T: EventListener, A: ActionContext<T>> Processor<T, A> {\n /// Process key input.\n pub fn key_input(&mut self, key: KeyEvent) {\n // IME input will be applied on commit and shouldn't trigger key bindings.\n if self.ctx.display().ime.preedit().is_some() {\n return;\n }\n\n let mode = *self.ctx.terminal().mode();\n let mods = self.ctx.modifiers().state();\n\n if key.state == ElementState::Released {\n if self.ctx.inline_search_state().char_pending {\n self.ctx.window().set_ime_allowed(true);\n }\n self.key_release(key, mode, mods);\n return;\n }\n\n let text = key.text_with_all_modifiers().unwrap_or_default();\n\n // All key bindings are disabled while a hint is being selected.\n if self.ctx.display().hint_state.active() {\n for character in text.chars() {\n self.ctx.hint_input(character);\n }\n return;\n }\n\n // First key after inline search is captured.\n let inline_state = self.ctx.inline_search_state();\n if inline_state.char_pending {\n self.ctx.inline_search_input(text);\n return;\n }\n\n // Reset search delay when the user is still typing.\n self.reset_search_delay();\n\n // Key bindings suppress the character input.\n if self.process_key_bindings(&key) {\n return;\n }\n\n if self.ctx.search_active() {\n for character in text.chars() {\n self.ctx.search_input(character);\n }\n\n return;\n }\n\n // Vi mode on its own doesn't have any input, the search input was done before.\n if mode.contains(TermMode::VI) {\n return;\n }\n\n // Mask `Alt` modifier from input when we won't send esc.\n let mods = if self.alt_send_esc(&key, text) { mods } else { mods & !ModifiersState::ALT };\n\n let build_key_sequence = Self::should_build_sequence(&key, text, mode, mods);\n let is_modifier_key = Self::is_modifier_key(&key);\n\n let bytes = if build_key_sequence {\n build_sequence(key, mods, mode)\n } else {\n let mut bytes = Vec::with_capacity(text.len() + 1);\n if mods.alt_key() {\n bytes.push(b'\\x1b');\n }\n\n bytes.extend_from_slice(text.as_bytes());\n 
bytes\n };\n\n // Write only if we have something to write.\n if !bytes.is_empty() {\n // Don't clear selection/scroll down when writing escaped modifier keys.\n if !is_modifier_key {\n self.ctx.on_terminal_input_start();\n }\n self.ctx.write_to_pty(bytes);\n }\n }\n\n fn alt_send_esc(&mut self, key: &KeyEvent, text: &str) -> bool {\n #[cfg(not(target_os = \"macos\"))]\n let alt_send_esc = self.ctx.modifiers().state().alt_key();\n\n #[cfg(target_os = \"macos\")]\n let alt_send_esc = {\n let option_as_alt = self.ctx.config().window.option_as_alt();\n self.ctx.modifiers().state().alt_key()\n && (option_as_alt == OptionAsAlt::Both\n || (option_as_alt == OptionAsAlt::OnlyLeft\n && self.ctx.modifiers().lalt_state() == ModifiersKeyState::Pressed)\n || (option_as_alt == OptionAsAlt::OnlyRight\n && self.ctx.modifiers().ralt_state() == ModifiersKeyState::Pressed))\n };\n\n match key.logical_key {\n Key::Named(named) => {\n if named.to_text().is_some() {\n alt_send_esc\n } else {\n // Treat `Alt` as modifier for named keys without text, like ArrowUp.\n self.ctx.modifiers().state().alt_key()\n }\n },\n _ => alt_send_esc && text.chars().count() == 1,\n }\n }\n\n fn is_modifier_key(key: &KeyEvent) -> bool {\n matches!(\n key.logical_key.as_ref(),\n Key::Named(NamedKey::Shift)\n | Key::Named(NamedKey::Control)\n | Key::Named(NamedKey::Alt)\n | Key::Named(NamedKey::Super)\n )\n }\n\n /// Check whether we should try to build escape sequence for the [`KeyEvent`].\n fn should_build_sequence(\n key: &KeyEvent,\n text: &str,\n mode: TermMode,\n mods: ModifiersState,\n ) -> bool {\n if mode.contains(TermMode::REPORT_ALL_KEYS_AS_ESC) {\n return true;\n }\n\n let disambiguate = mode.contains(TermMode::DISAMBIGUATE_ESC_CODES)\n && (key.logical_key == Key::Named(NamedKey::Escape)\n || key.location == KeyLocation::Numpad\n || (!mods.is_empty()\n && (mods != ModifiersState::SHIFT\n || matches!(\n key.logical_key,\n Key::Named(NamedKey::Tab)\n | Key::Named(NamedKey::Enter)\n | 
Key::Named(NamedKey::Backspace)\n ))));\n\n match key.logical_key {\n _ if disambiguate => true,\n // Exclude all the named keys unless they have textual representation.\n Key::Named(named) => named.to_text().is_none(),\n _ => text.is_empty(),\n }\n }\n\n /// Attempt to find a binding and execute its action.\n ///\n /// The provided mode, mods, and key must match what is allowed by a binding\n /// for its action to be executed.\n fn process_key_bindings(&mut self, key: &KeyEvent) -> bool {\n let mode = BindingMode::new(self.ctx.terminal().mode(), self.ctx.search_active());\n let mods = self.ctx.modifiers().state();\n\n // Don't suppress char if no bindings were triggered.\n let mut suppress_chars = None;\n\n // We don't want the key without modifier, because it means something else most of\n // the time. However what we want is to manually lowercase the character to account\n // for both small and capital letters on regular characters at the same time.\n let logical_key = if let Key::Character(ch) = key.logical_key.as_ref() {\n // Match `Alt` bindings without `Alt` being applied, otherwise they use the\n // composed chars, which are not intuitive to bind.\n //\n // On Windows, the `Ctrl + Alt` mangles `logical_key` to unidentified values, thus\n // preventing them from being used in bindings\n //\n // For more see https://github.com/rust-windowing/winit/issues/2945.\n if (cfg!(target_os = \"macos\") || (cfg!(windows) && mods.control_key()))\n && mods.alt_key()\n {\n key.key_without_modifiers()\n } else {\n Key::Character(ch.to_lowercase().into())\n }\n } else {\n key.logical_key.clone()\n };\n\n // Get the action of a key binding.\n let mut binding_action = |binding: &KeyBinding| {\n let key = match (&binding.trigger, &logical_key) {\n (BindingKey::Scancode(_), _) => BindingKey::Scancode(key.physical_key),\n (_, code) => {\n BindingKey::Keycode { key: code.clone(), location: key.location.into() }\n },\n };\n\n if binding.is_triggered_by(mode, mods, &key) {\n // 
Pass through the key if any of the bindings has the `ReceiveChar` action.\n *suppress_chars.get_or_insert(true) &= binding.action != Action::ReceiveChar;\n\n // Binding was triggered; run the action.\n Some(binding.action.clone())\n } else {\n None\n }\n };\n\n // Trigger matching key bindings.\n for i in 0..self.ctx.config().key_bindings().len() {\n let binding = &self.ctx.config().key_bindings()[i];\n if let Some(action) = binding_action(binding) {\n action.execute(&mut self.ctx);\n }\n }\n\n // Trigger key bindings for hints.\n for i in 0..self.ctx.config().hints.enabled.len() {\n let hint = &self.ctx.config().hints.enabled[i];\n let binding = match hint.binding.as_ref() {\n Some(binding) => binding.key_binding(hint),\n None => continue,\n };\n\n if let Some(action) = binding_action(binding) {\n action.execute(&mut self.ctx);\n }\n }\n\n suppress_chars.unwrap_or(false)\n }\n\n /// Handle key release.\n fn key_release(&mut self, key: KeyEvent, mode: TermMode, mods: ModifiersState) {\n if !mode.contains(TermMode::REPORT_EVENT_TYPES)\n || mode.contains(TermMode::VI)\n || self.ctx.search_active()\n || self.ctx.display().hint_state.active()\n {\n return;\n }\n\n // Mask `Alt` modifier from input when we won't send esc.\n let text = key.text_with_all_modifiers().unwrap_or_default();\n let mods = if self.alt_send_esc(&key, text) { mods } else { mods & !ModifiersState::ALT };\n\n let bytes = match key.logical_key.as_ref() {\n Key::Named(NamedKey::Enter)\n | Key::Named(NamedKey::Tab)\n | Key::Named(NamedKey::Backspace)\n if !mode.contains(TermMode::REPORT_ALL_KEYS_AS_ESC) =>\n {\n return\n },\n _ => build_sequence(key, mods, mode),\n };\n\n self.ctx.write_to_pty(bytes);\n }\n\n /// Reset search delay.\n fn reset_search_delay(&mut self) {\n if self.ctx.search_active() {\n let timer_id = TimerId::new(Topic::DelayedSearch, self.ctx.window().id());\n let scheduler = self.ctx.scheduler_mut();\n if let Some(timer) = scheduler.unschedule(timer_id) {\n 
scheduler.schedule(timer.event, TYPING_SEARCH_DELAY, false, timer.id);\n }\n }\n }\n}",
"class_signature": "impl<T: EventListener, A: ActionContext<T>> Processor<T, A>"
} |
build_sequence | alacritty-master/alacritty/src/input/keyboard.rs | fn build_sequence(key: KeyEvent, mods: ModifiersState, mode: TermMode) -> Vec<u8> {
let mut modifiers = mods.into();
let kitty_seq = mode.intersects(
TermMode::REPORT_ALL_KEYS_AS_ESC
| TermMode::DISAMBIGUATE_ESC_CODES
| TermMode::REPORT_EVENT_TYPES,
);
let kitty_encode_all = mode.contains(TermMode::REPORT_ALL_KEYS_AS_ESC);
// The default parameter is 1, so we can omit it.
let kitty_event_type = mode.contains(TermMode::REPORT_EVENT_TYPES)
&& (key.repeat || key.state == ElementState::Released);
let context =
SequenceBuilder { mode, modifiers, kitty_seq, kitty_encode_all, kitty_event_type };
let associated_text = key.text_with_all_modifiers().filter(|text| {
mode.contains(TermMode::REPORT_ASSOCIATED_TEXT)
&& key.state != ElementState::Released
&& !text.is_empty()
&& !is_control_character(text)
});
let sequence_base = context
.try_build_numpad(&key)
.or_else(|| context.try_build_named_kitty(&key))
.or_else(|| context.try_build_named_normal(&key, associated_text.is_some()))
.or_else(|| context.try_build_control_char_or_mod(&key, &mut modifiers))
.or_else(|| context.try_build_textual(&key, associated_text));
let (payload, terminator) = match sequence_base {
Some(SequenceBase { payload, terminator }) => (payload, terminator),
_ => return Vec::new(),
};
let mut payload = format!("\x1b[{payload}");
// Add modifiers information.
if kitty_event_type || !modifiers.is_empty() || associated_text.is_some() {
payload.push_str(&format!(";{}", modifiers.encode_esc_sequence()));
}
// Push event type.
if kitty_event_type {
payload.push(':');
let event_type = match key.state {
_ if key.repeat => '2',
ElementState::Pressed => '1',
ElementState::Released => '3',
};
payload.push(event_type);
}
if let Some(text) = associated_text {
let mut codepoints = text.chars().map(u32::from);
if let Some(codepoint) = codepoints.next() {
payload.push_str(&format!(";{codepoint}"));
}
for codepoint in codepoints {
payload.push_str(&format!(":{codepoint}"));
}
}
payload.push(terminator.encode_esc_sequence());
payload.into_bytes()
} | use std::borrow::Cow;
use winit::event::{ElementState, KeyEvent};
#[cfg(target_os = "macos")]
use winit::keyboard::ModifiersKeyState;
use winit::keyboard::{Key, KeyLocation, ModifiersState, NamedKey};
#[cfg(target_os = "macos")]
use winit::platform::macos::OptionAsAlt;
use alacritty_terminal::event::EventListener;
use alacritty_terminal::term::TermMode;
use winit::platform::modifier_supplement::KeyEventExtModifierSupplement;
use crate::config::{Action, BindingKey, BindingMode, KeyBinding};
use crate::event::TYPING_SEARCH_DELAY;
use crate::input::{ActionContext, Execute, Processor};
use crate::scheduler::{TimerId, Topic};
impl<T: EventListener, A: ActionContext<T>> Processor<T, A> {
    /// Process key input.
    ///
    /// Dispatches a winit [`KeyEvent`] to, in priority order: IME, key release
    /// handling, hint selection, inline search capture, key bindings, search
    /// input, and finally the PTY (either as a built escape sequence or as the
    /// key's literal text with an optional ESC prefix for `Alt`).
    pub fn key_input(&mut self, key: KeyEvent) {
        // IME input will be applied on commit and shouldn't trigger key bindings.
        if self.ctx.display().ime.preedit().is_some() {
            return;
        }

        let mode = *self.ctx.terminal().mode();
        let mods = self.ctx.modifiers().state();

        if key.state == ElementState::Released {
            if self.ctx.inline_search_state().char_pending {
                self.ctx.window().set_ime_allowed(true);
            }
            // Releases are only reported to the PTY; they never trigger bindings.
            self.key_release(key, mode, mods);
            return;
        }

        let text = key.text_with_all_modifiers().unwrap_or_default();

        // All key bindings are disabled while a hint is being selected.
        if self.ctx.display().hint_state.active() {
            for character in text.chars() {
                self.ctx.hint_input(character);
            }
            return;
        }

        // First key after inline search is captured.
        let inline_state = self.ctx.inline_search_state();
        if inline_state.char_pending {
            self.ctx.inline_search_input(text);
            return;
        }

        // Reset search delay when the user is still typing.
        self.reset_search_delay();

        // Key bindings suppress the character input.
        if self.process_key_bindings(&key) {
            return;
        }

        if self.ctx.search_active() {
            for character in text.chars() {
                self.ctx.search_input(character);
            }
            return;
        }

        // Vi mode on its own doesn't have any input, the search input was done before.
        if mode.contains(TermMode::VI) {
            return;
        }

        // Mask `Alt` modifier from input when we won't send esc.
        let mods = if self.alt_send_esc(&key, text) { mods } else { mods & !ModifiersState::ALT };

        let build_key_sequence = Self::should_build_sequence(&key, text, mode, mods);
        let is_modifier_key = Self::is_modifier_key(&key);

        let bytes = if build_key_sequence {
            build_sequence(key, mods, mode)
        } else {
            // Legacy path: send the key's text, optionally prefixed with ESC for `Alt`.
            let mut bytes = Vec::with_capacity(text.len() + 1);
            if mods.alt_key() {
                bytes.push(b'\x1b');
            }
            bytes.extend_from_slice(text.as_bytes());
            bytes
        };

        // Write only if we have something to write.
        if !bytes.is_empty() {
            // Don't clear selection/scroll down when writing escaped modifier keys.
            if !is_modifier_key {
                self.ctx.on_terminal_input_start();
            }
            self.ctx.write_to_pty(bytes);
        }
    }

    /// Whether this key press should emit an ESC prefix for the held `Alt` modifier.
    ///
    /// On macOS this additionally honors the `option_as_alt` configuration, matching
    /// the pressed side (left/right/both) against the physical Option key state.
    fn alt_send_esc(&mut self, key: &KeyEvent, text: &str) -> bool {
        #[cfg(not(target_os = "macos"))]
        let alt_send_esc = self.ctx.modifiers().state().alt_key();

        #[cfg(target_os = "macos")]
        let alt_send_esc = {
            let option_as_alt = self.ctx.config().window.option_as_alt();
            self.ctx.modifiers().state().alt_key()
                && (option_as_alt == OptionAsAlt::Both
                    || (option_as_alt == OptionAsAlt::OnlyLeft
                        && self.ctx.modifiers().lalt_state() == ModifiersKeyState::Pressed)
                    || (option_as_alt == OptionAsAlt::OnlyRight
                        && self.ctx.modifiers().ralt_state() == ModifiersKeyState::Pressed))
        };

        match key.logical_key {
            Key::Named(named) => {
                if named.to_text().is_some() {
                    alt_send_esc
                } else {
                    // Treat `Alt` as modifier for named keys without text, like ArrowUp.
                    self.ctx.modifiers().state().alt_key()
                }
            },
            // Only prefix single-character input; multi-char text (e.g. dead-key
            // combinations) is sent as-is.
            _ => alt_send_esc && text.chars().count() == 1,
        }
    }

    /// Check whether the event is for a standalone modifier key (Shift/Control/Alt/Super).
    fn is_modifier_key(key: &KeyEvent) -> bool {
        matches!(
            key.logical_key.as_ref(),
            Key::Named(NamedKey::Shift)
                | Key::Named(NamedKey::Control)
                | Key::Named(NamedKey::Alt)
                | Key::Named(NamedKey::Super)
        )
    }

    /// Check whether we should try to build escape sequence for the [`KeyEvent`].
    fn should_build_sequence(
        key: &KeyEvent,
        text: &str,
        mode: TermMode,
        mods: ModifiersState,
    ) -> bool {
        if mode.contains(TermMode::REPORT_ALL_KEYS_AS_ESC) {
            return true;
        }

        // Under DISAMBIGUATE_ESC_CODES, Escape, numpad keys, and most modified keys
        // are encoded; plain SHIFT is excluded except for Tab/Enter/Backspace.
        let disambiguate = mode.contains(TermMode::DISAMBIGUATE_ESC_CODES)
            && (key.logical_key == Key::Named(NamedKey::Escape)
                || key.location == KeyLocation::Numpad
                || (!mods.is_empty()
                    && (mods != ModifiersState::SHIFT
                        || matches!(
                            key.logical_key,
                            Key::Named(NamedKey::Tab)
                                | Key::Named(NamedKey::Enter)
                                | Key::Named(NamedKey::Backspace)
                        ))));

        match key.logical_key {
            _ if disambiguate => true,
            // Exclude all the named keys unless they have textual representation.
            Key::Named(named) => named.to_text().is_none(),
            _ => text.is_empty(),
        }
    }

    /// Attempt to find a binding and execute its action.
    ///
    /// The provided mode, mods, and key must match what is allowed by a binding
    /// for its action to be executed.
    ///
    /// Returns `true` when the key's character input should be suppressed.
    fn process_key_bindings(&mut self, key: &KeyEvent) -> bool {
        let mode = BindingMode::new(self.ctx.terminal().mode(), self.ctx.search_active());
        let mods = self.ctx.modifiers().state();

        // Don't suppress char if no bindings were triggered.
        let mut suppress_chars = None;

        // We don't want the key without modifier, because it means something else most of
        // the time. However what we want is to manually lowercase the character to account
        // for both small and capital letters on regular characters at the same time.
        let logical_key = if let Key::Character(ch) = key.logical_key.as_ref() {
            // Match `Alt` bindings without `Alt` being applied, otherwise they use the
            // composed chars, which are not intuitive to bind.
            //
            // On Windows, the `Ctrl + Alt` mangles `logical_key` to unidentified values, thus
            // preventing them from being used in bindings
            //
            // For more see https://github.com/rust-windowing/winit/issues/2945.
            if (cfg!(target_os = "macos") || (cfg!(windows) && mods.control_key()))
                && mods.alt_key()
            {
                key.key_without_modifiers()
            } else {
                Key::Character(ch.to_lowercase().into())
            }
        } else {
            key.logical_key.clone()
        };

        // Get the action of a key binding.
        let mut binding_action = |binding: &KeyBinding| {
            let key = match (&binding.trigger, &logical_key) {
                (BindingKey::Scancode(_), _) => BindingKey::Scancode(key.physical_key),
                (_, code) => {
                    BindingKey::Keycode { key: code.clone(), location: key.location.into() }
                },
            };

            if binding.is_triggered_by(mode, mods, &key) {
                // Pass through the key if any of the bindings has the `ReceiveChar` action.
                *suppress_chars.get_or_insert(true) &= binding.action != Action::ReceiveChar;

                // Binding was triggered; run the action.
                Some(binding.action.clone())
            } else {
                None
            }
        };

        // Trigger matching key bindings.
        //
        // Indexed iteration avoids borrowing the config across the `execute` call.
        for i in 0..self.ctx.config().key_bindings().len() {
            let binding = &self.ctx.config().key_bindings()[i];
            if let Some(action) = binding_action(binding) {
                action.execute(&mut self.ctx);
            }
        }

        // Trigger key bindings for hints.
        for i in 0..self.ctx.config().hints.enabled.len() {
            let hint = &self.ctx.config().hints.enabled[i];
            let binding = match hint.binding.as_ref() {
                Some(binding) => binding.key_binding(hint),
                None => continue,
            };

            if let Some(action) = binding_action(binding) {
                action.execute(&mut self.ctx);
            }
        }

        suppress_chars.unwrap_or(false)
    }

    /// Handle key release.
    ///
    /// Only emits output when the kitty `REPORT_EVENT_TYPES` mode is active and
    /// no Vi/search/hint interaction is in progress.
    fn key_release(&mut self, key: KeyEvent, mode: TermMode, mods: ModifiersState) {
        if !mode.contains(TermMode::REPORT_EVENT_TYPES)
            || mode.contains(TermMode::VI)
            || self.ctx.search_active()
            || self.ctx.display().hint_state.active()
        {
            return;
        }

        // Mask `Alt` modifier from input when we won't send esc.
        let text = key.text_with_all_modifiers().unwrap_or_default();
        let mods = if self.alt_send_esc(&key, text) { mods } else { mods & !ModifiersState::ALT };

        let bytes = match key.logical_key.as_ref() {
            // Winit only provides text on press; release of these control-character keys
            // is only reported when every key is escaped.
            Key::Named(NamedKey::Enter)
            | Key::Named(NamedKey::Tab)
            | Key::Named(NamedKey::Backspace)
                if !mode.contains(TermMode::REPORT_ALL_KEYS_AS_ESC) =>
            {
                return
            },
            _ => build_sequence(key, mods, mode),
        };

        self.ctx.write_to_pty(bytes);
    }

    /// Reset search delay.
    ///
    /// Reschedules the pending delayed-search timer so the search only fires once
    /// the user stops typing for `TYPING_SEARCH_DELAY`.
    fn reset_search_delay(&mut self) {
        if self.ctx.search_active() {
            let timer_id = TimerId::new(Topic::DelayedSearch, self.ctx.window().id());
            let scheduler = self.ctx.scheduler_mut();
            if let Some(timer) = scheduler.unschedule(timer_id) {
                scheduler.schedule(timer.event, TYPING_SEARCH_DELAY, false, timer.id);
            }
        }
    }
}
/// Build a key's keyboard escape sequence based on the given `key`, `mods`, and `mode`.
///
/// The key sequences for `APP_KEYPAD` and alike are handled inside the bindings.
///
/// Returns an empty `Vec` when no sequence applies to the event. The payload follows
/// the `CSI base ; modifiers [:event-type] [; codepoints] terminator` layout used by
/// the kitty keyboard protocol (and its legacy-compatible subset).
#[inline(never)]
fn build_sequence(key: KeyEvent, mods: ModifiersState, mode: TermMode) -> Vec<u8> {
    let mut modifiers = mods.into();

    // Any of these modes switches the encoding to the kitty protocol.
    let kitty_seq = mode.intersects(
        TermMode::REPORT_ALL_KEYS_AS_ESC
            | TermMode::DISAMBIGUATE_ESC_CODES
            | TermMode::REPORT_EVENT_TYPES,
    );

    let kitty_encode_all = mode.contains(TermMode::REPORT_ALL_KEYS_AS_ESC);
    // The default parameter is 1, so we can omit it.
    let kitty_event_type = mode.contains(TermMode::REPORT_EVENT_TYPES)
        && (key.repeat || key.state == ElementState::Released);

    // NOTE: `context` receives a *copy* of `modifiers`; the original is still
    // mutated below by `try_build_control_char_or_mod` for modifier key events.
    let context =
        SequenceBuilder { mode, modifiers, kitty_seq, kitty_encode_all, kitty_event_type };

    let associated_text = key.text_with_all_modifiers().filter(|text| {
        mode.contains(TermMode::REPORT_ASSOCIATED_TEXT)
            && key.state != ElementState::Released
            && !text.is_empty()
            && !is_control_character(text)
    });

    // Fallback chain; the order encodes builder priority (numpad first, textual last).
    let sequence_base = context
        .try_build_numpad(&key)
        .or_else(|| context.try_build_named_kitty(&key))
        .or_else(|| context.try_build_named_normal(&key, associated_text.is_some()))
        .or_else(|| context.try_build_control_char_or_mod(&key, &mut modifiers))
        .or_else(|| context.try_build_textual(&key, associated_text));

    let (payload, terminator) = match sequence_base {
        Some(SequenceBase { payload, terminator }) => (payload, terminator),
        _ => return Vec::new(),
    };

    let mut payload = format!("\x1b[{payload}");

    // Add modifiers information.
    if kitty_event_type || !modifiers.is_empty() || associated_text.is_some() {
        payload.push_str(&format!(";{}", modifiers.encode_esc_sequence()));
    }

    // Push event type.
    if kitty_event_type {
        payload.push(':');
        let event_type = match key.state {
            _ if key.repeat => '2',
            ElementState::Pressed => '1',
            ElementState::Released => '3',
        };
        payload.push(event_type);
    }

    // Associated text: first codepoint is `;`-separated, the rest `:`-separated.
    if let Some(text) = associated_text {
        let mut codepoints = text.chars().map(u32::from);
        if let Some(codepoint) = codepoints.next() {
            payload.push_str(&format!(";{codepoint}"));
        }
        for codepoint in codepoints {
            payload.push_str(&format!(":{codepoint}"));
        }
    }

    payload.push(terminator.encode_esc_sequence());

    payload.into_bytes()
}
/// Helper to build escape sequence payloads from [`KeyEvent`].
pub struct SequenceBuilder {
    /// Active terminal modes influencing the encoding (e.g. `REPORT_ALTERNATE_KEYS`).
    mode: TermMode,
    /// The emitted sequence should follow the kitty keyboard protocol.
    kitty_seq: bool,
    /// Encode all the keys according to the protocol.
    kitty_encode_all: bool,
    /// Report event types.
    kitty_event_type: bool,
    /// Snapshot of the modifier state taken when the builder was constructed.
    modifiers: SequenceModifiers,
}
impl SequenceBuilder {
    /// Try building sequence from the event's emitting text.
    ///
    /// Only applies in kitty mode to `Key::Character` events; encodes the key's
    /// unicode codepoint (and alternate/shifted codepoint when reporting
    /// alternate keys is enabled).
    fn try_build_textual(
        &self,
        key: &KeyEvent,
        associated_text: Option<&str>,
    ) -> Option<SequenceBase> {
        let character = match key.logical_key.as_ref() {
            Key::Character(character) if self.kitty_seq => character,
            _ => return None,
        };

        if character.chars().count() == 1 {
            let shift = self.modifiers.contains(SequenceModifiers::SHIFT);

            let ch = character.chars().next().unwrap();
            // With SHIFT held, report the lowercase codepoint as the base key.
            let unshifted_ch = if shift { ch.to_lowercase().next().unwrap() } else { ch };

            let alternate_key_code = u32::from(ch);
            let mut unicode_key_code = u32::from(unshifted_ch);

            // Try to get the base for keys which change based on modifier, like `1` for `!`.
            //
            // However it should only be performed when `SHIFT` is pressed.
            if shift && alternate_key_code == unicode_key_code {
                if let Key::Character(unmodded) = key.key_without_modifiers().as_ref() {
                    unicode_key_code = u32::from(unmodded.chars().next().unwrap_or(unshifted_ch));
                }
            }

            // NOTE: Base layouts are ignored, since winit doesn't expose this information
            // yet.
            let payload = if self.mode.contains(TermMode::REPORT_ALTERNATE_KEYS)
                && alternate_key_code != unicode_key_code
            {
                format!("{unicode_key_code}:{alternate_key_code}")
            } else {
                unicode_key_code.to_string()
            };

            Some(SequenceBase::new(payload.into(), SequenceTerminator::Kitty))
        } else if self.kitty_encode_all && associated_text.is_some() {
            // Fallback when need to report text, but we don't have any key associated with this
            // text.
            Some(SequenceBase::new("0".into(), SequenceTerminator::Kitty))
        } else {
            None
        }
    }

    /// Try building from numpad key.
    ///
    /// `None` is returned when the key is neither known nor numpad.
    ///
    /// The numeric bases are the kitty functional-key codepoints for numpad keys.
    fn try_build_numpad(&self, key: &KeyEvent) -> Option<SequenceBase> {
        if !self.kitty_seq || key.location != KeyLocation::Numpad {
            return None;
        }

        let base = match key.logical_key.as_ref() {
            Key::Character("0") => "57399",
            Key::Character("1") => "57400",
            Key::Character("2") => "57401",
            Key::Character("3") => "57402",
            Key::Character("4") => "57403",
            Key::Character("5") => "57404",
            Key::Character("6") => "57405",
            Key::Character("7") => "57406",
            Key::Character("8") => "57407",
            Key::Character("9") => "57408",
            Key::Character(".") => "57409",
            Key::Character("/") => "57410",
            Key::Character("*") => "57411",
            Key::Character("-") => "57412",
            Key::Character("+") => "57413",
            Key::Character("=") => "57415",
            Key::Named(named) => match named {
                NamedKey::Enter => "57414",
                NamedKey::ArrowLeft => "57417",
                NamedKey::ArrowRight => "57418",
                NamedKey::ArrowUp => "57419",
                NamedKey::ArrowDown => "57420",
                NamedKey::PageUp => "57421",
                NamedKey::PageDown => "57422",
                NamedKey::Home => "57423",
                NamedKey::End => "57424",
                NamedKey::Insert => "57425",
                NamedKey::Delete => "57426",
                _ => return None,
            },
            _ => return None,
        };

        Some(SequenceBase::new(base.into(), SequenceTerminator::Kitty))
    }

    /// Try building from [`NamedKey`] using the kitty keyboard protocol encoding
    /// for functional keys.
    fn try_build_named_kitty(&self, key: &KeyEvent) -> Option<SequenceBase> {
        let named = match key.logical_key {
            Key::Named(named) if self.kitty_seq => named,
            _ => return None,
        };

        let (base, terminator) = match named {
            // F3 in kitty protocol diverges from alacritty's terminfo.
            NamedKey::F3 => ("13", SequenceTerminator::Normal('~')),
            NamedKey::F13 => ("57376", SequenceTerminator::Kitty),
            NamedKey::F14 => ("57377", SequenceTerminator::Kitty),
            NamedKey::F15 => ("57378", SequenceTerminator::Kitty),
            NamedKey::F16 => ("57379", SequenceTerminator::Kitty),
            NamedKey::F17 => ("57380", SequenceTerminator::Kitty),
            NamedKey::F18 => ("57381", SequenceTerminator::Kitty),
            NamedKey::F19 => ("57382", SequenceTerminator::Kitty),
            NamedKey::F20 => ("57383", SequenceTerminator::Kitty),
            NamedKey::F21 => ("57384", SequenceTerminator::Kitty),
            NamedKey::F22 => ("57385", SequenceTerminator::Kitty),
            NamedKey::F23 => ("57386", SequenceTerminator::Kitty),
            NamedKey::F24 => ("57387", SequenceTerminator::Kitty),
            NamedKey::F25 => ("57388", SequenceTerminator::Kitty),
            NamedKey::F26 => ("57389", SequenceTerminator::Kitty),
            NamedKey::F27 => ("57390", SequenceTerminator::Kitty),
            NamedKey::F28 => ("57391", SequenceTerminator::Kitty),
            NamedKey::F29 => ("57392", SequenceTerminator::Kitty),
            NamedKey::F30 => ("57393", SequenceTerminator::Kitty),
            NamedKey::F31 => ("57394", SequenceTerminator::Kitty),
            NamedKey::F32 => ("57395", SequenceTerminator::Kitty),
            NamedKey::F33 => ("57396", SequenceTerminator::Kitty),
            NamedKey::F34 => ("57397", SequenceTerminator::Kitty),
            NamedKey::F35 => ("57398", SequenceTerminator::Kitty),
            NamedKey::ScrollLock => ("57359", SequenceTerminator::Kitty),
            NamedKey::PrintScreen => ("57361", SequenceTerminator::Kitty),
            NamedKey::Pause => ("57362", SequenceTerminator::Kitty),
            NamedKey::ContextMenu => ("57363", SequenceTerminator::Kitty),
            NamedKey::MediaPlay => ("57428", SequenceTerminator::Kitty),
            NamedKey::MediaPause => ("57429", SequenceTerminator::Kitty),
            NamedKey::MediaPlayPause => ("57430", SequenceTerminator::Kitty),
            NamedKey::MediaStop => ("57432", SequenceTerminator::Kitty),
            NamedKey::MediaFastForward => ("57433", SequenceTerminator::Kitty),
            NamedKey::MediaRewind => ("57434", SequenceTerminator::Kitty),
            NamedKey::MediaTrackNext => ("57435", SequenceTerminator::Kitty),
            NamedKey::MediaTrackPrevious => ("57436", SequenceTerminator::Kitty),
            NamedKey::MediaRecord => ("57437", SequenceTerminator::Kitty),
            NamedKey::AudioVolumeDown => ("57438", SequenceTerminator::Kitty),
            NamedKey::AudioVolumeUp => ("57439", SequenceTerminator::Kitty),
            NamedKey::AudioVolumeMute => ("57440", SequenceTerminator::Kitty),
            _ => return None,
        };

        Some(SequenceBase::new(base.into(), terminator))
    }

    /// Try building from [`NamedKey`].
    ///
    /// Produces the classic xterm/DEC encodings (arrows, Home/End, F1-F20).
    fn try_build_named_normal(
        &self,
        key: &KeyEvent,
        has_associated_text: bool,
    ) -> Option<SequenceBase> {
        let named = match key.logical_key {
            Key::Named(named) => named,
            _ => return None,
        };

        // The default parameter is 1, so we can omit it.
        let one_based =
            if self.modifiers.is_empty() && !self.kitty_event_type && !has_associated_text {
                ""
            } else {
                "1"
            };
        let (base, terminator) = match named {
            NamedKey::PageUp => ("5", SequenceTerminator::Normal('~')),
            NamedKey::PageDown => ("6", SequenceTerminator::Normal('~')),
            NamedKey::Insert => ("2", SequenceTerminator::Normal('~')),
            NamedKey::Delete => ("3", SequenceTerminator::Normal('~')),
            NamedKey::Home => (one_based, SequenceTerminator::Normal('H')),
            NamedKey::End => (one_based, SequenceTerminator::Normal('F')),
            NamedKey::ArrowLeft => (one_based, SequenceTerminator::Normal('D')),
            NamedKey::ArrowRight => (one_based, SequenceTerminator::Normal('C')),
            NamedKey::ArrowUp => (one_based, SequenceTerminator::Normal('A')),
            NamedKey::ArrowDown => (one_based, SequenceTerminator::Normal('B')),
            NamedKey::F1 => (one_based, SequenceTerminator::Normal('P')),
            NamedKey::F2 => (one_based, SequenceTerminator::Normal('Q')),
            NamedKey::F3 => (one_based, SequenceTerminator::Normal('R')),
            NamedKey::F4 => (one_based, SequenceTerminator::Normal('S')),
            NamedKey::F5 => ("15", SequenceTerminator::Normal('~')),
            NamedKey::F6 => ("17", SequenceTerminator::Normal('~')),
            NamedKey::F7 => ("18", SequenceTerminator::Normal('~')),
            NamedKey::F8 => ("19", SequenceTerminator::Normal('~')),
            NamedKey::F9 => ("20", SequenceTerminator::Normal('~')),
            NamedKey::F10 => ("21", SequenceTerminator::Normal('~')),
            NamedKey::F11 => ("23", SequenceTerminator::Normal('~')),
            NamedKey::F12 => ("24", SequenceTerminator::Normal('~')),
            NamedKey::F13 => ("25", SequenceTerminator::Normal('~')),
            NamedKey::F14 => ("26", SequenceTerminator::Normal('~')),
            NamedKey::F15 => ("28", SequenceTerminator::Normal('~')),
            NamedKey::F16 => ("29", SequenceTerminator::Normal('~')),
            NamedKey::F17 => ("31", SequenceTerminator::Normal('~')),
            NamedKey::F18 => ("32", SequenceTerminator::Normal('~')),
            NamedKey::F19 => ("33", SequenceTerminator::Normal('~')),
            NamedKey::F20 => ("34", SequenceTerminator::Normal('~')),
            _ => return None,
        };

        Some(SequenceBase::new(base.into(), terminator))
    }

    /// Try building escape from control characters (e.g. Enter) and modifiers.
    ///
    /// Also updates `mods` with the pressed/released state of the modifier key
    /// itself, since kitty expects the modifier state to include this event.
    fn try_build_control_char_or_mod(
        &self,
        key: &KeyEvent,
        mods: &mut SequenceModifiers,
    ) -> Option<SequenceBase> {
        if !self.kitty_encode_all && !self.kitty_seq {
            return None;
        }

        let named = match key.logical_key {
            Key::Named(named) => named,
            _ => return None,
        };

        let base = match named {
            NamedKey::Tab => "9",
            NamedKey::Enter => "13",
            NamedKey::Escape => "27",
            NamedKey::Space => "32",
            NamedKey::Backspace => "127",
            _ => "",
        };

        // Fail when the key is not a named control character and the active mode prohibits us
        // from encoding modifier keys.
        if !self.kitty_encode_all && base.is_empty() {
            return None;
        }

        // Left/right modifier variants get distinct kitty codepoints.
        let base = match (named, key.location) {
            (NamedKey::Shift, KeyLocation::Left) => "57441",
            (NamedKey::Control, KeyLocation::Left) => "57442",
            (NamedKey::Alt, KeyLocation::Left) => "57443",
            (NamedKey::Super, KeyLocation::Left) => "57444",
            (NamedKey::Hyper, KeyLocation::Left) => "57445",
            (NamedKey::Meta, KeyLocation::Left) => "57446",
            (NamedKey::Shift, _) => "57447",
            (NamedKey::Control, _) => "57448",
            (NamedKey::Alt, _) => "57449",
            (NamedKey::Super, _) => "57450",
            (NamedKey::Hyper, _) => "57451",
            (NamedKey::Meta, _) => "57452",
            (NamedKey::CapsLock, _) => "57358",
            (NamedKey::NumLock, _) => "57360",
            _ => base,
        };

        // NOTE: Kitty's protocol mandates that the modifier state is applied before
        // key press, however winit sends them after the key press, so for modifiers
        // itself apply the state based on keysyms and not the _actual_ modifiers
        // state, which is how kitty is doing so and what is suggested in such case.
        let press = key.state.is_pressed();
        match named {
            NamedKey::Shift => mods.set(SequenceModifiers::SHIFT, press),
            NamedKey::Control => mods.set(SequenceModifiers::CONTROL, press),
            NamedKey::Alt => mods.set(SequenceModifiers::ALT, press),
            NamedKey::Super => mods.set(SequenceModifiers::SUPER, press),
            _ => (),
        }

        if base.is_empty() {
            None
        } else {
            Some(SequenceBase::new(base.into(), SequenceTerminator::Kitty))
        }
    }
}
pub struct SequenceBase {
    /// The base of the payload, which is the `number` and optionally an alt base from the kitty
    /// spec.
    payload: Cow<'static, str>,
    /// The character terminating the escape sequence.
    terminator: SequenceTerminator,
}
impl SequenceBase {
fn new(payload: Cow<'static, str>, terminator: SequenceTerminator) -> Self {
Self { payload, terminator }
}
}
/// Final character of a built escape sequence, selecting its flavor.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum SequenceTerminator {
    /// The normal key esc sequence terminator defined by xterm/dec.
    Normal(char),
    /// The terminator is for kitty escape sequence (`u`).
    Kitty,
}
impl SequenceTerminator {
    /// Get the character that closes the escape sequence.
    fn encode_esc_sequence(self) -> char {
        if let SequenceTerminator::Normal(terminator) = self {
            terminator
        } else {
            // Kitty sequences are always `u`-terminated.
            'u'
        }
    }
}
bitflags::bitflags! {
    /// The modifiers encoding for escape sequence.
    ///
    /// Bit positions match the kitty keyboard protocol's modifier field (before
    /// the protocol's `+1` offset is applied).
    #[derive(Debug, Clone, Copy, PartialEq, Eq)]
    struct SequenceModifiers : u8 {
        const SHIFT   = 0b0000_0001;
        const ALT     = 0b0000_0010;
        const CONTROL = 0b0000_0100;
        const SUPER   = 0b0000_1000;
        // NOTE: Kitty protocol defines additional modifiers to what is present here, like
        // Capslock, but it's not a modifier as per winit.
    }
}
impl SequenceModifiers {
    /// Get the value which should be passed to escape sequence.
    ///
    /// The wire format carries the modifier bits offset by one, so an empty
    /// modifier set is transmitted as `1`.
    pub fn encode_esc_sequence(self) -> u8 {
        self.bits() + 1
    }
}
impl From<ModifiersState> for SequenceModifiers {
    /// Translate winit's modifier state into the subset encoded into sequences.
    fn from(mods: ModifiersState) -> Self {
        let mut sequence_mods = Self::empty();
        for (flag, active) in [
            (Self::SHIFT, mods.shift_key()),
            (Self::ALT, mods.alt_key()),
            (Self::CONTROL, mods.control_key()),
            (Self::SUPER, mods.super_key()),
        ] {
            sequence_mods.set(flag, active);
        }
        sequence_mods
    }
}
/// Check whether the `text` is `0x7f`, `C0` or `C1` control code.
///
/// Returns `false` for empty input and for any multi-byte text.
fn is_control_character(text: &str) -> bool {
    // 0x7f (DEL) is included here since it has a dedicated control code (`^?`) which generally
    // does not match the reported text (`^H`), despite not technically being part of C0 or C1.
    //
    // Matching on the byte slice avoids the unconditional `unwrap` of the previous
    // implementation, which panicked when `text` was empty.
    match text.as_bytes() {
        [codepoint] => *codepoint < 0x20 || (0x7f..=0x9f).contains(codepoint),
        _ => false,
    }
}
| rust | {
"argument_definitions": [
{
"definitions": [
"pub struct KeyEvent {\n /// Represents the position of a key independent of the currently active layout.\n ///\n /// It also uniquely identifies the physical key (i.e. it's mostly synonymous with a scancode).\n /// The most prevalent use case for this is games. For example the default keys for the player\n /// to move around might be the W, A, S, and D keys on a US layout. The position of these keys\n /// is more important than their label, so they should map to Z, Q, S, and D on an \"AZERTY\"\n /// layout. (This value is `KeyCode::KeyW` for the Z key on an AZERTY layout.)\n ///\n /// ## Caveats\n ///\n /// - Certain niche hardware will shuffle around physical key positions, e.g. a keyboard that\n /// implements DVORAK in hardware (or firmware)\n /// - Your application will likely have to handle keyboards which are missing keys that your\n /// own keyboard has.\n /// - Certain `KeyCode`s will move between a couple of different positions depending on what\n /// layout the keyboard was manufactured to support.\n ///\n /// **Because of these caveats, it is important that you provide users with a way to configure\n /// most (if not all) keybinds in your application.**\n ///\n /// ## `Fn` and `FnLock`\n ///\n /// `Fn` and `FnLock` key events are *exceedingly unlikely* to be emitted by Winit. These keys\n /// are usually handled at the hardware or OS level, and aren't surfaced to applications. 
If\n /// you somehow see this in the wild, we'd like to know :)\n pub physical_key: keyboard::PhysicalKey,\n\n // Allowing `broken_intra_doc_links` for `logical_key`, because\n // `key_without_modifiers` is not available on all platforms\n #[cfg_attr(\n not(any(windows_platform, macos_platform, x11_platform, wayland_platform)),\n allow(rustdoc::broken_intra_doc_links)\n )]\n /// This value is affected by all modifiers except <kbd>Ctrl</kbd>.\n ///\n /// This has two use cases:\n /// - Allows querying whether the current input is a Dead key.\n /// - Allows handling key-bindings on platforms which don't support [`key_without_modifiers`].\n ///\n /// If you use this field (or [`key_without_modifiers`] for that matter) for keyboard\n /// shortcuts, **it is important that you provide users with a way to configure your\n /// application's shortcuts so you don't render your application unusable for users with an\n /// incompatible keyboard layout.**\n ///\n /// ## Platform-specific\n /// - **Web:** Dead keys might be reported as the real key instead of `Dead` depending on the\n /// browser/OS.\n ///\n /// [`key_without_modifiers`]: crate::platform::modifier_supplement::KeyEventExtModifierSupplement::key_without_modifiers\n pub logical_key: keyboard::Key,\n\n /// Contains the text produced by this keypress.\n ///\n /// In most cases this is identical to the content\n /// of the `Character` variant of `logical_key`.\n /// However, on Windows when a dead key was pressed earlier\n /// but cannot be combined with the character from this\n /// keypress, the produced text will consist of two characters:\n /// the dead-key-character followed by the character resulting\n /// from this keypress.\n ///\n /// An additional difference from `logical_key` is that\n /// this field stores the text representation of any key\n /// that has such a representation. 
For example when\n /// `logical_key` is `Key::Named(NamedKey::Enter)`, this field is `Some(\"\\r\")`.\n ///\n /// This is `None` if the current keypress cannot\n /// be interpreted as text.\n ///\n /// See also: `text_with_all_modifiers()`\n pub text: Option<SmolStr>,\n\n /// Contains the location of this key on the keyboard.\n ///\n /// Certain keys on the keyboard may appear in more than once place. For example, the \"Shift\"\n /// key appears on the left side of the QWERTY keyboard as well as the right side. However,\n /// both keys have the same symbolic value. Another example of this phenomenon is the \"1\"\n /// key, which appears both above the \"Q\" key and as the \"Keypad 1\" key.\n ///\n /// This field allows the user to differentiate between keys like this that have the same\n /// symbolic value but different locations on the keyboard.\n ///\n /// See the [`KeyLocation`] type for more details.\n ///\n /// [`KeyLocation`]: crate::keyboard::KeyLocation\n pub location: keyboard::KeyLocation,\n\n /// Whether the key is being pressed or released.\n ///\n /// See the [`ElementState`] type for more details.\n pub state: ElementState,\n\n /// Whether or not this key is a key repeat event.\n ///\n /// On some systems, holding down a key for some period of time causes that key to be repeated\n /// as though it were being pressed and released repeatedly. 
This field is `true` if and only\n /// if this event is the result of one of those repeats.\n ///\n /// # Example\n ///\n /// In games, you often want to ignore repated key events - this can be\n /// done by ignoring events where this property is set.\n ///\n /// ```\n /// use winit::event::{ElementState, KeyEvent, WindowEvent};\n /// use winit::keyboard::{KeyCode, PhysicalKey};\n /// # let window_event = WindowEvent::RedrawRequested; // To make the example compile\n /// match window_event {\n /// WindowEvent::KeyboardInput {\n /// event:\n /// KeyEvent {\n /// physical_key: PhysicalKey::Code(KeyCode::KeyW),\n /// state: ElementState::Pressed,\n /// repeat: false,\n /// ..\n /// },\n /// ..\n /// } => {\n /// // The physical key `W` was pressed, and it was not a repeat\n /// },\n /// _ => {}, // Handle other events\n /// }\n /// ```\n pub repeat: bool,\n\n /// Platform-specific key event information.\n ///\n /// On Windows, Linux and macOS, this type contains the key without modifiers and the text with\n /// all modifiers applied.\n ///\n /// On Android, iOS, Redox and Web, this type is a no-op.\n pub(crate) platform_specific: platform_impl::KeyEventExtra,\n}"
],
"name": "key",
"type": "KeyEvent"
},
{
"definitions": [
" pub struct ModifiersState: u32 {\n /// The \"shift\" key.\n const SHIFT = 0b100;\n /// The \"control\" key.\n const CONTROL = 0b100 << 3;\n /// The \"alt\" key.\n const ALT = 0b100 << 6;\n /// This is the \"windows\" key on PC and \"command\" key on Mac.\n const SUPER = 0b100 << 9;\n }"
],
"name": "mods",
"type": "ModifiersState"
},
{
"definitions": [
" pub struct TermMode: u32 {\n const NONE = 0;\n const SHOW_CURSOR = 1;\n const APP_CURSOR = 1 << 1;\n const APP_KEYPAD = 1 << 2;\n const MOUSE_REPORT_CLICK = 1 << 3;\n const BRACKETED_PASTE = 1 << 4;\n const SGR_MOUSE = 1 << 5;\n const MOUSE_MOTION = 1 << 6;\n const LINE_WRAP = 1 << 7;\n const LINE_FEED_NEW_LINE = 1 << 8;\n const ORIGIN = 1 << 9;\n const INSERT = 1 << 10;\n const FOCUS_IN_OUT = 1 << 11;\n const ALT_SCREEN = 1 << 12;\n const MOUSE_DRAG = 1 << 13;\n const UTF8_MOUSE = 1 << 14;\n const ALTERNATE_SCROLL = 1 << 15;\n const VI = 1 << 16;\n const URGENCY_HINTS = 1 << 17;\n const DISAMBIGUATE_ESC_CODES = 1 << 18;\n const REPORT_EVENT_TYPES = 1 << 19;\n const REPORT_ALTERNATE_KEYS = 1 << 20;\n const REPORT_ALL_KEYS_AS_ESC = 1 << 21;\n const REPORT_ASSOCIATED_TEXT = 1 << 22;\n const MOUSE_MODE = Self::MOUSE_REPORT_CLICK.bits() | Self::MOUSE_MOTION.bits() | Self::MOUSE_DRAG.bits();\n const KITTY_KEYBOARD_PROTOCOL = Self::DISAMBIGUATE_ESC_CODES.bits()\n | Self::REPORT_EVENT_TYPES.bits()\n | Self::REPORT_ALTERNATE_KEYS.bits()\n | Self::REPORT_ALL_KEYS_AS_ESC.bits()\n | Self::REPORT_ASSOCIATED_TEXT.bits();\n const ANY = u32::MAX;\n }"
],
"name": "mode",
"type": "TermMode"
}
],
"end_line": 361,
"name": "build_sequence",
"signature": "fn build_sequence(key: KeyEvent, mods: ModifiersState, mode: TermMode) -> Vec<u8>",
"start_line": 294
} | {
"class_name": "",
"class_signature": ""
} |
try_build_textual | alacritty-master/alacritty/src/input/keyboard.rs | fn try_build_textual(
&self,
key: &KeyEvent,
associated_text: Option<&str>,
) -> Option<SequenceBase> {
let character = match key.logical_key.as_ref() {
Key::Character(character) if self.kitty_seq => character,
_ => return None,
};
if character.chars().count() == 1 {
let shift = self.modifiers.contains(SequenceModifiers::SHIFT);
let ch = character.chars().next().unwrap();
let unshifted_ch = if shift { ch.to_lowercase().next().unwrap() } else { ch };
let alternate_key_code = u32::from(ch);
let mut unicode_key_code = u32::from(unshifted_ch);
// Try to get the base for keys which change based on modifier, like `1` for `!`.
//
// However it should only be performed when `SHIFT` is pressed.
if shift && alternate_key_code == unicode_key_code {
if let Key::Character(unmodded) = key.key_without_modifiers().as_ref() {
unicode_key_code = u32::from(unmodded.chars().next().unwrap_or(unshifted_ch));
}
}
// NOTE: Base layouts are ignored, since winit doesn't expose this information
// yet.
let payload = if self.mode.contains(TermMode::REPORT_ALTERNATE_KEYS)
&& alternate_key_code != unicode_key_code
{
format!("{unicode_key_code}:{alternate_key_code}")
} else {
unicode_key_code.to_string()
};
Some(SequenceBase::new(payload.into(), SequenceTerminator::Kitty))
} else if self.kitty_encode_all && associated_text.is_some() {
// Fallback when need to report text, but we don't have any key associated with this
// text.
Some(SequenceBase::new("0".into(), SequenceTerminator::Kitty))
} else {
None
}
} | use std::borrow::Cow;
use winit::event::{ElementState, KeyEvent};
#[cfg(target_os = "macos")]
use winit::keyboard::ModifiersKeyState;
use winit::keyboard::{Key, KeyLocation, ModifiersState, NamedKey};
#[cfg(target_os = "macos")]
use winit::platform::macos::OptionAsAlt;
use alacritty_terminal::event::EventListener;
use alacritty_terminal::term::TermMode;
use winit::platform::modifier_supplement::KeyEventExtModifierSupplement;
use crate::config::{Action, BindingKey, BindingMode, KeyBinding};
use crate::event::TYPING_SEARCH_DELAY;
use crate::input::{ActionContext, Execute, Processor};
use crate::scheduler::{TimerId, Topic};
impl<T: EventListener, A: ActionContext<T>> Processor<T, A> {
    /// Process key input.
    ///
    /// Routes the event, in priority order, to: IME (ignored here), key release
    /// handling, hint selection, inline search, search, key bindings, and
    /// finally the PTY as either an escape sequence or literal text.
    pub fn key_input(&mut self, key: KeyEvent) {
        // IME input will be applied on commit and shouldn't trigger key bindings.
        if self.ctx.display().ime.preedit().is_some() {
            return;
        }
        let mode = *self.ctx.terminal().mode();
        let mods = self.ctx.modifiers().state();
        // Releases are handled separately and never produce literal text.
        if key.state == ElementState::Released {
            if self.ctx.inline_search_state().char_pending {
                self.ctx.window().set_ime_allowed(true);
            }
            self.key_release(key, mode, mods);
            return;
        }
        let text = key.text_with_all_modifiers().unwrap_or_default();
        // All key bindings are disabled while a hint is being selected.
        if self.ctx.display().hint_state.active() {
            for character in text.chars() {
                self.ctx.hint_input(character);
            }
            return;
        }
        // First key after inline search is captured.
        let inline_state = self.ctx.inline_search_state();
        if inline_state.char_pending {
            self.ctx.inline_search_input(text);
            return;
        }
        // Reset search delay when the user is still typing.
        self.reset_search_delay();
        // Key bindings suppress the character input.
        if self.process_key_bindings(&key) {
            return;
        }
        if self.ctx.search_active() {
            for character in text.chars() {
                self.ctx.search_input(character);
            }
            return;
        }
        // Vi mode on its own doesn't have any input, the search input was done before.
        if mode.contains(TermMode::VI) {
            return;
        }
        // Mask `Alt` modifier from input when we won't send esc.
        let mods = if self.alt_send_esc(&key, text) { mods } else { mods & !ModifiersState::ALT };
        let build_key_sequence = Self::should_build_sequence(&key, text, mode, mods);
        let is_modifier_key = Self::is_modifier_key(&key);
        let bytes = if build_key_sequence {
            build_sequence(key, mods, mode)
        } else {
            // Legacy path: optional ESC prefix for `Alt`, then the literal text.
            let mut bytes = Vec::with_capacity(text.len() + 1);
            if mods.alt_key() {
                bytes.push(b'\x1b');
            }
            bytes.extend_from_slice(text.as_bytes());
            bytes
        };
        // Write only if we have something to write.
        if !bytes.is_empty() {
            // Don't clear selection/scroll down when writing escaped modifier keys.
            if !is_modifier_key {
                self.ctx.on_terminal_input_start();
            }
            self.ctx.write_to_pty(bytes);
        }
    }
    /// Whether `Alt` should be treated as an ESC prefix for this key press.
    ///
    /// On macOS this honors the `option_as_alt` configuration, checking which
    /// physical `Option` key is currently pressed.
    fn alt_send_esc(&mut self, key: &KeyEvent, text: &str) -> bool {
        #[cfg(not(target_os = "macos"))]
        let alt_send_esc = self.ctx.modifiers().state().alt_key();
        #[cfg(target_os = "macos")]
        let alt_send_esc = {
            let option_as_alt = self.ctx.config().window.option_as_alt();
            self.ctx.modifiers().state().alt_key()
                && (option_as_alt == OptionAsAlt::Both
                    || (option_as_alt == OptionAsAlt::OnlyLeft
                        && self.ctx.modifiers().lalt_state() == ModifiersKeyState::Pressed)
                    || (option_as_alt == OptionAsAlt::OnlyRight
                        && self.ctx.modifiers().ralt_state() == ModifiersKeyState::Pressed))
        };
        match key.logical_key {
            Key::Named(named) => {
                if named.to_text().is_some() {
                    alt_send_esc
                } else {
                    // Treat `Alt` as modifier for named keys without text, like ArrowUp.
                    self.ctx.modifiers().state().alt_key()
                }
            },
            // Only prefix ESC for single-character input.
            _ => alt_send_esc && text.chars().count() == 1,
        }
    }
    /// Whether the event is for a bare modifier key (Shift/Control/Alt/Super).
    fn is_modifier_key(key: &KeyEvent) -> bool {
        matches!(
            key.logical_key.as_ref(),
            Key::Named(NamedKey::Shift)
                | Key::Named(NamedKey::Control)
                | Key::Named(NamedKey::Alt)
                | Key::Named(NamedKey::Super)
        )
    }
    /// Check whether we should try to build escape sequence for the [`KeyEvent`].
    fn should_build_sequence(
        key: &KeyEvent,
        text: &str,
        mode: TermMode,
        mods: ModifiersState,
    ) -> bool {
        if mode.contains(TermMode::REPORT_ALL_KEYS_AS_ESC) {
            return true;
        }
        // `DISAMBIGUATE_ESC_CODES` escapes Escape, numpad keys, and modified
        // keys; plain `Shift` only counts when it modifies Tab/Enter/Backspace.
        let disambiguate = mode.contains(TermMode::DISAMBIGUATE_ESC_CODES)
            && (key.logical_key == Key::Named(NamedKey::Escape)
                || key.location == KeyLocation::Numpad
                || (!mods.is_empty()
                    && (mods != ModifiersState::SHIFT
                        || matches!(
                            key.logical_key,
                            Key::Named(NamedKey::Tab)
                                | Key::Named(NamedKey::Enter)
                                | Key::Named(NamedKey::Backspace)
                        ))));
        match key.logical_key {
            _ if disambiguate => true,
            // Exclude all the named keys unless they have textual representation.
            Key::Named(named) => named.to_text().is_none(),
            _ => text.is_empty(),
        }
    }
    /// Attempt to find a binding and execute its action.
    ///
    /// The provided mode, mods, and key must match what is allowed by a binding
    /// for its action to be executed.
    fn process_key_bindings(&mut self, key: &KeyEvent) -> bool {
        let mode = BindingMode::new(self.ctx.terminal().mode(), self.ctx.search_active());
        let mods = self.ctx.modifiers().state();
        // Don't suppress char if no bindings were triggered.
        let mut suppress_chars = None;
        // We don't want the key without modifier, because it means something else most of
        // the time. However what we want is to manually lowercase the character to account
        // for both small and capital letters on regular characters at the same time.
        let logical_key = if let Key::Character(ch) = key.logical_key.as_ref() {
            // Match `Alt` bindings without `Alt` being applied, otherwise they use the
            // composed chars, which are not intuitive to bind.
            //
            // On Windows, the `Ctrl + Alt` mangles `logical_key` to unidentified values, thus
            // preventing them from being used in bindings
            //
            // For more see https://github.com/rust-windowing/winit/issues/2945.
            if (cfg!(target_os = "macos") || (cfg!(windows) && mods.control_key()))
                && mods.alt_key()
            {
                key.key_without_modifiers()
            } else {
                Key::Character(ch.to_lowercase().into())
            }
        } else {
            key.logical_key.clone()
        };
        // Get the action of a key binding.
        let mut binding_action = |binding: &KeyBinding| {
            // Scancode bindings match on the physical key; everything else on
            // the (lowercased) logical key plus its location.
            let key = match (&binding.trigger, &logical_key) {
                (BindingKey::Scancode(_), _) => BindingKey::Scancode(key.physical_key),
                (_, code) => {
                    BindingKey::Keycode { key: code.clone(), location: key.location.into() }
                },
            };
            if binding.is_triggered_by(mode, mods, &key) {
                // Pass through the key if any of the bindings has the `ReceiveChar` action.
                *suppress_chars.get_or_insert(true) &= binding.action != Action::ReceiveChar;
                // Binding was triggered; run the action.
                Some(binding.action.clone())
            } else {
                None
            }
        };
        // Trigger matching key bindings.
        for i in 0..self.ctx.config().key_bindings().len() {
            let binding = &self.ctx.config().key_bindings()[i];
            if let Some(action) = binding_action(binding) {
                action.execute(&mut self.ctx);
            }
        }
        // Trigger key bindings for hints.
        for i in 0..self.ctx.config().hints.enabled.len() {
            let hint = &self.ctx.config().hints.enabled[i];
            let binding = match hint.binding.as_ref() {
                Some(binding) => binding.key_binding(hint),
                None => continue,
            };
            if let Some(action) = binding_action(binding) {
                action.execute(&mut self.ctx);
            }
        }
        suppress_chars.unwrap_or(false)
    }
    /// Handle key release.
    fn key_release(&mut self, key: KeyEvent, mode: TermMode, mods: ModifiersState) {
        if !mode.contains(TermMode::REPORT_EVENT_TYPES)
            || mode.contains(TermMode::VI)
            || self.ctx.search_active()
            || self.ctx.display().hint_state.active()
        {
            return;
        }
        // Mask `Alt` modifier from input when we won't send esc.
        let text = key.text_with_all_modifiers().unwrap_or_default();
        let mods = if self.alt_send_esc(&key, text) { mods } else { mods & !ModifiersState::ALT };
        let bytes = match key.logical_key.as_ref() {
            // Skip release reporting for these keys unless the mode demands
            // that every key be reported as an escape sequence.
            Key::Named(NamedKey::Enter)
            | Key::Named(NamedKey::Tab)
            | Key::Named(NamedKey::Backspace)
                if !mode.contains(TermMode::REPORT_ALL_KEYS_AS_ESC) =>
            {
                return
            },
            _ => build_sequence(key, mods, mode),
        };
        self.ctx.write_to_pty(bytes);
    }
    /// Reset search delay.
    fn reset_search_delay(&mut self) {
        if self.ctx.search_active() {
            let timer_id = TimerId::new(Topic::DelayedSearch, self.ctx.window().id());
            let scheduler = self.ctx.scheduler_mut();
            if let Some(timer) = scheduler.unschedule(timer_id) {
                scheduler.schedule(timer.event, TYPING_SEARCH_DELAY, false, timer.id);
            }
        }
    }
}
/// Build a key's keyboard escape sequence based on the given `key`, `mods`, and `mode`.
///
/// The key sequences for `APP_KEYPAD` and alike are handled inside the bindings.
#[inline(never)]
fn build_sequence(key: KeyEvent, mods: ModifiersState, mode: TermMode) -> Vec<u8> {
    let mut modifiers = mods.into();
    // Any of these progressive-enhancement flags switches to kitty encoding.
    let kitty_seq = mode.intersects(
        TermMode::REPORT_ALL_KEYS_AS_ESC
            | TermMode::DISAMBIGUATE_ESC_CODES
            | TermMode::REPORT_EVENT_TYPES,
    );
    let kitty_encode_all = mode.contains(TermMode::REPORT_ALL_KEYS_AS_ESC);
    // The default parameter is 1, so we can omit it.
    let kitty_event_type = mode.contains(TermMode::REPORT_EVENT_TYPES)
        && (key.repeat || key.state == ElementState::Released);
    let context =
        SequenceBuilder { mode, modifiers, kitty_seq, kitty_encode_all, kitty_event_type };
    // Only attach text for non-release events with non-empty, non-control text.
    let associated_text = key.text_with_all_modifiers().filter(|text| {
        mode.contains(TermMode::REPORT_ASSOCIATED_TEXT)
            && key.state != ElementState::Released
            && !text.is_empty()
            && !is_control_character(text)
    });
    // Try the encoders from most to least specific; the first match wins.
    let sequence_base = context
        .try_build_numpad(&key)
        .or_else(|| context.try_build_named_kitty(&key))
        .or_else(|| context.try_build_named_normal(&key, associated_text.is_some()))
        .or_else(|| context.try_build_control_char_or_mod(&key, &mut modifiers))
        .or_else(|| context.try_build_textual(&key, associated_text));
    let (payload, terminator) = match sequence_base {
        Some(SequenceBase { payload, terminator }) => (payload, terminator),
        _ => return Vec::new(),
    };
    let mut payload = format!("\x1b[{payload}");
    // Add modifiers information.
    if kitty_event_type || !modifiers.is_empty() || associated_text.is_some() {
        payload.push_str(&format!(";{}", modifiers.encode_esc_sequence()));
    }
    // Push event type.
    if kitty_event_type {
        payload.push(':');
        let event_type = match key.state {
            _ if key.repeat => '2',
            ElementState::Pressed => '1',
            ElementState::Released => '3',
        };
        payload.push(event_type);
    }
    // Associated text: first codepoint after `;`, remainder `:`-separated.
    if let Some(text) = associated_text {
        let mut codepoints = text.chars().map(u32::from);
        if let Some(codepoint) = codepoints.next() {
            payload.push_str(&format!(";{codepoint}"));
        }
        for codepoint in codepoints {
            payload.push_str(&format!(":{codepoint}"));
        }
    }
    payload.push(terminator.encode_esc_sequence());
    payload.into_bytes()
}
/// Helper to build escape sequence payloads from [`KeyEvent`].
pub struct SequenceBuilder {
    /// Active terminal mode the sequence is built for.
    mode: TermMode,
    /// The emitted sequence should follow the kitty keyboard protocol.
    kitty_seq: bool,
    /// Encode all the keys according to the protocol.
    kitty_encode_all: bool,
    /// Report event types.
    kitty_event_type: bool,
    /// Modifiers to encode into the sequence.
    modifiers: SequenceModifiers,
}
impl SequenceBuilder {
    /// Try building sequence from the event's emitting text.
    ///
    /// Returns `None` when the key has no character representation or the
    /// kitty protocol is inactive.
    fn try_build_textual(
        &self,
        key: &KeyEvent,
        associated_text: Option<&str>,
    ) -> Option<SequenceBase> {
        let character = match key.logical_key.as_ref() {
            Key::Character(character) if self.kitty_seq => character,
            _ => return None,
        };
        if character.chars().count() == 1 {
            let shift = self.modifiers.contains(SequenceModifiers::SHIFT);
            let ch = character.chars().next().unwrap();
            // Use the lowercased character when shift produced an uppercase one.
            let unshifted_ch = if shift { ch.to_lowercase().next().unwrap() } else { ch };
            let alternate_key_code = u32::from(ch);
            let mut unicode_key_code = u32::from(unshifted_ch);
            // Try to get the base for keys which change based on modifier, like `1` for `!`.
            //
            // However it should only be performed when `SHIFT` is pressed.
            if shift && alternate_key_code == unicode_key_code {
                if let Key::Character(unmodded) = key.key_without_modifiers().as_ref() {
                    unicode_key_code = u32::from(unmodded.chars().next().unwrap_or(unshifted_ch));
                }
            }
            // NOTE: Base layouts are ignored, since winit doesn't expose this information
            // yet.
            let payload = if self.mode.contains(TermMode::REPORT_ALTERNATE_KEYS)
                && alternate_key_code != unicode_key_code
            {
                format!("{unicode_key_code}:{alternate_key_code}")
            } else {
                unicode_key_code.to_string()
            };
            Some(SequenceBase::new(payload.into(), SequenceTerminator::Kitty))
        } else if self.kitty_encode_all && associated_text.is_some() {
            // Fallback when need to report text, but we don't have any key associated with this
            // text.
            Some(SequenceBase::new("0".into(), SequenceTerminator::Kitty))
        } else {
            None
        }
    }
    /// Try building from numpad key.
    ///
    /// `None` is returned when the key is neither known nor numpad.
    fn try_build_numpad(&self, key: &KeyEvent) -> Option<SequenceBase> {
        if !self.kitty_seq || key.location != KeyLocation::Numpad {
            return None;
        }
        // Kitty functional key codes for the numpad (emitted as `CSI code u`).
        let base = match key.logical_key.as_ref() {
            Key::Character("0") => "57399",
            Key::Character("1") => "57400",
            Key::Character("2") => "57401",
            Key::Character("3") => "57402",
            Key::Character("4") => "57403",
            Key::Character("5") => "57404",
            Key::Character("6") => "57405",
            Key::Character("7") => "57406",
            Key::Character("8") => "57407",
            Key::Character("9") => "57408",
            Key::Character(".") => "57409",
            Key::Character("/") => "57410",
            Key::Character("*") => "57411",
            Key::Character("-") => "57412",
            Key::Character("+") => "57413",
            Key::Character("=") => "57415",
            Key::Named(named) => match named {
                NamedKey::Enter => "57414",
                NamedKey::ArrowLeft => "57417",
                NamedKey::ArrowRight => "57418",
                NamedKey::ArrowUp => "57419",
                NamedKey::ArrowDown => "57420",
                NamedKey::PageUp => "57421",
                NamedKey::PageDown => "57422",
                NamedKey::Home => "57423",
                NamedKey::End => "57424",
                NamedKey::Insert => "57425",
                NamedKey::Delete => "57426",
                _ => return None,
            },
            _ => return None,
        };
        Some(SequenceBase::new(base.into(), SequenceTerminator::Kitty))
    }
    /// Try building from [`NamedKey`] using the kitty keyboard protocol encoding
    /// for functional keys.
    fn try_build_named_kitty(&self, key: &KeyEvent) -> Option<SequenceBase> {
        let named = match key.logical_key {
            Key::Named(named) if self.kitty_seq => named,
            _ => return None,
        };
        // Kitty functional key codes for keys without a legacy representation.
        let (base, terminator) = match named {
            // F3 in kitty protocol diverges from alacritty's terminfo.
            NamedKey::F3 => ("13", SequenceTerminator::Normal('~')),
            NamedKey::F13 => ("57376", SequenceTerminator::Kitty),
            NamedKey::F14 => ("57377", SequenceTerminator::Kitty),
            NamedKey::F15 => ("57378", SequenceTerminator::Kitty),
            NamedKey::F16 => ("57379", SequenceTerminator::Kitty),
            NamedKey::F17 => ("57380", SequenceTerminator::Kitty),
            NamedKey::F18 => ("57381", SequenceTerminator::Kitty),
            NamedKey::F19 => ("57382", SequenceTerminator::Kitty),
            NamedKey::F20 => ("57383", SequenceTerminator::Kitty),
            NamedKey::F21 => ("57384", SequenceTerminator::Kitty),
            NamedKey::F22 => ("57385", SequenceTerminator::Kitty),
            NamedKey::F23 => ("57386", SequenceTerminator::Kitty),
            NamedKey::F24 => ("57387", SequenceTerminator::Kitty),
            NamedKey::F25 => ("57388", SequenceTerminator::Kitty),
            NamedKey::F26 => ("57389", SequenceTerminator::Kitty),
            NamedKey::F27 => ("57390", SequenceTerminator::Kitty),
            NamedKey::F28 => ("57391", SequenceTerminator::Kitty),
            NamedKey::F29 => ("57392", SequenceTerminator::Kitty),
            NamedKey::F30 => ("57393", SequenceTerminator::Kitty),
            NamedKey::F31 => ("57394", SequenceTerminator::Kitty),
            NamedKey::F32 => ("57395", SequenceTerminator::Kitty),
            NamedKey::F33 => ("57396", SequenceTerminator::Kitty),
            NamedKey::F34 => ("57397", SequenceTerminator::Kitty),
            NamedKey::F35 => ("57398", SequenceTerminator::Kitty),
            NamedKey::ScrollLock => ("57359", SequenceTerminator::Kitty),
            NamedKey::PrintScreen => ("57361", SequenceTerminator::Kitty),
            NamedKey::Pause => ("57362", SequenceTerminator::Kitty),
            NamedKey::ContextMenu => ("57363", SequenceTerminator::Kitty),
            NamedKey::MediaPlay => ("57428", SequenceTerminator::Kitty),
            NamedKey::MediaPause => ("57429", SequenceTerminator::Kitty),
            NamedKey::MediaPlayPause => ("57430", SequenceTerminator::Kitty),
            NamedKey::MediaStop => ("57432", SequenceTerminator::Kitty),
            NamedKey::MediaFastForward => ("57433", SequenceTerminator::Kitty),
            NamedKey::MediaRewind => ("57434", SequenceTerminator::Kitty),
            NamedKey::MediaTrackNext => ("57435", SequenceTerminator::Kitty),
            NamedKey::MediaTrackPrevious => ("57436", SequenceTerminator::Kitty),
            NamedKey::MediaRecord => ("57437", SequenceTerminator::Kitty),
            NamedKey::AudioVolumeDown => ("57438", SequenceTerminator::Kitty),
            NamedKey::AudioVolumeUp => ("57439", SequenceTerminator::Kitty),
            NamedKey::AudioVolumeMute => ("57440", SequenceTerminator::Kitty),
            _ => return None,
        };
        Some(SequenceBase::new(base.into(), terminator))
    }
    /// Try building from [`NamedKey`].
    ///
    /// Uses the legacy xterm/DEC encodings for keys that have them.
    fn try_build_named_normal(
        &self,
        key: &KeyEvent,
        has_associated_text: bool,
    ) -> Option<SequenceBase> {
        let named = match key.logical_key {
            Key::Named(named) => named,
            _ => return None,
        };
        // The default parameter is 1, so we can omit it.
        let one_based =
            if self.modifiers.is_empty() && !self.kitty_event_type && !has_associated_text {
                ""
            } else {
                "1"
            };
        let (base, terminator) = match named {
            NamedKey::PageUp => ("5", SequenceTerminator::Normal('~')),
            NamedKey::PageDown => ("6", SequenceTerminator::Normal('~')),
            NamedKey::Insert => ("2", SequenceTerminator::Normal('~')),
            NamedKey::Delete => ("3", SequenceTerminator::Normal('~')),
            NamedKey::Home => (one_based, SequenceTerminator::Normal('H')),
            NamedKey::End => (one_based, SequenceTerminator::Normal('F')),
            NamedKey::ArrowLeft => (one_based, SequenceTerminator::Normal('D')),
            NamedKey::ArrowRight => (one_based, SequenceTerminator::Normal('C')),
            NamedKey::ArrowUp => (one_based, SequenceTerminator::Normal('A')),
            NamedKey::ArrowDown => (one_based, SequenceTerminator::Normal('B')),
            NamedKey::F1 => (one_based, SequenceTerminator::Normal('P')),
            NamedKey::F2 => (one_based, SequenceTerminator::Normal('Q')),
            NamedKey::F3 => (one_based, SequenceTerminator::Normal('R')),
            NamedKey::F4 => (one_based, SequenceTerminator::Normal('S')),
            NamedKey::F5 => ("15", SequenceTerminator::Normal('~')),
            NamedKey::F6 => ("17", SequenceTerminator::Normal('~')),
            NamedKey::F7 => ("18", SequenceTerminator::Normal('~')),
            NamedKey::F8 => ("19", SequenceTerminator::Normal('~')),
            NamedKey::F9 => ("20", SequenceTerminator::Normal('~')),
            NamedKey::F10 => ("21", SequenceTerminator::Normal('~')),
            NamedKey::F11 => ("23", SequenceTerminator::Normal('~')),
            NamedKey::F12 => ("24", SequenceTerminator::Normal('~')),
            NamedKey::F13 => ("25", SequenceTerminator::Normal('~')),
            NamedKey::F14 => ("26", SequenceTerminator::Normal('~')),
            NamedKey::F15 => ("28", SequenceTerminator::Normal('~')),
            NamedKey::F16 => ("29", SequenceTerminator::Normal('~')),
            NamedKey::F17 => ("31", SequenceTerminator::Normal('~')),
            NamedKey::F18 => ("32", SequenceTerminator::Normal('~')),
            NamedKey::F19 => ("33", SequenceTerminator::Normal('~')),
            NamedKey::F20 => ("34", SequenceTerminator::Normal('~')),
            _ => return None,
        };
        Some(SequenceBase::new(base.into(), terminator))
    }
    /// Try building escape from control characters (e.g. Enter) and modifiers.
    fn try_build_control_char_or_mod(
        &self,
        key: &KeyEvent,
        mods: &mut SequenceModifiers,
    ) -> Option<SequenceBase> {
        if !self.kitty_encode_all && !self.kitty_seq {
            return None;
        }
        let named = match key.logical_key {
            Key::Named(named) => named,
            _ => return None,
        };
        // ASCII codes for the named control characters.
        let base = match named {
            NamedKey::Tab => "9",
            NamedKey::Enter => "13",
            NamedKey::Escape => "27",
            NamedKey::Space => "32",
            NamedKey::Backspace => "127",
            _ => "",
        };
        // Fail when the key is not a named control character and the active mode prohibits us
        // from encoding modifier keys.
        if !self.kitty_encode_all && base.is_empty() {
            return None;
        }
        // Kitty functional codes for modifier keys; left/right variants differ.
        let base = match (named, key.location) {
            (NamedKey::Shift, KeyLocation::Left) => "57441",
            (NamedKey::Control, KeyLocation::Left) => "57442",
            (NamedKey::Alt, KeyLocation::Left) => "57443",
            (NamedKey::Super, KeyLocation::Left) => "57444",
            (NamedKey::Hyper, KeyLocation::Left) => "57445",
            (NamedKey::Meta, KeyLocation::Left) => "57446",
            (NamedKey::Shift, _) => "57447",
            (NamedKey::Control, _) => "57448",
            (NamedKey::Alt, _) => "57449",
            (NamedKey::Super, _) => "57450",
            (NamedKey::Hyper, _) => "57451",
            (NamedKey::Meta, _) => "57452",
            (NamedKey::CapsLock, _) => "57358",
            (NamedKey::NumLock, _) => "57360",
            _ => base,
        };
        // NOTE: Kitty's protocol mandates that the modifier state is applied before
        // key press, however winit sends them after the key press, so for modifiers
        // itself apply the state based on keysyms and not the _actual_ modifiers
        // state, which is how kitty is doing so and what is suggested in such case.
        let press = key.state.is_pressed();
        match named {
            NamedKey::Shift => mods.set(SequenceModifiers::SHIFT, press),
            NamedKey::Control => mods.set(SequenceModifiers::CONTROL, press),
            NamedKey::Alt => mods.set(SequenceModifiers::ALT, press),
            NamedKey::Super => mods.set(SequenceModifiers::SUPER, press),
            _ => (),
        }
        if base.is_empty() {
            None
        } else {
            Some(SequenceBase::new(base.into(), SequenceTerminator::Kitty))
        }
    }
}
/// Partially built escape sequence: its numeric payload and terminator.
pub struct SequenceBase {
    /// The base of the payload, which is the `number` and optionally an alt base from the kitty
    /// spec.
    payload: Cow<'static, str>,
    /// Character which finishes the escape sequence.
    terminator: SequenceTerminator,
}
impl SequenceBase {
    /// Create a sequence base from its payload and terminator.
    fn new(payload: Cow<'static, str>, terminator: SequenceTerminator) -> Self {
        Self { payload, terminator }
    }
}
/// Final character kind of a built escape sequence.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum SequenceTerminator {
    /// The normal key esc sequence terminator defined by xterm/dec.
    Normal(char),
    /// The terminator is for kitty escape sequence.
    Kitty,
}
impl SequenceTerminator {
    /// The character that finishes the escape sequence.
    fn encode_esc_sequence(self) -> char {
        // Kitty sequences always end in `u`; normal sequences carry their own
        // xterm/DEC terminator character.
        if let SequenceTerminator::Normal(terminator) = self {
            terminator
        } else {
            'u'
        }
    }
}
bitflags::bitflags! {
    /// The modifiers encoding for escape sequence.
    ///
    /// The value sent on the wire is `bits + 1` (see
    /// [`SequenceModifiers::encode_esc_sequence`]).
    #[derive(Debug, Clone, Copy, PartialEq, Eq)]
    struct SequenceModifiers : u8 {
        const SHIFT = 0b0000_0001;
        const ALT = 0b0000_0010;
        const CONTROL = 0b0000_0100;
        const SUPER = 0b0000_1000;
        // NOTE: Kitty protocol defines additional modifiers to what is present here, like
        // Capslock, but it's not a modifier as per winit.
    }
}
impl SequenceModifiers {
    /// Modifier value as it should appear in the escape sequence.
    ///
    /// The encoding offsets the raw bit mask by one, so an empty modifier set
    /// is transmitted as `1`.
    pub fn encode_esc_sequence(self) -> u8 {
        1 + self.bits()
    }
}
impl From<ModifiersState> for SequenceModifiers {
    /// Translate winit's modifier state into the escape-sequence bit flags.
    fn from(mods: ModifiersState) -> Self {
        // Pair each winit modifier query with its corresponding flag.
        let flag_states = [
            (Self::SHIFT, mods.shift_key()),
            (Self::ALT, mods.alt_key()),
            (Self::CONTROL, mods.control_key()),
            (Self::SUPER, mods.super_key()),
        ];
        let mut modifiers = Self::empty();
        for (flag, active) in flag_states {
            modifiers.set(flag, active);
        }
        modifiers
    }
}
/// Check whether the `text` is `0x7f`, `C0` or `C1` control code.
///
/// `0x7f` (DEL) is included here since it has a dedicated control code (`^?`) which generally
/// does not match the reported text (`^H`), despite not technically being part of C0 or C1.
///
/// Empty or multi-character text is never considered a control code.
fn is_control_character(text: &str) -> bool {
    // Work on `char`s instead of bytes: the previous byte-based check
    // (`text.len() == 1` + first byte) panicked on empty input and could never
    // match C1 codes, since U+0080..=U+009F are two bytes in UTF-8.
    let mut chars = text.chars();
    match (chars.next(), chars.next()) {
        // Exactly one character: test it against C0, DEL, and C1 ranges.
        (Some(c), None) => {
            let codepoint = u32::from(c);
            codepoint < 0x20 || (0x7f..=0x9f).contains(&codepoint)
        },
        _ => false,
    }
}
| rust | {
"argument_definitions": [
{
"definitions": [
"pub struct KeyEvent {\n /// Represents the position of a key independent of the currently active layout.\n ///\n /// It also uniquely identifies the physical key (i.e. it's mostly synonymous with a scancode).\n /// The most prevalent use case for this is games. For example the default keys for the player\n /// to move around might be the W, A, S, and D keys on a US layout. The position of these keys\n /// is more important than their label, so they should map to Z, Q, S, and D on an \"AZERTY\"\n /// layout. (This value is `KeyCode::KeyW` for the Z key on an AZERTY layout.)\n ///\n /// ## Caveats\n ///\n /// - Certain niche hardware will shuffle around physical key positions, e.g. a keyboard that\n /// implements DVORAK in hardware (or firmware)\n /// - Your application will likely have to handle keyboards which are missing keys that your\n /// own keyboard has.\n /// - Certain `KeyCode`s will move between a couple of different positions depending on what\n /// layout the keyboard was manufactured to support.\n ///\n /// **Because of these caveats, it is important that you provide users with a way to configure\n /// most (if not all) keybinds in your application.**\n ///\n /// ## `Fn` and `FnLock`\n ///\n /// `Fn` and `FnLock` key events are *exceedingly unlikely* to be emitted by Winit. These keys\n /// are usually handled at the hardware or OS level, and aren't surfaced to applications. 
If\n /// you somehow see this in the wild, we'd like to know :)\n pub physical_key: keyboard::PhysicalKey,\n\n // Allowing `broken_intra_doc_links` for `logical_key`, because\n // `key_without_modifiers` is not available on all platforms\n #[cfg_attr(\n not(any(windows_platform, macos_platform, x11_platform, wayland_platform)),\n allow(rustdoc::broken_intra_doc_links)\n )]\n /// This value is affected by all modifiers except <kbd>Ctrl</kbd>.\n ///\n /// This has two use cases:\n /// - Allows querying whether the current input is a Dead key.\n /// - Allows handling key-bindings on platforms which don't support [`key_without_modifiers`].\n ///\n /// If you use this field (or [`key_without_modifiers`] for that matter) for keyboard\n /// shortcuts, **it is important that you provide users with a way to configure your\n /// application's shortcuts so you don't render your application unusable for users with an\n /// incompatible keyboard layout.**\n ///\n /// ## Platform-specific\n /// - **Web:** Dead keys might be reported as the real key instead of `Dead` depending on the\n /// browser/OS.\n ///\n /// [`key_without_modifiers`]: crate::platform::modifier_supplement::KeyEventExtModifierSupplement::key_without_modifiers\n pub logical_key: keyboard::Key,\n\n /// Contains the text produced by this keypress.\n ///\n /// In most cases this is identical to the content\n /// of the `Character` variant of `logical_key`.\n /// However, on Windows when a dead key was pressed earlier\n /// but cannot be combined with the character from this\n /// keypress, the produced text will consist of two characters:\n /// the dead-key-character followed by the character resulting\n /// from this keypress.\n ///\n /// An additional difference from `logical_key` is that\n /// this field stores the text representation of any key\n /// that has such a representation. 
For example when\n /// `logical_key` is `Key::Named(NamedKey::Enter)`, this field is `Some(\"\\r\")`.\n ///\n /// This is `None` if the current keypress cannot\n /// be interpreted as text.\n ///\n /// See also: `text_with_all_modifiers()`\n pub text: Option<SmolStr>,\n\n /// Contains the location of this key on the keyboard.\n ///\n /// Certain keys on the keyboard may appear in more than once place. For example, the \"Shift\"\n /// key appears on the left side of the QWERTY keyboard as well as the right side. However,\n /// both keys have the same symbolic value. Another example of this phenomenon is the \"1\"\n /// key, which appears both above the \"Q\" key and as the \"Keypad 1\" key.\n ///\n /// This field allows the user to differentiate between keys like this that have the same\n /// symbolic value but different locations on the keyboard.\n ///\n /// See the [`KeyLocation`] type for more details.\n ///\n /// [`KeyLocation`]: crate::keyboard::KeyLocation\n pub location: keyboard::KeyLocation,\n\n /// Whether the key is being pressed or released.\n ///\n /// See the [`ElementState`] type for more details.\n pub state: ElementState,\n\n /// Whether or not this key is a key repeat event.\n ///\n /// On some systems, holding down a key for some period of time causes that key to be repeated\n /// as though it were being pressed and released repeatedly. 
This field is `true` if and only\n /// if this event is the result of one of those repeats.\n ///\n /// # Example\n ///\n /// In games, you often want to ignore repated key events - this can be\n /// done by ignoring events where this property is set.\n ///\n /// ```\n /// use winit::event::{ElementState, KeyEvent, WindowEvent};\n /// use winit::keyboard::{KeyCode, PhysicalKey};\n /// # let window_event = WindowEvent::RedrawRequested; // To make the example compile\n /// match window_event {\n /// WindowEvent::KeyboardInput {\n /// event:\n /// KeyEvent {\n /// physical_key: PhysicalKey::Code(KeyCode::KeyW),\n /// state: ElementState::Pressed,\n /// repeat: false,\n /// ..\n /// },\n /// ..\n /// } => {\n /// // The physical key `W` was pressed, and it was not a repeat\n /// },\n /// _ => {}, // Handle other events\n /// }\n /// ```\n pub repeat: bool,\n\n /// Platform-specific key event information.\n ///\n /// On Windows, Linux and macOS, this type contains the key without modifiers and the text with\n /// all modifiers applied.\n ///\n /// On Android, iOS, Redox and Web, this type is a no-op.\n pub(crate) platform_specific: platform_impl::KeyEventExtra,\n}"
],
"name": "key",
"type": "&KeyEvent"
},
{
"definitions": [
"pub enum Option<T> {\n /// No value.\n #[lang = \"None\"]\n #[stable(feature = \"rust1\", since = \"1.0.0\")]\n None,\n /// Some value of type `T`.\n #[lang = \"Some\"]\n #[stable(feature = \"rust1\", since = \"1.0.0\")]\n Some(#[stable(feature = \"rust1\", since = \"1.0.0\")] T),\n}"
],
"name": "associated_text",
"type": "Option<&str>"
}
],
"end_line": 423,
"name": "try_build_textual",
"signature": "fn try_build_textual(\n &self,\n key: &KeyEvent,\n associated_text: Option<&str>,\n ) -> Option<SequenceBase>",
"start_line": 377
} | {
"class_name": "impl SequenceBuilder {\n /// Try building sequence from the event's emitting text.\n fn try_build_textual(\n &self,\n key: &KeyEvent,\n associated_text: Option<&str>,\n ) -> Option<SequenceBase> {\n let character = match key.logical_key.as_ref() {\n Key::Character(character) if self.kitty_seq => character,\n _ => return None,\n };\n\n if character.chars().count() == 1 {\n let shift = self.modifiers.contains(SequenceModifiers::SHIFT);\n\n let ch = character.chars().next().unwrap();\n let unshifted_ch = if shift { ch.to_lowercase().next().unwrap() } else { ch };\n\n let alternate_key_code = u32::from(ch);\n let mut unicode_key_code = u32::from(unshifted_ch);\n\n // Try to get the base for keys which change based on modifier, like `1` for `!`.\n //\n // However it should only be performed when `SHIFT` is pressed.\n if shift && alternate_key_code == unicode_key_code {\n if let Key::Character(unmodded) = key.key_without_modifiers().as_ref() {\n unicode_key_code = u32::from(unmodded.chars().next().unwrap_or(unshifted_ch));\n }\n }\n\n // NOTE: Base layouts are ignored, since winit doesn't expose this information\n // yet.\n let payload = if self.mode.contains(TermMode::REPORT_ALTERNATE_KEYS)\n && alternate_key_code != unicode_key_code\n {\n format!(\"{unicode_key_code}:{alternate_key_code}\")\n } else {\n unicode_key_code.to_string()\n };\n\n Some(SequenceBase::new(payload.into(), SequenceTerminator::Kitty))\n } else if self.kitty_encode_all && associated_text.is_some() {\n // Fallback when need to report text, but we don't have any key associated with this\n // text.\n Some(SequenceBase::new(\"0\".into(), SequenceTerminator::Kitty))\n } else {\n None\n }\n }\n\n /// Try building from numpad key.\n ///\n /// `None` is returned when the key is neither known nor numpad.\n fn try_build_numpad(&self, key: &KeyEvent) -> Option<SequenceBase> {\n if !self.kitty_seq || key.location != KeyLocation::Numpad {\n return None;\n }\n\n let base = match 
key.logical_key.as_ref() {\n Key::Character(\"0\") => \"57399\",\n Key::Character(\"1\") => \"57400\",\n Key::Character(\"2\") => \"57401\",\n Key::Character(\"3\") => \"57402\",\n Key::Character(\"4\") => \"57403\",\n Key::Character(\"5\") => \"57404\",\n Key::Character(\"6\") => \"57405\",\n Key::Character(\"7\") => \"57406\",\n Key::Character(\"8\") => \"57407\",\n Key::Character(\"9\") => \"57408\",\n Key::Character(\".\") => \"57409\",\n Key::Character(\"/\") => \"57410\",\n Key::Character(\"*\") => \"57411\",\n Key::Character(\"-\") => \"57412\",\n Key::Character(\"+\") => \"57413\",\n Key::Character(\"=\") => \"57415\",\n Key::Named(named) => match named {\n NamedKey::Enter => \"57414\",\n NamedKey::ArrowLeft => \"57417\",\n NamedKey::ArrowRight => \"57418\",\n NamedKey::ArrowUp => \"57419\",\n NamedKey::ArrowDown => \"57420\",\n NamedKey::PageUp => \"57421\",\n NamedKey::PageDown => \"57422\",\n NamedKey::Home => \"57423\",\n NamedKey::End => \"57424\",\n NamedKey::Insert => \"57425\",\n NamedKey::Delete => \"57426\",\n _ => return None,\n },\n _ => return None,\n };\n\n Some(SequenceBase::new(base.into(), SequenceTerminator::Kitty))\n }\n\n /// Try building from [`NamedKey`] using the kitty keyboard protocol encoding\n /// for functional keys.\n fn try_build_named_kitty(&self, key: &KeyEvent) -> Option<SequenceBase> {\n let named = match key.logical_key {\n Key::Named(named) if self.kitty_seq => named,\n _ => return None,\n };\n\n let (base, terminator) = match named {\n // F3 in kitty protocol diverges from alacritty's terminfo.\n NamedKey::F3 => (\"13\", SequenceTerminator::Normal('~')),\n NamedKey::F13 => (\"57376\", SequenceTerminator::Kitty),\n NamedKey::F14 => (\"57377\", SequenceTerminator::Kitty),\n NamedKey::F15 => (\"57378\", SequenceTerminator::Kitty),\n NamedKey::F16 => (\"57379\", SequenceTerminator::Kitty),\n NamedKey::F17 => (\"57380\", SequenceTerminator::Kitty),\n NamedKey::F18 => (\"57381\", SequenceTerminator::Kitty),\n NamedKey::F19 => 
(\"57382\", SequenceTerminator::Kitty),\n NamedKey::F20 => (\"57383\", SequenceTerminator::Kitty),\n NamedKey::F21 => (\"57384\", SequenceTerminator::Kitty),\n NamedKey::F22 => (\"57385\", SequenceTerminator::Kitty),\n NamedKey::F23 => (\"57386\", SequenceTerminator::Kitty),\n NamedKey::F24 => (\"57387\", SequenceTerminator::Kitty),\n NamedKey::F25 => (\"57388\", SequenceTerminator::Kitty),\n NamedKey::F26 => (\"57389\", SequenceTerminator::Kitty),\n NamedKey::F27 => (\"57390\", SequenceTerminator::Kitty),\n NamedKey::F28 => (\"57391\", SequenceTerminator::Kitty),\n NamedKey::F29 => (\"57392\", SequenceTerminator::Kitty),\n NamedKey::F30 => (\"57393\", SequenceTerminator::Kitty),\n NamedKey::F31 => (\"57394\", SequenceTerminator::Kitty),\n NamedKey::F32 => (\"57395\", SequenceTerminator::Kitty),\n NamedKey::F33 => (\"57396\", SequenceTerminator::Kitty),\n NamedKey::F34 => (\"57397\", SequenceTerminator::Kitty),\n NamedKey::F35 => (\"57398\", SequenceTerminator::Kitty),\n NamedKey::ScrollLock => (\"57359\", SequenceTerminator::Kitty),\n NamedKey::PrintScreen => (\"57361\", SequenceTerminator::Kitty),\n NamedKey::Pause => (\"57362\", SequenceTerminator::Kitty),\n NamedKey::ContextMenu => (\"57363\", SequenceTerminator::Kitty),\n NamedKey::MediaPlay => (\"57428\", SequenceTerminator::Kitty),\n NamedKey::MediaPause => (\"57429\", SequenceTerminator::Kitty),\n NamedKey::MediaPlayPause => (\"57430\", SequenceTerminator::Kitty),\n NamedKey::MediaStop => (\"57432\", SequenceTerminator::Kitty),\n NamedKey::MediaFastForward => (\"57433\", SequenceTerminator::Kitty),\n NamedKey::MediaRewind => (\"57434\", SequenceTerminator::Kitty),\n NamedKey::MediaTrackNext => (\"57435\", SequenceTerminator::Kitty),\n NamedKey::MediaTrackPrevious => (\"57436\", SequenceTerminator::Kitty),\n NamedKey::MediaRecord => (\"57437\", SequenceTerminator::Kitty),\n NamedKey::AudioVolumeDown => (\"57438\", SequenceTerminator::Kitty),\n NamedKey::AudioVolumeUp => (\"57439\", 
SequenceTerminator::Kitty),\n NamedKey::AudioVolumeMute => (\"57440\", SequenceTerminator::Kitty),\n _ => return None,\n };\n\n Some(SequenceBase::new(base.into(), terminator))\n }\n\n /// Try building from [`NamedKey`].\n fn try_build_named_normal(\n &self,\n key: &KeyEvent,\n has_associated_text: bool,\n ) -> Option<SequenceBase> {\n let named = match key.logical_key {\n Key::Named(named) => named,\n _ => return None,\n };\n\n // The default parameter is 1, so we can omit it.\n let one_based =\n if self.modifiers.is_empty() && !self.kitty_event_type && !has_associated_text {\n \"\"\n } else {\n \"1\"\n };\n let (base, terminator) = match named {\n NamedKey::PageUp => (\"5\", SequenceTerminator::Normal('~')),\n NamedKey::PageDown => (\"6\", SequenceTerminator::Normal('~')),\n NamedKey::Insert => (\"2\", SequenceTerminator::Normal('~')),\n NamedKey::Delete => (\"3\", SequenceTerminator::Normal('~')),\n NamedKey::Home => (one_based, SequenceTerminator::Normal('H')),\n NamedKey::End => (one_based, SequenceTerminator::Normal('F')),\n NamedKey::ArrowLeft => (one_based, SequenceTerminator::Normal('D')),\n NamedKey::ArrowRight => (one_based, SequenceTerminator::Normal('C')),\n NamedKey::ArrowUp => (one_based, SequenceTerminator::Normal('A')),\n NamedKey::ArrowDown => (one_based, SequenceTerminator::Normal('B')),\n NamedKey::F1 => (one_based, SequenceTerminator::Normal('P')),\n NamedKey::F2 => (one_based, SequenceTerminator::Normal('Q')),\n NamedKey::F3 => (one_based, SequenceTerminator::Normal('R')),\n NamedKey::F4 => (one_based, SequenceTerminator::Normal('S')),\n NamedKey::F5 => (\"15\", SequenceTerminator::Normal('~')),\n NamedKey::F6 => (\"17\", SequenceTerminator::Normal('~')),\n NamedKey::F7 => (\"18\", SequenceTerminator::Normal('~')),\n NamedKey::F8 => (\"19\", SequenceTerminator::Normal('~')),\n NamedKey::F9 => (\"20\", SequenceTerminator::Normal('~')),\n NamedKey::F10 => (\"21\", SequenceTerminator::Normal('~')),\n NamedKey::F11 => (\"23\", 
SequenceTerminator::Normal('~')),\n NamedKey::F12 => (\"24\", SequenceTerminator::Normal('~')),\n NamedKey::F13 => (\"25\", SequenceTerminator::Normal('~')),\n NamedKey::F14 => (\"26\", SequenceTerminator::Normal('~')),\n NamedKey::F15 => (\"28\", SequenceTerminator::Normal('~')),\n NamedKey::F16 => (\"29\", SequenceTerminator::Normal('~')),\n NamedKey::F17 => (\"31\", SequenceTerminator::Normal('~')),\n NamedKey::F18 => (\"32\", SequenceTerminator::Normal('~')),\n NamedKey::F19 => (\"33\", SequenceTerminator::Normal('~')),\n NamedKey::F20 => (\"34\", SequenceTerminator::Normal('~')),\n _ => return None,\n };\n\n Some(SequenceBase::new(base.into(), terminator))\n }\n\n /// Try building escape from control characters (e.g. Enter) and modifiers.\n fn try_build_control_char_or_mod(\n &self,\n key: &KeyEvent,\n mods: &mut SequenceModifiers,\n ) -> Option<SequenceBase> {\n if !self.kitty_encode_all && !self.kitty_seq {\n return None;\n }\n\n let named = match key.logical_key {\n Key::Named(named) => named,\n _ => return None,\n };\n\n let base = match named {\n NamedKey::Tab => \"9\",\n NamedKey::Enter => \"13\",\n NamedKey::Escape => \"27\",\n NamedKey::Space => \"32\",\n NamedKey::Backspace => \"127\",\n _ => \"\",\n };\n\n // Fail when the key is not a named control character and the active mode prohibits us\n // from encoding modifier keys.\n if !self.kitty_encode_all && base.is_empty() {\n return None;\n }\n\n let base = match (named, key.location) {\n (NamedKey::Shift, KeyLocation::Left) => \"57441\",\n (NamedKey::Control, KeyLocation::Left) => \"57442\",\n (NamedKey::Alt, KeyLocation::Left) => \"57443\",\n (NamedKey::Super, KeyLocation::Left) => \"57444\",\n (NamedKey::Hyper, KeyLocation::Left) => \"57445\",\n (NamedKey::Meta, KeyLocation::Left) => \"57446\",\n (NamedKey::Shift, _) => \"57447\",\n (NamedKey::Control, _) => \"57448\",\n (NamedKey::Alt, _) => \"57449\",\n (NamedKey::Super, _) => \"57450\",\n (NamedKey::Hyper, _) => \"57451\",\n (NamedKey::Meta, _) 
=> \"57452\",\n (NamedKey::CapsLock, _) => \"57358\",\n (NamedKey::NumLock, _) => \"57360\",\n _ => base,\n };\n\n // NOTE: Kitty's protocol mandates that the modifier state is applied before\n // key press, however winit sends them after the key press, so for modifiers\n // itself apply the state based on keysyms and not the _actual_ modifiers\n // state, which is how kitty is doing so and what is suggested in such case.\n let press = key.state.is_pressed();\n match named {\n NamedKey::Shift => mods.set(SequenceModifiers::SHIFT, press),\n NamedKey::Control => mods.set(SequenceModifiers::CONTROL, press),\n NamedKey::Alt => mods.set(SequenceModifiers::ALT, press),\n NamedKey::Super => mods.set(SequenceModifiers::SUPER, press),\n _ => (),\n }\n\n if base.is_empty() {\n None\n } else {\n Some(SequenceBase::new(base.into(), SequenceTerminator::Kitty))\n }\n }\n}",
"class_signature": "impl SequenceBuilder"
} |
try_build_named_normal | alacritty-master/alacritty/src/input/keyboard.rs | fn try_build_named_normal(
&self,
key: &KeyEvent,
has_associated_text: bool,
) -> Option<SequenceBase> {
let named = match key.logical_key {
Key::Named(named) => named,
_ => return None,
};
// The default parameter is 1, so we can omit it.
let one_based =
if self.modifiers.is_empty() && !self.kitty_event_type && !has_associated_text {
""
} else {
"1"
};
let (base, terminator) = match named {
NamedKey::PageUp => ("5", SequenceTerminator::Normal('~')),
NamedKey::PageDown => ("6", SequenceTerminator::Normal('~')),
NamedKey::Insert => ("2", SequenceTerminator::Normal('~')),
NamedKey::Delete => ("3", SequenceTerminator::Normal('~')),
NamedKey::Home => (one_based, SequenceTerminator::Normal('H')),
NamedKey::End => (one_based, SequenceTerminator::Normal('F')),
NamedKey::ArrowLeft => (one_based, SequenceTerminator::Normal('D')),
NamedKey::ArrowRight => (one_based, SequenceTerminator::Normal('C')),
NamedKey::ArrowUp => (one_based, SequenceTerminator::Normal('A')),
NamedKey::ArrowDown => (one_based, SequenceTerminator::Normal('B')),
NamedKey::F1 => (one_based, SequenceTerminator::Normal('P')),
NamedKey::F2 => (one_based, SequenceTerminator::Normal('Q')),
NamedKey::F3 => (one_based, SequenceTerminator::Normal('R')),
NamedKey::F4 => (one_based, SequenceTerminator::Normal('S')),
NamedKey::F5 => ("15", SequenceTerminator::Normal('~')),
NamedKey::F6 => ("17", SequenceTerminator::Normal('~')),
NamedKey::F7 => ("18", SequenceTerminator::Normal('~')),
NamedKey::F8 => ("19", SequenceTerminator::Normal('~')),
NamedKey::F9 => ("20", SequenceTerminator::Normal('~')),
NamedKey::F10 => ("21", SequenceTerminator::Normal('~')),
NamedKey::F11 => ("23", SequenceTerminator::Normal('~')),
NamedKey::F12 => ("24", SequenceTerminator::Normal('~')),
NamedKey::F13 => ("25", SequenceTerminator::Normal('~')),
NamedKey::F14 => ("26", SequenceTerminator::Normal('~')),
NamedKey::F15 => ("28", SequenceTerminator::Normal('~')),
NamedKey::F16 => ("29", SequenceTerminator::Normal('~')),
NamedKey::F17 => ("31", SequenceTerminator::Normal('~')),
NamedKey::F18 => ("32", SequenceTerminator::Normal('~')),
NamedKey::F19 => ("33", SequenceTerminator::Normal('~')),
NamedKey::F20 => ("34", SequenceTerminator::Normal('~')),
_ => return None,
};
Some(SequenceBase::new(base.into(), terminator))
} | use std::borrow::Cow;
use winit::event::{ElementState, KeyEvent};
#[cfg(target_os = "macos")]
use winit::keyboard::ModifiersKeyState;
use winit::keyboard::{Key, KeyLocation, ModifiersState, NamedKey};
#[cfg(target_os = "macos")]
use winit::platform::macos::OptionAsAlt;
use alacritty_terminal::event::EventListener;
use alacritty_terminal::term::TermMode;
use winit::platform::modifier_supplement::KeyEventExtModifierSupplement;
use crate::config::{Action, BindingKey, BindingMode, KeyBinding};
use crate::event::TYPING_SEARCH_DELAY;
use crate::input::{ActionContext, Execute, Processor};
use crate::scheduler::{TimerId, Topic};
impl<T: EventListener, A: ActionContext<T>> Processor<T, A> {
    /// Process key input.
    pub fn key_input(&mut self, key: KeyEvent) {
        // IME input will be applied on commit and shouldn't trigger key bindings.
        if self.ctx.display().ime.preedit().is_some() {
            return;
        }

        let mode = *self.ctx.terminal().mode();
        let mods = self.ctx.modifiers().state();

        // Releases take a separate, much more restricted path.
        if key.state == ElementState::Released {
            if self.ctx.inline_search_state().char_pending {
                self.ctx.window().set_ime_allowed(true);
            }
            self.key_release(key, mode, mods);
            return;
        }

        let text = key.text_with_all_modifiers().unwrap_or_default();

        // All key bindings are disabled while a hint is being selected.
        if self.ctx.display().hint_state.active() {
            for character in text.chars() {
                self.ctx.hint_input(character);
            }
            return;
        }

        // First key after inline search is captured.
        let inline_state = self.ctx.inline_search_state();
        if inline_state.char_pending {
            self.ctx.inline_search_input(text);
            return;
        }

        // Reset search delay when the user is still typing.
        self.reset_search_delay();

        // Key bindings suppress the character input.
        if self.process_key_bindings(&key) {
            return;
        }

        if self.ctx.search_active() {
            for character in text.chars() {
                self.ctx.search_input(character);
            }
            return;
        }

        // Vi mode on its own doesn't have any input, the search input was done before.
        if mode.contains(TermMode::VI) {
            return;
        }

        // Mask `Alt` modifier from input when we won't send esc.
        let mods = if self.alt_send_esc(&key, text) { mods } else { mods & !ModifiersState::ALT };

        let build_key_sequence = Self::should_build_sequence(&key, text, mode, mods);
        let is_modifier_key = Self::is_modifier_key(&key);

        let bytes = if build_key_sequence {
            build_sequence(key, mods, mode)
        } else {
            // Legacy path: optional ESC prefix for `Alt`, then the key's own text.
            let mut bytes = Vec::with_capacity(text.len() + 1);
            if mods.alt_key() {
                bytes.push(b'\x1b');
            }
            bytes.extend_from_slice(text.as_bytes());
            bytes
        };

        // Write only if we have something to write.
        if !bytes.is_empty() {
            // Don't clear selection/scroll down when writing escaped modifier keys.
            if !is_modifier_key {
                self.ctx.on_terminal_input_start();
            }

            self.ctx.write_to_pty(bytes);
        }
    }

    /// Check whether the held `Alt` modifier should produce ESC-prefixed input for
    /// this key (used both for the `\x1b` prefix and for masking `Alt` from mods).
    fn alt_send_esc(&mut self, key: &KeyEvent, text: &str) -> bool {
        #[cfg(not(target_os = "macos"))]
        let alt_send_esc = self.ctx.modifiers().state().alt_key();

        // On macOS `Option` only acts as `Alt` when the config says so, optionally
        // distinguishing the left/right `Option` key.
        #[cfg(target_os = "macos")]
        let alt_send_esc = {
            let option_as_alt = self.ctx.config().window.option_as_alt();
            self.ctx.modifiers().state().alt_key()
                && (option_as_alt == OptionAsAlt::Both
                    || (option_as_alt == OptionAsAlt::OnlyLeft
                        && self.ctx.modifiers().lalt_state() == ModifiersKeyState::Pressed)
                    || (option_as_alt == OptionAsAlt::OnlyRight
                        && self.ctx.modifiers().ralt_state() == ModifiersKeyState::Pressed))
        };

        match key.logical_key {
            Key::Named(named) => {
                if named.to_text().is_some() {
                    alt_send_esc
                } else {
                    // Treat `Alt` as modifier for named keys without text, like ArrowUp.
                    self.ctx.modifiers().state().alt_key()
                }
            },
            // Character keys only get the ESC prefix when they emit exactly one char.
            _ => alt_send_esc && text.chars().count() == 1,
        }
    }

    /// Check whether the event is a bare modifier key press (Shift/Control/Alt/Super).
    fn is_modifier_key(key: &KeyEvent) -> bool {
        matches!(
            key.logical_key.as_ref(),
            Key::Named(NamedKey::Shift)
                | Key::Named(NamedKey::Control)
                | Key::Named(NamedKey::Alt)
                | Key::Named(NamedKey::Super)
        )
    }

    /// Check whether we should try to build escape sequence for the [`KeyEvent`].
    fn should_build_sequence(
        key: &KeyEvent,
        text: &str,
        mode: TermMode,
        mods: ModifiersState,
    ) -> bool {
        if mode.contains(TermMode::REPORT_ALL_KEYS_AS_ESC) {
            return true;
        }

        // Keys whose legacy encoding is ambiguous (Escape, numpad keys, and most
        // modifier combinations) are escaped when `DISAMBIGUATE_ESC_CODES` is set.
        let disambiguate = mode.contains(TermMode::DISAMBIGUATE_ESC_CODES)
            && (key.logical_key == Key::Named(NamedKey::Escape)
                || key.location == KeyLocation::Numpad
                || (!mods.is_empty()
                    && (mods != ModifiersState::SHIFT
                        || matches!(
                            key.logical_key,
                            Key::Named(NamedKey::Tab)
                                | Key::Named(NamedKey::Enter)
                                | Key::Named(NamedKey::Backspace)
                        ))));

        match key.logical_key {
            _ if disambiguate => true,
            // Exclude all the named keys unless they have textual representation.
            Key::Named(named) => named.to_text().is_none(),
            _ => text.is_empty(),
        }
    }

    /// Attempt to find a binding and execute its action.
    ///
    /// The provided mode, mods, and key must match what is allowed by a binding
    /// for its action to be executed.
    fn process_key_bindings(&mut self, key: &KeyEvent) -> bool {
        let mode = BindingMode::new(self.ctx.terminal().mode(), self.ctx.search_active());
        let mods = self.ctx.modifiers().state();

        // Don't suppress char if no bindings were triggered.
        let mut suppress_chars = None;

        // We don't want the key without modifier, because it means something else most of
        // the time. However what we want is to manually lowercase the character to account
        // for both small and capital letters on regular characters at the same time.
        let logical_key = if let Key::Character(ch) = key.logical_key.as_ref() {
            // Match `Alt` bindings without `Alt` being applied, otherwise they use the
            // composed chars, which are not intuitive to bind.
            //
            // On Windows, the `Ctrl + Alt` mangles `logical_key` to unidentified values, thus
            // preventing them from being used in bindings
            //
            // For more see https://github.com/rust-windowing/winit/issues/2945.
            if (cfg!(target_os = "macos") || (cfg!(windows) && mods.control_key()))
                && mods.alt_key()
            {
                key.key_without_modifiers()
            } else {
                Key::Character(ch.to_lowercase().into())
            }
        } else {
            key.logical_key.clone()
        };

        // Get the action of a key binding.
        let mut binding_action = |binding: &KeyBinding| {
            let key = match (&binding.trigger, &logical_key) {
                (BindingKey::Scancode(_), _) => BindingKey::Scancode(key.physical_key),
                (_, code) => {
                    BindingKey::Keycode { key: code.clone(), location: key.location.into() }
                },
            };

            if binding.is_triggered_by(mode, mods, &key) {
                // Pass through the key if any of the bindings has the `ReceiveChar` action.
                *suppress_chars.get_or_insert(true) &= binding.action != Action::ReceiveChar;

                // Binding was triggered; run the action.
                Some(binding.action.clone())
            } else {
                None
            }
        };

        // Trigger matching key bindings.
        //
        // NOTE(review): index-based iteration looks deliberate here — executing an
        // action needs `&mut self.ctx` while the bindings live inside the config.
        for i in 0..self.ctx.config().key_bindings().len() {
            let binding = &self.ctx.config().key_bindings()[i];
            if let Some(action) = binding_action(binding) {
                action.execute(&mut self.ctx);
            }
        }

        // Trigger key bindings for hints.
        for i in 0..self.ctx.config().hints.enabled.len() {
            let hint = &self.ctx.config().hints.enabled[i];
            let binding = match hint.binding.as_ref() {
                Some(binding) => binding.key_binding(hint),
                None => continue,
            };

            if let Some(action) = binding_action(binding) {
                action.execute(&mut self.ctx);
            }
        }

        suppress_chars.unwrap_or(false)
    }

    /// Handle key release.
    ///
    /// Releases are only reported when `REPORT_EVENT_TYPES` is active and no
    /// input-consuming mode (vi, search, hint selection) is running.
    fn key_release(&mut self, key: KeyEvent, mode: TermMode, mods: ModifiersState) {
        if !mode.contains(TermMode::REPORT_EVENT_TYPES)
            || mode.contains(TermMode::VI)
            || self.ctx.search_active()
            || self.ctx.display().hint_state.active()
        {
            return;
        }

        // Mask `Alt` modifier from input when we won't send esc.
        let text = key.text_with_all_modifiers().unwrap_or_default();
        let mods = if self.alt_send_esc(&key, text) { mods } else { mods & !ModifiersState::ALT };

        let bytes = match key.logical_key.as_ref() {
            // These named control keys don't report a release sequence unless every
            // key is reported as an escape.
            Key::Named(NamedKey::Enter)
            | Key::Named(NamedKey::Tab)
            | Key::Named(NamedKey::Backspace)
                if !mode.contains(TermMode::REPORT_ALL_KEYS_AS_ESC) =>
            {
                return
            },
            _ => build_sequence(key, mods, mode),
        };

        self.ctx.write_to_pty(bytes);
    }

    /// Reset search delay.
    fn reset_search_delay(&mut self) {
        if self.ctx.search_active() {
            let timer_id = TimerId::new(Topic::DelayedSearch, self.ctx.window().id());
            let scheduler = self.ctx.scheduler_mut();
            // Re-arm the pending timer so the search only fires once typing pauses.
            if let Some(timer) = scheduler.unschedule(timer_id) {
                scheduler.schedule(timer.event, TYPING_SEARCH_DELAY, false, timer.id);
            }
        }
    }
}
/// Build a key's keyboard escape sequence based on the given `key`, `mods`, and `mode`.
///
/// Returns an empty `Vec` when no encoder can represent the key.
///
/// The key sequences for `APP_KEYPAD` and alike are handled inside the bindings.
#[inline(never)]
fn build_sequence(key: KeyEvent, mods: ModifiersState, mode: TermMode) -> Vec<u8> {
    let mut modifiers = mods.into();

    // Any of the kitty protocol flags enables the kitty-style encoders.
    let kitty_seq = mode.intersects(
        TermMode::REPORT_ALL_KEYS_AS_ESC
            | TermMode::DISAMBIGUATE_ESC_CODES
            | TermMode::REPORT_EVENT_TYPES,
    );
    let kitty_encode_all = mode.contains(TermMode::REPORT_ALL_KEYS_AS_ESC);
    // The default parameter is 1, so we can omit it.
    let kitty_event_type = mode.contains(TermMode::REPORT_EVENT_TYPES)
        && (key.repeat || key.state == ElementState::Released);

    let context =
        SequenceBuilder { mode, modifiers, kitty_seq, kitty_encode_all, kitty_event_type };

    // Associated text is only reported for pressed, non-empty, non-control text.
    let associated_text = key.text_with_all_modifiers().filter(|text| {
        mode.contains(TermMode::REPORT_ASSOCIATED_TEXT)
            && key.state != ElementState::Released
            && !text.is_empty()
            && !is_control_character(text)
    });

    // Try the most specific encoders first, falling back to textual encoding.
    let sequence_base = context
        .try_build_numpad(&key)
        .or_else(|| context.try_build_named_kitty(&key))
        .or_else(|| context.try_build_named_normal(&key, associated_text.is_some()))
        .or_else(|| context.try_build_control_char_or_mod(&key, &mut modifiers))
        .or_else(|| context.try_build_textual(&key, associated_text));

    let (payload, terminator) = match sequence_base {
        Some(SequenceBase { payload, terminator }) => (payload, terminator),
        // No encoder matched; emit nothing.
        _ => return Vec::new(),
    };

    let mut payload = format!("\x1b[{payload}");

    // Add modifiers information.
    if kitty_event_type || !modifiers.is_empty() || associated_text.is_some() {
        payload.push_str(&format!(";{}", modifiers.encode_esc_sequence()));
    }

    // Push event type.
    if kitty_event_type {
        payload.push(':');
        let event_type = match key.state {
            _ if key.repeat => '2',
            ElementState::Pressed => '1',
            ElementState::Released => '3',
        };
        payload.push(event_type);
    }

    // Associated text: first codepoint is `;`-separated, the rest use `:`.
    if let Some(text) = associated_text {
        let mut codepoints = text.chars().map(u32::from);
        if let Some(codepoint) = codepoints.next() {
            payload.push_str(&format!(";{codepoint}"));
        }
        for codepoint in codepoints {
            payload.push_str(&format!(":{codepoint}"));
        }
    }

    payload.push(terminator.encode_esc_sequence());
    payload.into_bytes()
}
/// Helper to build escape sequence payloads from [`KeyEvent`].
pub struct SequenceBuilder {
    /// Terminal mode flags consulted while encoding (e.g. `REPORT_ALTERNATE_KEYS`).
    mode: TermMode,
    /// The emitted sequence should follow the kitty keyboard protocol.
    kitty_seq: bool,
    /// Encode all the keys according to the protocol.
    kitty_encode_all: bool,
    /// Report event types.
    kitty_event_type: bool,
    /// Modifier state to encode into the sequence.
    modifiers: SequenceModifiers,
}
impl SequenceBuilder {
    /// Try building sequence from the event's emitting text.
    fn try_build_textual(
        &self,
        key: &KeyEvent,
        associated_text: Option<&str>,
    ) -> Option<SequenceBase> {
        // Only character keys are encoded here, and only in kitty mode.
        let character = match key.logical_key.as_ref() {
            Key::Character(character) if self.kitty_seq => character,
            _ => return None,
        };

        if character.chars().count() == 1 {
            let shift = self.modifiers.contains(SequenceModifiers::SHIFT);

            let ch = character.chars().next().unwrap();
            // With `SHIFT` held, report the lowercase form as the primary key code.
            let unshifted_ch = if shift { ch.to_lowercase().next().unwrap() } else { ch };

            let alternate_key_code = u32::from(ch);
            let mut unicode_key_code = u32::from(unshifted_ch);

            // Try to get the base for keys which change based on modifier, like `1` for `!`.
            //
            // However it should only be performed when `SHIFT` is pressed.
            if shift && alternate_key_code == unicode_key_code {
                if let Key::Character(unmodded) = key.key_without_modifiers().as_ref() {
                    unicode_key_code = u32::from(unmodded.chars().next().unwrap_or(unshifted_ch));
                }
            }

            // NOTE: Base layouts are ignored, since winit doesn't expose this information
            // yet.
            let payload = if self.mode.contains(TermMode::REPORT_ALTERNATE_KEYS)
                && alternate_key_code != unicode_key_code
            {
                format!("{unicode_key_code}:{alternate_key_code}")
            } else {
                unicode_key_code.to_string()
            };

            Some(SequenceBase::new(payload.into(), SequenceTerminator::Kitty))
        } else if self.kitty_encode_all && associated_text.is_some() {
            // Fallback when need to report text, but we don't have any key associated with this
            // text.
            Some(SequenceBase::new("0".into(), SequenceTerminator::Kitty))
        } else {
            None
        }
    }

    /// Try building from numpad key.
    ///
    /// `None` is returned when the key is neither known nor numpad.
    fn try_build_numpad(&self, key: &KeyEvent) -> Option<SequenceBase> {
        if !self.kitty_seq || key.location != KeyLocation::Numpad {
            return None;
        }

        // Numpad functional key codes (57399..) from the kitty keyboard protocol.
        let base = match key.logical_key.as_ref() {
            Key::Character("0") => "57399",
            Key::Character("1") => "57400",
            Key::Character("2") => "57401",
            Key::Character("3") => "57402",
            Key::Character("4") => "57403",
            Key::Character("5") => "57404",
            Key::Character("6") => "57405",
            Key::Character("7") => "57406",
            Key::Character("8") => "57407",
            Key::Character("9") => "57408",
            Key::Character(".") => "57409",
            Key::Character("/") => "57410",
            Key::Character("*") => "57411",
            Key::Character("-") => "57412",
            Key::Character("+") => "57413",
            Key::Character("=") => "57415",
            Key::Named(named) => match named {
                NamedKey::Enter => "57414",
                NamedKey::ArrowLeft => "57417",
                NamedKey::ArrowRight => "57418",
                NamedKey::ArrowUp => "57419",
                NamedKey::ArrowDown => "57420",
                NamedKey::PageUp => "57421",
                NamedKey::PageDown => "57422",
                NamedKey::Home => "57423",
                NamedKey::End => "57424",
                NamedKey::Insert => "57425",
                NamedKey::Delete => "57426",
                _ => return None,
            },
            _ => return None,
        };

        Some(SequenceBase::new(base.into(), SequenceTerminator::Kitty))
    }

    /// Try building from [`NamedKey`] using the kitty keyboard protocol encoding
    /// for functional keys.
    fn try_build_named_kitty(&self, key: &KeyEvent) -> Option<SequenceBase> {
        let named = match key.logical_key {
            Key::Named(named) if self.kitty_seq => named,
            _ => return None,
        };

        let (base, terminator) = match named {
            // F3 in kitty protocol diverges from alacritty's terminfo.
            NamedKey::F3 => ("13", SequenceTerminator::Normal('~')),
            NamedKey::F13 => ("57376", SequenceTerminator::Kitty),
            NamedKey::F14 => ("57377", SequenceTerminator::Kitty),
            NamedKey::F15 => ("57378", SequenceTerminator::Kitty),
            NamedKey::F16 => ("57379", SequenceTerminator::Kitty),
            NamedKey::F17 => ("57380", SequenceTerminator::Kitty),
            NamedKey::F18 => ("57381", SequenceTerminator::Kitty),
            NamedKey::F19 => ("57382", SequenceTerminator::Kitty),
            NamedKey::F20 => ("57383", SequenceTerminator::Kitty),
            NamedKey::F21 => ("57384", SequenceTerminator::Kitty),
            NamedKey::F22 => ("57385", SequenceTerminator::Kitty),
            NamedKey::F23 => ("57386", SequenceTerminator::Kitty),
            NamedKey::F24 => ("57387", SequenceTerminator::Kitty),
            NamedKey::F25 => ("57388", SequenceTerminator::Kitty),
            NamedKey::F26 => ("57389", SequenceTerminator::Kitty),
            NamedKey::F27 => ("57390", SequenceTerminator::Kitty),
            NamedKey::F28 => ("57391", SequenceTerminator::Kitty),
            NamedKey::F29 => ("57392", SequenceTerminator::Kitty),
            NamedKey::F30 => ("57393", SequenceTerminator::Kitty),
            NamedKey::F31 => ("57394", SequenceTerminator::Kitty),
            NamedKey::F32 => ("57395", SequenceTerminator::Kitty),
            NamedKey::F33 => ("57396", SequenceTerminator::Kitty),
            NamedKey::F34 => ("57397", SequenceTerminator::Kitty),
            NamedKey::F35 => ("57398", SequenceTerminator::Kitty),
            NamedKey::ScrollLock => ("57359", SequenceTerminator::Kitty),
            NamedKey::PrintScreen => ("57361", SequenceTerminator::Kitty),
            NamedKey::Pause => ("57362", SequenceTerminator::Kitty),
            NamedKey::ContextMenu => ("57363", SequenceTerminator::Kitty),
            NamedKey::MediaPlay => ("57428", SequenceTerminator::Kitty),
            NamedKey::MediaPause => ("57429", SequenceTerminator::Kitty),
            NamedKey::MediaPlayPause => ("57430", SequenceTerminator::Kitty),
            NamedKey::MediaStop => ("57432", SequenceTerminator::Kitty),
            NamedKey::MediaFastForward => ("57433", SequenceTerminator::Kitty),
            NamedKey::MediaRewind => ("57434", SequenceTerminator::Kitty),
            NamedKey::MediaTrackNext => ("57435", SequenceTerminator::Kitty),
            NamedKey::MediaTrackPrevious => ("57436", SequenceTerminator::Kitty),
            NamedKey::MediaRecord => ("57437", SequenceTerminator::Kitty),
            NamedKey::AudioVolumeDown => ("57438", SequenceTerminator::Kitty),
            NamedKey::AudioVolumeUp => ("57439", SequenceTerminator::Kitty),
            NamedKey::AudioVolumeMute => ("57440", SequenceTerminator::Kitty),
            _ => return None,
        };

        Some(SequenceBase::new(base.into(), terminator))
    }

    /// Try building from [`NamedKey`].
    fn try_build_named_normal(
        &self,
        key: &KeyEvent,
        has_associated_text: bool,
    ) -> Option<SequenceBase> {
        let named = match key.logical_key {
            Key::Named(named) => named,
            _ => return None,
        };

        // The default parameter is 1, so we can omit it.
        let one_based =
            if self.modifiers.is_empty() && !self.kitty_event_type && !has_associated_text {
                ""
            } else {
                "1"
            };
        // Legacy xterm/DEC encodings: `CSI <n> ~` or `CSI <one_based> <letter>`.
        let (base, terminator) = match named {
            NamedKey::PageUp => ("5", SequenceTerminator::Normal('~')),
            NamedKey::PageDown => ("6", SequenceTerminator::Normal('~')),
            NamedKey::Insert => ("2", SequenceTerminator::Normal('~')),
            NamedKey::Delete => ("3", SequenceTerminator::Normal('~')),
            NamedKey::Home => (one_based, SequenceTerminator::Normal('H')),
            NamedKey::End => (one_based, SequenceTerminator::Normal('F')),
            NamedKey::ArrowLeft => (one_based, SequenceTerminator::Normal('D')),
            NamedKey::ArrowRight => (one_based, SequenceTerminator::Normal('C')),
            NamedKey::ArrowUp => (one_based, SequenceTerminator::Normal('A')),
            NamedKey::ArrowDown => (one_based, SequenceTerminator::Normal('B')),
            NamedKey::F1 => (one_based, SequenceTerminator::Normal('P')),
            NamedKey::F2 => (one_based, SequenceTerminator::Normal('Q')),
            NamedKey::F3 => (one_based, SequenceTerminator::Normal('R')),
            NamedKey::F4 => (one_based, SequenceTerminator::Normal('S')),
            NamedKey::F5 => ("15", SequenceTerminator::Normal('~')),
            NamedKey::F6 => ("17", SequenceTerminator::Normal('~')),
            NamedKey::F7 => ("18", SequenceTerminator::Normal('~')),
            NamedKey::F8 => ("19", SequenceTerminator::Normal('~')),
            NamedKey::F9 => ("20", SequenceTerminator::Normal('~')),
            NamedKey::F10 => ("21", SequenceTerminator::Normal('~')),
            NamedKey::F11 => ("23", SequenceTerminator::Normal('~')),
            NamedKey::F12 => ("24", SequenceTerminator::Normal('~')),
            NamedKey::F13 => ("25", SequenceTerminator::Normal('~')),
            NamedKey::F14 => ("26", SequenceTerminator::Normal('~')),
            NamedKey::F15 => ("28", SequenceTerminator::Normal('~')),
            NamedKey::F16 => ("29", SequenceTerminator::Normal('~')),
            NamedKey::F17 => ("31", SequenceTerminator::Normal('~')),
            NamedKey::F18 => ("32", SequenceTerminator::Normal('~')),
            NamedKey::F19 => ("33", SequenceTerminator::Normal('~')),
            NamedKey::F20 => ("34", SequenceTerminator::Normal('~')),
            _ => return None,
        };

        Some(SequenceBase::new(base.into(), terminator))
    }

    /// Try building escape from control characters (e.g. Enter) and modifiers.
    fn try_build_control_char_or_mod(
        &self,
        key: &KeyEvent,
        mods: &mut SequenceModifiers,
    ) -> Option<SequenceBase> {
        if !self.kitty_encode_all && !self.kitty_seq {
            return None;
        }

        let named = match key.logical_key {
            Key::Named(named) => named,
            _ => return None,
        };

        // ASCII codes of the named control characters.
        let base = match named {
            NamedKey::Tab => "9",
            NamedKey::Enter => "13",
            NamedKey::Escape => "27",
            NamedKey::Space => "32",
            NamedKey::Backspace => "127",
            _ => "",
        };

        // Fail when the key is not a named control character and the active mode prohibits us
        // from encoding modifier keys.
        if !self.kitty_encode_all && base.is_empty() {
            return None;
        }

        // Modifier keys get kitty functional codes, with left-hand variants first.
        let base = match (named, key.location) {
            (NamedKey::Shift, KeyLocation::Left) => "57441",
            (NamedKey::Control, KeyLocation::Left) => "57442",
            (NamedKey::Alt, KeyLocation::Left) => "57443",
            (NamedKey::Super, KeyLocation::Left) => "57444",
            (NamedKey::Hyper, KeyLocation::Left) => "57445",
            (NamedKey::Meta, KeyLocation::Left) => "57446",
            (NamedKey::Shift, _) => "57447",
            (NamedKey::Control, _) => "57448",
            (NamedKey::Alt, _) => "57449",
            (NamedKey::Super, _) => "57450",
            (NamedKey::Hyper, _) => "57451",
            (NamedKey::Meta, _) => "57452",
            (NamedKey::CapsLock, _) => "57358",
            (NamedKey::NumLock, _) => "57360",
            _ => base,
        };

        // NOTE: Kitty's protocol mandates that the modifier state is applied before
        // key press, however winit sends them after the key press, so for modifiers
        // itself apply the state based on keysyms and not the _actual_ modifiers
        // state, which is how kitty is doing so and what is suggested in such case.
        let press = key.state.is_pressed();
        match named {
            NamedKey::Shift => mods.set(SequenceModifiers::SHIFT, press),
            NamedKey::Control => mods.set(SequenceModifiers::CONTROL, press),
            NamedKey::Alt => mods.set(SequenceModifiers::ALT, press),
            NamedKey::Super => mods.set(SequenceModifiers::SUPER, press),
            _ => (),
        }

        if base.is_empty() {
            None
        } else {
            Some(SequenceBase::new(base.into(), SequenceTerminator::Kitty))
        }
    }
}
/// Partially-built escape sequence: the numeric payload plus its terminator.
pub struct SequenceBase {
    /// The base of the payload, which is the `number` and optionally an alt base from the kitty
    /// spec.
    payload: Cow<'static, str>,
    /// Final character that terminates the escape sequence.
    terminator: SequenceTerminator,
}
impl SequenceBase {
fn new(payload: Cow<'static, str>, terminator: SequenceTerminator) -> Self {
Self { payload, terminator }
}
}
/// Final character of a key escape sequence.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum SequenceTerminator {
    /// The normal key esc sequence terminator defined by xterm/dec.
    Normal(char),
    /// The terminator is for kitty escape sequence.
    Kitty,
}
impl SequenceTerminator {
fn encode_esc_sequence(self) -> char {
match self {
SequenceTerminator::Normal(char) => char,
SequenceTerminator::Kitty => 'u',
}
}
}
bitflags::bitflags! {
    /// The modifiers encoding for escape sequence.
    ///
    /// NOTE(review): the bit assignment presumably mirrors the kitty keyboard
    /// protocol's modifier bits — confirm against the spec before reordering.
    #[derive(Debug, Clone, Copy, PartialEq, Eq)]
    struct SequenceModifiers : u8 {
        const SHIFT = 0b0000_0001;
        const ALT = 0b0000_0010;
        const CONTROL = 0b0000_0100;
        const SUPER = 0b0000_1000;
        // NOTE: Kitty protocol defines additional modifiers to what is present here, like
        // Capslock, but it's not a modifier as per winit.
    }
}
impl SequenceModifiers {
    /// Get the value which should be passed to escape sequence.
    pub fn encode_esc_sequence(self) -> u8 {
        // The wire format offsets the bitfield by one, so an empty modifier set is
        // reported as `1` — the protocol's default parameter.
        self.bits() + 1
    }
}
impl From<ModifiersState> for SequenceModifiers {
    /// Translate winit's modifier state into the sequence modifier bitfield.
    fn from(mods: ModifiersState) -> Self {
        [
            (Self::SHIFT, mods.shift_key()),
            (Self::ALT, mods.alt_key()),
            (Self::CONTROL, mods.control_key()),
            (Self::SUPER, mods.super_key()),
        ]
        .into_iter()
        .fold(Self::empty(), |mut acc, (flag, active)| {
            acc.set(flag, active);
            acc
        })
    }
}
/// Check whether the `text` is `0x7f`, `C0` or `C1` control code.
///
/// Returns `false` for empty or multi-byte strings.
fn is_control_character(text: &str) -> bool {
    // 0x7f (DEL) is included here since it has a dedicated control code (`^?`) which generally
    // does not match the reported text (`^H`), despite not technically being part of C0 or C1.
    //
    // Matching on a single-byte slice makes the helper total: the previous
    // `text.bytes().next().unwrap()` panicked on the empty string and only avoided it
    // through the caller's short-circuiting `!text.is_empty() &&` check.
    match text.as_bytes() {
        [codepoint] => *codepoint < 0x20 || (0x7f..=0x9f).contains(codepoint),
        _ => false,
    }
}
| rust | {
"argument_definitions": [
{
"definitions": [
"pub struct KeyEvent {\n /// Represents the position of a key independent of the currently active layout.\n ///\n /// It also uniquely identifies the physical key (i.e. it's mostly synonymous with a scancode).\n /// The most prevalent use case for this is games. For example the default keys for the player\n /// to move around might be the W, A, S, and D keys on a US layout. The position of these keys\n /// is more important than their label, so they should map to Z, Q, S, and D on an \"AZERTY\"\n /// layout. (This value is `KeyCode::KeyW` for the Z key on an AZERTY layout.)\n ///\n /// ## Caveats\n ///\n /// - Certain niche hardware will shuffle around physical key positions, e.g. a keyboard that\n /// implements DVORAK in hardware (or firmware)\n /// - Your application will likely have to handle keyboards which are missing keys that your\n /// own keyboard has.\n /// - Certain `KeyCode`s will move between a couple of different positions depending on what\n /// layout the keyboard was manufactured to support.\n ///\n /// **Because of these caveats, it is important that you provide users with a way to configure\n /// most (if not all) keybinds in your application.**\n ///\n /// ## `Fn` and `FnLock`\n ///\n /// `Fn` and `FnLock` key events are *exceedingly unlikely* to be emitted by Winit. These keys\n /// are usually handled at the hardware or OS level, and aren't surfaced to applications. 
If\n /// you somehow see this in the wild, we'd like to know :)\n pub physical_key: keyboard::PhysicalKey,\n\n // Allowing `broken_intra_doc_links` for `logical_key`, because\n // `key_without_modifiers` is not available on all platforms\n #[cfg_attr(\n not(any(windows_platform, macos_platform, x11_platform, wayland_platform)),\n allow(rustdoc::broken_intra_doc_links)\n )]\n /// This value is affected by all modifiers except <kbd>Ctrl</kbd>.\n ///\n /// This has two use cases:\n /// - Allows querying whether the current input is a Dead key.\n /// - Allows handling key-bindings on platforms which don't support [`key_without_modifiers`].\n ///\n /// If you use this field (or [`key_without_modifiers`] for that matter) for keyboard\n /// shortcuts, **it is important that you provide users with a way to configure your\n /// application's shortcuts so you don't render your application unusable for users with an\n /// incompatible keyboard layout.**\n ///\n /// ## Platform-specific\n /// - **Web:** Dead keys might be reported as the real key instead of `Dead` depending on the\n /// browser/OS.\n ///\n /// [`key_without_modifiers`]: crate::platform::modifier_supplement::KeyEventExtModifierSupplement::key_without_modifiers\n pub logical_key: keyboard::Key,\n\n /// Contains the text produced by this keypress.\n ///\n /// In most cases this is identical to the content\n /// of the `Character` variant of `logical_key`.\n /// However, on Windows when a dead key was pressed earlier\n /// but cannot be combined with the character from this\n /// keypress, the produced text will consist of two characters:\n /// the dead-key-character followed by the character resulting\n /// from this keypress.\n ///\n /// An additional difference from `logical_key` is that\n /// this field stores the text representation of any key\n /// that has such a representation. 
For example when\n /// `logical_key` is `Key::Named(NamedKey::Enter)`, this field is `Some(\"\\r\")`.\n ///\n /// This is `None` if the current keypress cannot\n /// be interpreted as text.\n ///\n /// See also: `text_with_all_modifiers()`\n pub text: Option<SmolStr>,\n\n /// Contains the location of this key on the keyboard.\n ///\n /// Certain keys on the keyboard may appear in more than once place. For example, the \"Shift\"\n /// key appears on the left side of the QWERTY keyboard as well as the right side. However,\n /// both keys have the same symbolic value. Another example of this phenomenon is the \"1\"\n /// key, which appears both above the \"Q\" key and as the \"Keypad 1\" key.\n ///\n /// This field allows the user to differentiate between keys like this that have the same\n /// symbolic value but different locations on the keyboard.\n ///\n /// See the [`KeyLocation`] type for more details.\n ///\n /// [`KeyLocation`]: crate::keyboard::KeyLocation\n pub location: keyboard::KeyLocation,\n\n /// Whether the key is being pressed or released.\n ///\n /// See the [`ElementState`] type for more details.\n pub state: ElementState,\n\n /// Whether or not this key is a key repeat event.\n ///\n /// On some systems, holding down a key for some period of time causes that key to be repeated\n /// as though it were being pressed and released repeatedly. 
This field is `true` if and only\n /// if this event is the result of one of those repeats.\n ///\n /// # Example\n ///\n /// In games, you often want to ignore repated key events - this can be\n /// done by ignoring events where this property is set.\n ///\n /// ```\n /// use winit::event::{ElementState, KeyEvent, WindowEvent};\n /// use winit::keyboard::{KeyCode, PhysicalKey};\n /// # let window_event = WindowEvent::RedrawRequested; // To make the example compile\n /// match window_event {\n /// WindowEvent::KeyboardInput {\n /// event:\n /// KeyEvent {\n /// physical_key: PhysicalKey::Code(KeyCode::KeyW),\n /// state: ElementState::Pressed,\n /// repeat: false,\n /// ..\n /// },\n /// ..\n /// } => {\n /// // The physical key `W` was pressed, and it was not a repeat\n /// },\n /// _ => {}, // Handle other events\n /// }\n /// ```\n pub repeat: bool,\n\n /// Platform-specific key event information.\n ///\n /// On Windows, Linux and macOS, this type contains the key without modifiers and the text with\n /// all modifiers applied.\n ///\n /// On Android, iOS, Redox and Web, this type is a no-op.\n pub(crate) platform_specific: platform_impl::KeyEventExtra,\n}"
],
"name": "key",
"type": "&KeyEvent"
}
],
"end_line": 579,
"name": "try_build_named_normal",
"signature": "fn try_build_named_normal(\n &self,\n key: &KeyEvent,\n has_associated_text: bool,\n ) -> Option<SequenceBase>",
"start_line": 527
} | {
"class_name": "impl SequenceBuilder {\n /// Try building sequence from the event's emitting text.\n fn try_build_textual(\n &self,\n key: &KeyEvent,\n associated_text: Option<&str>,\n ) -> Option<SequenceBase> {\n let character = match key.logical_key.as_ref() {\n Key::Character(character) if self.kitty_seq => character,\n _ => return None,\n };\n\n if character.chars().count() == 1 {\n let shift = self.modifiers.contains(SequenceModifiers::SHIFT);\n\n let ch = character.chars().next().unwrap();\n let unshifted_ch = if shift { ch.to_lowercase().next().unwrap() } else { ch };\n\n let alternate_key_code = u32::from(ch);\n let mut unicode_key_code = u32::from(unshifted_ch);\n\n // Try to get the base for keys which change based on modifier, like `1` for `!`.\n //\n // However it should only be performed when `SHIFT` is pressed.\n if shift && alternate_key_code == unicode_key_code {\n if let Key::Character(unmodded) = key.key_without_modifiers().as_ref() {\n unicode_key_code = u32::from(unmodded.chars().next().unwrap_or(unshifted_ch));\n }\n }\n\n // NOTE: Base layouts are ignored, since winit doesn't expose this information\n // yet.\n let payload = if self.mode.contains(TermMode::REPORT_ALTERNATE_KEYS)\n && alternate_key_code != unicode_key_code\n {\n format!(\"{unicode_key_code}:{alternate_key_code}\")\n } else {\n unicode_key_code.to_string()\n };\n\n Some(SequenceBase::new(payload.into(), SequenceTerminator::Kitty))\n } else if self.kitty_encode_all && associated_text.is_some() {\n // Fallback when need to report text, but we don't have any key associated with this\n // text.\n Some(SequenceBase::new(\"0\".into(), SequenceTerminator::Kitty))\n } else {\n None\n }\n }\n\n /// Try building from numpad key.\n ///\n /// `None` is returned when the key is neither known nor numpad.\n fn try_build_numpad(&self, key: &KeyEvent) -> Option<SequenceBase> {\n if !self.kitty_seq || key.location != KeyLocation::Numpad {\n return None;\n }\n\n let base = match 
key.logical_key.as_ref() {\n Key::Character(\"0\") => \"57399\",\n Key::Character(\"1\") => \"57400\",\n Key::Character(\"2\") => \"57401\",\n Key::Character(\"3\") => \"57402\",\n Key::Character(\"4\") => \"57403\",\n Key::Character(\"5\") => \"57404\",\n Key::Character(\"6\") => \"57405\",\n Key::Character(\"7\") => \"57406\",\n Key::Character(\"8\") => \"57407\",\n Key::Character(\"9\") => \"57408\",\n Key::Character(\".\") => \"57409\",\n Key::Character(\"/\") => \"57410\",\n Key::Character(\"*\") => \"57411\",\n Key::Character(\"-\") => \"57412\",\n Key::Character(\"+\") => \"57413\",\n Key::Character(\"=\") => \"57415\",\n Key::Named(named) => match named {\n NamedKey::Enter => \"57414\",\n NamedKey::ArrowLeft => \"57417\",\n NamedKey::ArrowRight => \"57418\",\n NamedKey::ArrowUp => \"57419\",\n NamedKey::ArrowDown => \"57420\",\n NamedKey::PageUp => \"57421\",\n NamedKey::PageDown => \"57422\",\n NamedKey::Home => \"57423\",\n NamedKey::End => \"57424\",\n NamedKey::Insert => \"57425\",\n NamedKey::Delete => \"57426\",\n _ => return None,\n },\n _ => return None,\n };\n\n Some(SequenceBase::new(base.into(), SequenceTerminator::Kitty))\n }\n\n /// Try building from [`NamedKey`] using the kitty keyboard protocol encoding\n /// for functional keys.\n fn try_build_named_kitty(&self, key: &KeyEvent) -> Option<SequenceBase> {\n let named = match key.logical_key {\n Key::Named(named) if self.kitty_seq => named,\n _ => return None,\n };\n\n let (base, terminator) = match named {\n // F3 in kitty protocol diverges from alacritty's terminfo.\n NamedKey::F3 => (\"13\", SequenceTerminator::Normal('~')),\n NamedKey::F13 => (\"57376\", SequenceTerminator::Kitty),\n NamedKey::F14 => (\"57377\", SequenceTerminator::Kitty),\n NamedKey::F15 => (\"57378\", SequenceTerminator::Kitty),\n NamedKey::F16 => (\"57379\", SequenceTerminator::Kitty),\n NamedKey::F17 => (\"57380\", SequenceTerminator::Kitty),\n NamedKey::F18 => (\"57381\", SequenceTerminator::Kitty),\n NamedKey::F19 => 
(\"57382\", SequenceTerminator::Kitty),\n NamedKey::F20 => (\"57383\", SequenceTerminator::Kitty),\n NamedKey::F21 => (\"57384\", SequenceTerminator::Kitty),\n NamedKey::F22 => (\"57385\", SequenceTerminator::Kitty),\n NamedKey::F23 => (\"57386\", SequenceTerminator::Kitty),\n NamedKey::F24 => (\"57387\", SequenceTerminator::Kitty),\n NamedKey::F25 => (\"57388\", SequenceTerminator::Kitty),\n NamedKey::F26 => (\"57389\", SequenceTerminator::Kitty),\n NamedKey::F27 => (\"57390\", SequenceTerminator::Kitty),\n NamedKey::F28 => (\"57391\", SequenceTerminator::Kitty),\n NamedKey::F29 => (\"57392\", SequenceTerminator::Kitty),\n NamedKey::F30 => (\"57393\", SequenceTerminator::Kitty),\n NamedKey::F31 => (\"57394\", SequenceTerminator::Kitty),\n NamedKey::F32 => (\"57395\", SequenceTerminator::Kitty),\n NamedKey::F33 => (\"57396\", SequenceTerminator::Kitty),\n NamedKey::F34 => (\"57397\", SequenceTerminator::Kitty),\n NamedKey::F35 => (\"57398\", SequenceTerminator::Kitty),\n NamedKey::ScrollLock => (\"57359\", SequenceTerminator::Kitty),\n NamedKey::PrintScreen => (\"57361\", SequenceTerminator::Kitty),\n NamedKey::Pause => (\"57362\", SequenceTerminator::Kitty),\n NamedKey::ContextMenu => (\"57363\", SequenceTerminator::Kitty),\n NamedKey::MediaPlay => (\"57428\", SequenceTerminator::Kitty),\n NamedKey::MediaPause => (\"57429\", SequenceTerminator::Kitty),\n NamedKey::MediaPlayPause => (\"57430\", SequenceTerminator::Kitty),\n NamedKey::MediaStop => (\"57432\", SequenceTerminator::Kitty),\n NamedKey::MediaFastForward => (\"57433\", SequenceTerminator::Kitty),\n NamedKey::MediaRewind => (\"57434\", SequenceTerminator::Kitty),\n NamedKey::MediaTrackNext => (\"57435\", SequenceTerminator::Kitty),\n NamedKey::MediaTrackPrevious => (\"57436\", SequenceTerminator::Kitty),\n NamedKey::MediaRecord => (\"57437\", SequenceTerminator::Kitty),\n NamedKey::AudioVolumeDown => (\"57438\", SequenceTerminator::Kitty),\n NamedKey::AudioVolumeUp => (\"57439\", 
SequenceTerminator::Kitty),\n NamedKey::AudioVolumeMute => (\"57440\", SequenceTerminator::Kitty),\n _ => return None,\n };\n\n Some(SequenceBase::new(base.into(), terminator))\n }\n\n /// Try building from [`NamedKey`].\n fn try_build_named_normal(\n &self,\n key: &KeyEvent,\n has_associated_text: bool,\n ) -> Option<SequenceBase> {\n let named = match key.logical_key {\n Key::Named(named) => named,\n _ => return None,\n };\n\n // The default parameter is 1, so we can omit it.\n let one_based =\n if self.modifiers.is_empty() && !self.kitty_event_type && !has_associated_text {\n \"\"\n } else {\n \"1\"\n };\n let (base, terminator) = match named {\n NamedKey::PageUp => (\"5\", SequenceTerminator::Normal('~')),\n NamedKey::PageDown => (\"6\", SequenceTerminator::Normal('~')),\n NamedKey::Insert => (\"2\", SequenceTerminator::Normal('~')),\n NamedKey::Delete => (\"3\", SequenceTerminator::Normal('~')),\n NamedKey::Home => (one_based, SequenceTerminator::Normal('H')),\n NamedKey::End => (one_based, SequenceTerminator::Normal('F')),\n NamedKey::ArrowLeft => (one_based, SequenceTerminator::Normal('D')),\n NamedKey::ArrowRight => (one_based, SequenceTerminator::Normal('C')),\n NamedKey::ArrowUp => (one_based, SequenceTerminator::Normal('A')),\n NamedKey::ArrowDown => (one_based, SequenceTerminator::Normal('B')),\n NamedKey::F1 => (one_based, SequenceTerminator::Normal('P')),\n NamedKey::F2 => (one_based, SequenceTerminator::Normal('Q')),\n NamedKey::F3 => (one_based, SequenceTerminator::Normal('R')),\n NamedKey::F4 => (one_based, SequenceTerminator::Normal('S')),\n NamedKey::F5 => (\"15\", SequenceTerminator::Normal('~')),\n NamedKey::F6 => (\"17\", SequenceTerminator::Normal('~')),\n NamedKey::F7 => (\"18\", SequenceTerminator::Normal('~')),\n NamedKey::F8 => (\"19\", SequenceTerminator::Normal('~')),\n NamedKey::F9 => (\"20\", SequenceTerminator::Normal('~')),\n NamedKey::F10 => (\"21\", SequenceTerminator::Normal('~')),\n NamedKey::F11 => (\"23\", 
SequenceTerminator::Normal('~')),\n NamedKey::F12 => (\"24\", SequenceTerminator::Normal('~')),\n NamedKey::F13 => (\"25\", SequenceTerminator::Normal('~')),\n NamedKey::F14 => (\"26\", SequenceTerminator::Normal('~')),\n NamedKey::F15 => (\"28\", SequenceTerminator::Normal('~')),\n NamedKey::F16 => (\"29\", SequenceTerminator::Normal('~')),\n NamedKey::F17 => (\"31\", SequenceTerminator::Normal('~')),\n NamedKey::F18 => (\"32\", SequenceTerminator::Normal('~')),\n NamedKey::F19 => (\"33\", SequenceTerminator::Normal('~')),\n NamedKey::F20 => (\"34\", SequenceTerminator::Normal('~')),\n _ => return None,\n };\n\n Some(SequenceBase::new(base.into(), terminator))\n }\n\n /// Try building escape from control characters (e.g. Enter) and modifiers.\n fn try_build_control_char_or_mod(\n &self,\n key: &KeyEvent,\n mods: &mut SequenceModifiers,\n ) -> Option<SequenceBase> {\n if !self.kitty_encode_all && !self.kitty_seq {\n return None;\n }\n\n let named = match key.logical_key {\n Key::Named(named) => named,\n _ => return None,\n };\n\n let base = match named {\n NamedKey::Tab => \"9\",\n NamedKey::Enter => \"13\",\n NamedKey::Escape => \"27\",\n NamedKey::Space => \"32\",\n NamedKey::Backspace => \"127\",\n _ => \"\",\n };\n\n // Fail when the key is not a named control character and the active mode prohibits us\n // from encoding modifier keys.\n if !self.kitty_encode_all && base.is_empty() {\n return None;\n }\n\n let base = match (named, key.location) {\n (NamedKey::Shift, KeyLocation::Left) => \"57441\",\n (NamedKey::Control, KeyLocation::Left) => \"57442\",\n (NamedKey::Alt, KeyLocation::Left) => \"57443\",\n (NamedKey::Super, KeyLocation::Left) => \"57444\",\n (NamedKey::Hyper, KeyLocation::Left) => \"57445\",\n (NamedKey::Meta, KeyLocation::Left) => \"57446\",\n (NamedKey::Shift, _) => \"57447\",\n (NamedKey::Control, _) => \"57448\",\n (NamedKey::Alt, _) => \"57449\",\n (NamedKey::Super, _) => \"57450\",\n (NamedKey::Hyper, _) => \"57451\",\n (NamedKey::Meta, _) 
=> \"57452\",\n (NamedKey::CapsLock, _) => \"57358\",\n (NamedKey::NumLock, _) => \"57360\",\n _ => base,\n };\n\n // NOTE: Kitty's protocol mandates that the modifier state is applied before\n // key press, however winit sends them after the key press, so for modifiers\n // itself apply the state based on keysyms and not the _actual_ modifiers\n // state, which is how kitty is doing so and what is suggested in such case.\n let press = key.state.is_pressed();\n match named {\n NamedKey::Shift => mods.set(SequenceModifiers::SHIFT, press),\n NamedKey::Control => mods.set(SequenceModifiers::CONTROL, press),\n NamedKey::Alt => mods.set(SequenceModifiers::ALT, press),\n NamedKey::Super => mods.set(SequenceModifiers::SUPER, press),\n _ => (),\n }\n\n if base.is_empty() {\n None\n } else {\n Some(SequenceBase::new(base.into(), SequenceTerminator::Kitty))\n }\n }\n}",
"class_signature": "impl SequenceBuilder"
} |
cell_side | alacritty-master/alacritty/src/input/mod.rs | fn cell_side(&self, x: usize) -> Side {
let size_info = self.ctx.size_info();
let cell_x =
x.saturating_sub(size_info.padding_x() as usize) % size_info.cell_width() as usize;
let half_cell_width = (size_info.cell_width() / 2.0) as usize;
let additional_padding =
(size_info.width() - size_info.padding_x() * 2.) % size_info.cell_width();
let end_of_grid = size_info.width() - size_info.padding_x() - additional_padding;
if cell_x > half_cell_width
// Edge case when mouse leaves the window.
|| x as f32 >= end_of_grid
{
Side::Right
} else {
Side::Left
}
} | //! Handle input from winit.
//!
//! Certain key combinations should send some escape sequence back to the PTY.
//! In order to figure that out, state about which modifier keys are pressed
//! needs to be tracked. Additionally, we need a bit of a state machine to
//! determine what to do when a non-modifier key is pressed.
use std::borrow::Cow;
use std::cmp::{max, min, Ordering};
use std::collections::HashSet;
use std::ffi::OsStr;
use std::fmt::Debug;
use std::marker::PhantomData;
use std::mem;
use std::time::{Duration, Instant};
use log::debug;
use winit::dpi::PhysicalPosition;
use winit::event::{
ElementState, Modifiers, MouseButton, MouseScrollDelta, Touch as TouchEvent, TouchPhase,
};
#[cfg(target_os = "macos")]
use winit::event_loop::ActiveEventLoop;
use winit::keyboard::ModifiersState;
#[cfg(target_os = "macos")]
use winit::platform::macos::ActiveEventLoopExtMacOS;
use winit::window::CursorIcon;
use alacritty_terminal::event::EventListener;
use alacritty_terminal::grid::{Dimensions, Scroll};
use alacritty_terminal::index::{Boundary, Column, Direction, Point, Side};
use alacritty_terminal::selection::SelectionType;
use alacritty_terminal::term::search::Match;
use alacritty_terminal::term::{ClipboardType, Term, TermMode};
use alacritty_terminal::vi_mode::ViMotion;
use alacritty_terminal::vte::ansi::{ClearMode, Handler};
use crate::clipboard::Clipboard;
#[cfg(target_os = "macos")]
use crate::config::window::Decorations;
use crate::config::{Action, BindingMode, MouseAction, SearchAction, UiConfig, ViAction};
use crate::display::hint::HintMatch;
use crate::display::window::Window;
use crate::display::{Display, SizeInfo};
use crate::event::{
ClickState, Event, EventType, InlineSearchState, Mouse, TouchPurpose, TouchZoom,
};
use crate::message_bar::{self, Message};
use crate::scheduler::{Scheduler, TimerId, Topic};
/// Keyboard input handling.
pub mod keyboard;
/// Font size change interval in px.
pub const FONT_SIZE_STEP: f32 = 1.;
/// Interval for mouse scrolling during selection outside of the boundaries.
const SELECTION_SCROLLING_INTERVAL: Duration = Duration::from_millis(15);
/// Minimum number of pixels at the bottom/top where selection scrolling is performed.
const MIN_SELECTION_SCROLLING_HEIGHT: f64 = 5.;
/// Number of pixels for increasing the selection scrolling speed factor by one.
const SELECTION_SCROLLING_STEP: f64 = 20.;
/// Distance before a touch input is considered a drag.
const MAX_TAP_DISTANCE: f64 = 20.;
/// Threshold used for double/triple click detection.
const CLICK_THRESHOLD: Duration = Duration::from_millis(400);
/// Processes input from winit.
///
/// An escape sequence may be emitted in case specific keys or key combinations
/// are activated.
pub struct Processor<T: EventListener, A: ActionContext<T>> {
    /// Context the processor acts upon (terminal, window, display, UI state).
    pub ctx: A,
    // Ties the `T: EventListener` parameter to the struct without storing a value of it.
    _phantom: PhantomData<T>,
}
/// Context on which the input [`Processor`] performs its actions.
///
/// Many methods carry empty default implementations, so implementors only have
/// to provide the required accessors plus whatever behavior they support.
pub trait ActionContext<T: EventListener> {
    // Terminal output and rendering.
    fn write_to_pty<B: Into<Cow<'static, [u8]>>>(&self, _data: B) {}
    fn mark_dirty(&mut self) {}
    fn size_info(&self) -> SizeInfo;
    // Selection handling.
    fn copy_selection(&mut self, _ty: ClipboardType) {}
    fn start_selection(&mut self, _ty: SelectionType, _point: Point, _side: Side) {}
    fn toggle_selection(&mut self, _ty: SelectionType, _point: Point, _side: Side) {}
    fn update_selection(&mut self, _point: Point, _side: Side) {}
    fn clear_selection(&mut self) {}
    fn selection_is_empty(&self) -> bool;
    // Input device state.
    fn mouse_mut(&mut self) -> &mut Mouse;
    fn mouse(&self) -> &Mouse;
    fn touch_purpose(&mut self) -> &mut TouchPurpose;
    fn modifiers(&mut self) -> &mut Modifiers;
    fn scroll(&mut self, _scroll: Scroll) {}
    // Core component accessors.
    fn window(&mut self) -> &mut Window;
    fn display(&mut self) -> &mut Display;
    fn terminal(&self) -> &Term<T>;
    fn terminal_mut(&mut self) -> &mut Term<T>;
    // Window and instance management.
    fn spawn_new_instance(&mut self) {}
    #[cfg(target_os = "macos")]
    fn create_new_window(&mut self, _tabbing_id: Option<String>) {}
    #[cfg(not(target_os = "macos"))]
    fn create_new_window(&mut self) {}
    // Font size control.
    fn change_font_size(&mut self, _delta: f32) {}
    fn reset_font_size(&mut self) {}
    // Message bar access.
    fn pop_message(&mut self) {}
    fn message(&self) -> Option<&Message>;
    fn config(&self) -> &UiConfig;
    #[cfg(target_os = "macos")]
    fn event_loop(&self) -> &ActiveEventLoop;
    fn mouse_mode(&self) -> bool;
    fn clipboard_mut(&mut self) -> &mut Clipboard;
    fn scheduler_mut(&mut self) -> &mut Scheduler;
    // Regex search.
    fn start_search(&mut self, _direction: Direction) {}
    fn start_seeded_search(&mut self, _direction: Direction, _text: String) {}
    fn confirm_search(&mut self) {}
    fn cancel_search(&mut self) {}
    fn search_input(&mut self, _c: char) {}
    fn search_pop_word(&mut self) {}
    fn search_history_previous(&mut self) {}
    fn search_history_next(&mut self) {}
    fn search_next(&mut self, origin: Point, direction: Direction, side: Side) -> Option<Match>;
    fn advance_search_origin(&mut self, _direction: Direction) {}
    fn search_direction(&self) -> Direction;
    fn search_active(&self) -> bool;
    // Vi mode and inline search.
    fn on_typing_start(&mut self) {}
    fn toggle_vi_mode(&mut self) {}
    fn inline_search_state(&mut self) -> &mut InlineSearchState;
    fn start_inline_search(&mut self, _direction: Direction, _stop_short: bool) {}
    fn inline_search_next(&mut self) {}
    fn inline_search_input(&mut self, _text: &str) {}
    fn inline_search_previous(&mut self) {}
    // Hint handling.
    fn hint_input(&mut self, _character: char) {}
    fn trigger_hint(&mut self, _hint: &HintMatch) {}
    fn expand_selection(&mut self) {}
    fn semantic_word(&self, point: Point) -> String;
    fn on_terminal_input_start(&mut self) {}
    fn paste(&mut self, _text: &str, _bracketed: bool) {}
    fn spawn_daemon<I, S>(&self, _program: &str, _args: I)
    where
        I: IntoIterator<Item = S> + Debug + Copy,
        S: AsRef<OsStr>,
    {
    }
}
impl Action {
    /// Toggle a selection of type `ty` anchored at the vi mode cursor.
    fn toggle_selection<T, A>(ctx: &mut A, ty: SelectionType)
    where
        A: ActionContext<T>,
        T: EventListener,
    {
        let cursor_point = ctx.terminal().vi_mode_cursor.point;
        ctx.toggle_selection(ty, cursor_point, Side::Left);

        // Make sure initial selection is not empty.
        if let Some(selection) = ctx.terminal_mut().selection.as_mut() {
            selection.include_all();
        }
    }
}
/// Types which can be executed against an [`ActionContext`].
trait Execute<T: EventListener> {
    /// Perform this action on the provided context.
    fn execute<A: ActionContext<T>>(&self, ctx: &mut A);
}
impl<T: EventListener> Execute<T> for Action {
    /// Dispatch the action to the matching context operation.
    ///
    /// NOTE: Match arm order is significant — the guard arms for inactive
    /// vi/search mode must precede the specific vi/search arms.
    #[inline]
    fn execute<A: ActionContext<T>>(&self, ctx: &mut A) {
        match self {
            Action::Esc(s) => ctx.paste(s, false),
            Action::Command(program) => ctx.spawn_daemon(program.program(), program.args()),
            Action::Hint(hint) => {
                ctx.display().hint_state.start(hint.clone());
                ctx.mark_dirty();
            },
            Action::ToggleViMode => {
                ctx.on_typing_start();
                ctx.toggle_vi_mode()
            },
            // Vi actions are dropped while vi mode is inactive.
            action @ (Action::ViMotion(_) | Action::Vi(_))
                if !ctx.terminal().mode().contains(TermMode::VI) =>
            {
                debug!("Ignoring {action:?}: Vi mode inactive");
            },
            Action::ViMotion(motion) => {
                ctx.on_typing_start();
                ctx.terminal_mut().vi_motion(*motion);
                ctx.mark_dirty();
            },
            Action::Vi(ViAction::ToggleNormalSelection) => {
                Self::toggle_selection(ctx, SelectionType::Simple);
            },
            Action::Vi(ViAction::ToggleLineSelection) => {
                Self::toggle_selection(ctx, SelectionType::Lines);
            },
            Action::Vi(ViAction::ToggleBlockSelection) => {
                Self::toggle_selection(ctx, SelectionType::Block);
            },
            Action::Vi(ViAction::ToggleSemanticSelection) => {
                Self::toggle_selection(ctx, SelectionType::Semantic);
            },
            Action::Vi(ViAction::Open) => {
                let hint = ctx.display().vi_highlighted_hint.take();
                if let Some(hint) = &hint {
                    ctx.mouse_mut().block_hint_launcher = false;
                    ctx.trigger_hint(hint);
                }
                ctx.display().vi_highlighted_hint = hint;
            },
            Action::Vi(ViAction::SearchNext) => {
                ctx.on_typing_start();
                let terminal = ctx.terminal();
                let direction = ctx.search_direction();
                let vi_point = terminal.vi_mode_cursor.point;
                // Start searching one cell past the cursor so the current match is skipped.
                let origin = match direction {
                    Direction::Right => vi_point.add(terminal, Boundary::None, 1),
                    Direction::Left => vi_point.sub(terminal, Boundary::None, 1),
                };
                if let Some(regex_match) = ctx.search_next(origin, direction, Side::Left) {
                    ctx.terminal_mut().vi_goto_point(*regex_match.start());
                    ctx.mark_dirty();
                }
            },
            Action::Vi(ViAction::SearchPrevious) => {
                ctx.on_typing_start();
                let terminal = ctx.terminal();
                let direction = ctx.search_direction().opposite();
                let vi_point = terminal.vi_mode_cursor.point;
                let origin = match direction {
                    Direction::Right => vi_point.add(terminal, Boundary::None, 1),
                    Direction::Left => vi_point.sub(terminal, Boundary::None, 1),
                };
                if let Some(regex_match) = ctx.search_next(origin, direction, Side::Left) {
                    ctx.terminal_mut().vi_goto_point(*regex_match.start());
                    ctx.mark_dirty();
                }
            },
            Action::Vi(ViAction::SearchStart) => {
                let terminal = ctx.terminal();
                let origin = terminal.vi_mode_cursor.point.sub(terminal, Boundary::None, 1);
                if let Some(regex_match) = ctx.search_next(origin, Direction::Left, Side::Left) {
                    ctx.terminal_mut().vi_goto_point(*regex_match.start());
                    ctx.mark_dirty();
                }
            },
            Action::Vi(ViAction::SearchEnd) => {
                let terminal = ctx.terminal();
                let origin = terminal.vi_mode_cursor.point.add(terminal, Boundary::None, 1);
                if let Some(regex_match) = ctx.search_next(origin, Direction::Right, Side::Right) {
                    ctx.terminal_mut().vi_goto_point(*regex_match.end());
                    ctx.mark_dirty();
                }
            },
            Action::Vi(ViAction::CenterAroundViCursor) => {
                let term = ctx.terminal();
                let display_offset = term.grid().display_offset() as i32;
                // Scroll so the vi cursor line ends up in the middle of the screen.
                let target = -display_offset + term.screen_lines() as i32 / 2 - 1;
                let line = term.vi_mode_cursor.point.line;
                let scroll_lines = target - line.0;
                ctx.scroll(Scroll::Delta(scroll_lines));
            },
            Action::Vi(ViAction::InlineSearchForward) => {
                ctx.start_inline_search(Direction::Right, false)
            },
            Action::Vi(ViAction::InlineSearchBackward) => {
                ctx.start_inline_search(Direction::Left, false)
            },
            Action::Vi(ViAction::InlineSearchForwardShort) => {
                ctx.start_inline_search(Direction::Right, true)
            },
            Action::Vi(ViAction::InlineSearchBackwardShort) => {
                ctx.start_inline_search(Direction::Left, true)
            },
            Action::Vi(ViAction::InlineSearchNext) => ctx.inline_search_next(),
            Action::Vi(ViAction::InlineSearchPrevious) => ctx.inline_search_previous(),
            Action::Vi(ViAction::SemanticSearchForward | ViAction::SemanticSearchBackward) => {
                // Prefer the active selection as the search seed.
                let seed_text = match ctx.terminal().selection_to_string() {
                    Some(selection) if !selection.is_empty() => selection,
                    // Get semantic word at the vi cursor position.
                    _ => ctx.semantic_word(ctx.terminal().vi_mode_cursor.point),
                };
                if !seed_text.is_empty() {
                    let direction = match self {
                        Action::Vi(ViAction::SemanticSearchForward) => Direction::Right,
                        _ => Direction::Left,
                    };
                    ctx.start_seeded_search(direction, seed_text);
                }
            },
            // Search actions are dropped while search mode is inactive.
            action @ Action::Search(_) if !ctx.search_active() => {
                debug!("Ignoring {action:?}: Search mode inactive");
            },
            Action::Search(SearchAction::SearchFocusNext) => {
                ctx.advance_search_origin(ctx.search_direction());
            },
            Action::Search(SearchAction::SearchFocusPrevious) => {
                let direction = ctx.search_direction().opposite();
                ctx.advance_search_origin(direction);
            },
            Action::Search(SearchAction::SearchConfirm) => ctx.confirm_search(),
            Action::Search(SearchAction::SearchCancel) => ctx.cancel_search(),
            Action::Search(SearchAction::SearchClear) => {
                let direction = ctx.search_direction();
                ctx.cancel_search();
                ctx.start_search(direction);
            },
            Action::Search(SearchAction::SearchDeleteWord) => ctx.search_pop_word(),
            Action::Search(SearchAction::SearchHistoryPrevious) => ctx.search_history_previous(),
            Action::Search(SearchAction::SearchHistoryNext) => ctx.search_history_next(),
            Action::Mouse(MouseAction::ExpandSelection) => ctx.expand_selection(),
            Action::SearchForward => ctx.start_search(Direction::Right),
            Action::SearchBackward => ctx.start_search(Direction::Left),
            Action::Copy => ctx.copy_selection(ClipboardType::Clipboard),
            #[cfg(not(any(target_os = "macos", windows)))]
            Action::CopySelection => ctx.copy_selection(ClipboardType::Selection),
            Action::ClearSelection => ctx.clear_selection(),
            Action::Paste => {
                let text = ctx.clipboard_mut().load(ClipboardType::Clipboard);
                ctx.paste(&text, true);
            },
            Action::PasteSelection => {
                let text = ctx.clipboard_mut().load(ClipboardType::Selection);
                ctx.paste(&text, true);
            },
            Action::ToggleFullscreen => ctx.window().toggle_fullscreen(),
            Action::ToggleMaximized => ctx.window().toggle_maximized(),
            #[cfg(target_os = "macos")]
            Action::ToggleSimpleFullscreen => ctx.window().toggle_simple_fullscreen(),
            #[cfg(target_os = "macos")]
            Action::Hide => ctx.event_loop().hide_application(),
            #[cfg(target_os = "macos")]
            Action::HideOtherApplications => ctx.event_loop().hide_other_applications(),
            #[cfg(not(target_os = "macos"))]
            Action::Hide => ctx.window().set_visible(false),
            Action::Minimize => ctx.window().set_minimized(true),
            Action::Quit => {
                ctx.window().hold = false;
                ctx.terminal_mut().exit();
            },
            Action::IncreaseFontSize => ctx.change_font_size(FONT_SIZE_STEP),
            Action::DecreaseFontSize => ctx.change_font_size(-FONT_SIZE_STEP),
            Action::ResetFontSize => ctx.reset_font_size(),
            Action::ScrollPageUp
            | Action::ScrollPageDown
            | Action::ScrollHalfPageUp
            | Action::ScrollHalfPageDown => {
                // Move vi mode cursor.
                let term = ctx.terminal_mut();
                let (scroll, amount) = match self {
                    Action::ScrollPageUp => (Scroll::PageUp, term.screen_lines() as i32),
                    Action::ScrollPageDown => (Scroll::PageDown, -(term.screen_lines() as i32)),
                    Action::ScrollHalfPageUp => {
                        let amount = term.screen_lines() as i32 / 2;
                        (Scroll::Delta(amount), amount)
                    },
                    Action::ScrollHalfPageDown => {
                        let amount = -(term.screen_lines() as i32 / 2);
                        (Scroll::Delta(amount), amount)
                    },
                    _ => unreachable!(),
                };
                let old_vi_cursor = term.vi_mode_cursor;
                term.vi_mode_cursor = term.vi_mode_cursor.scroll(term, amount);
                if old_vi_cursor != term.vi_mode_cursor {
                    ctx.mark_dirty();
                }
                ctx.scroll(scroll);
            },
            Action::ScrollLineUp => ctx.scroll(Scroll::Delta(1)),
            Action::ScrollLineDown => ctx.scroll(Scroll::Delta(-1)),
            Action::ScrollToTop => {
                ctx.scroll(Scroll::Top);
                // Move vi mode cursor.
                let topmost_line = ctx.terminal().topmost_line();
                ctx.terminal_mut().vi_mode_cursor.point.line = topmost_line;
                ctx.terminal_mut().vi_motion(ViMotion::FirstOccupied);
                ctx.mark_dirty();
            },
            Action::ScrollToBottom => {
                ctx.scroll(Scroll::Bottom);
                // Move vi mode cursor.
                let term = ctx.terminal_mut();
                term.vi_mode_cursor.point.line = term.bottommost_line();
                // Move to beginning twice, to always jump across linewraps.
                term.vi_motion(ViMotion::FirstOccupied);
                term.vi_motion(ViMotion::FirstOccupied);
                ctx.mark_dirty();
            },
            Action::ClearHistory => ctx.terminal_mut().clear_screen(ClearMode::Saved),
            Action::ClearLogNotice => ctx.pop_message(),
            #[cfg(not(target_os = "macos"))]
            Action::CreateNewWindow => ctx.create_new_window(),
            Action::SpawnNewInstance => ctx.spawn_new_instance(),
            #[cfg(target_os = "macos")]
            Action::CreateNewWindow => ctx.create_new_window(None),
            #[cfg(target_os = "macos")]
            Action::CreateNewTab => {
                // Tabs on macOS are not possible without decorations.
                if ctx.config().window.decorations != Decorations::None {
                    let tabbing_id = Some(ctx.window().tabbing_id());
                    ctx.create_new_window(tabbing_id);
                }
            },
            #[cfg(target_os = "macos")]
            Action::SelectNextTab => ctx.window().select_next_tab(),
            #[cfg(target_os = "macos")]
            Action::SelectPreviousTab => ctx.window().select_previous_tab(),
            #[cfg(target_os = "macos")]
            Action::SelectTab1 => ctx.window().select_tab_at_index(0),
            #[cfg(target_os = "macos")]
            Action::SelectTab2 => ctx.window().select_tab_at_index(1),
            #[cfg(target_os = "macos")]
            Action::SelectTab3 => ctx.window().select_tab_at_index(2),
            #[cfg(target_os = "macos")]
            Action::SelectTab4 => ctx.window().select_tab_at_index(3),
            #[cfg(target_os = "macos")]
            Action::SelectTab5 => ctx.window().select_tab_at_index(4),
            #[cfg(target_os = "macos")]
            Action::SelectTab6 => ctx.window().select_tab_at_index(5),
            #[cfg(target_os = "macos")]
            Action::SelectTab7 => ctx.window().select_tab_at_index(6),
            #[cfg(target_os = "macos")]
            Action::SelectTab8 => ctx.window().select_tab_at_index(7),
            #[cfg(target_os = "macos")]
            Action::SelectTab9 => ctx.window().select_tab_at_index(8),
            #[cfg(target_os = "macos")]
            Action::SelectLastTab => ctx.window().select_last_tab(),
            // All remaining actions are explicitly ignored.
            _ => (),
        }
    }
}
impl<T: EventListener, A: ActionContext<T>> Processor<T, A> {
/// Build an input processor around the given action context.
pub fn new(ctx: A) -> Self {
    Self { _phantom: Default::default(), ctx }
}
/// Process a cursor motion event.
///
/// Updates the stored mouse position, refreshes the cursor icon and hint
/// state, and either extends the active selection or forwards a motion
/// report to the application, depending on terminal mode and modifiers.
#[inline]
pub fn mouse_moved(&mut self, position: PhysicalPosition<f64>) {
    let size_info = self.ctx.size_info();
    let (x, y) = position.into();
    let lmb_pressed = self.ctx.mouse().left_button_state == ElementState::Pressed;
    let rmb_pressed = self.ctx.mouse().right_button_state == ElementState::Pressed;
    // Keep scrolling while dragging a selection above/below the window.
    if !self.ctx.selection_is_empty() && (lmb_pressed || rmb_pressed) {
        self.update_selection_scrolling(y);
    }
    let display_offset = self.ctx.terminal().grid().display_offset();
    let old_point = self.ctx.mouse().point(&size_info, display_offset);
    // Clamp the position into the window, so leaving it maps to the edge cells.
    let x = x.clamp(0, size_info.width() as i32 - 1) as usize;
    let y = y.clamp(0, size_info.height() as i32 - 1) as usize;
    self.ctx.mouse_mut().x = x;
    self.ctx.mouse_mut().y = y;
    let inside_text_area = size_info.contains_point(x, y);
    let cell_side = self.cell_side(x);
    let point = self.ctx.mouse().point(&size_info, display_offset);
    let cell_changed = old_point != point;
    // If the mouse hasn't changed cells, do nothing.
    if !cell_changed
        && self.ctx.mouse().cell_side == cell_side
        && self.ctx.mouse().inside_text_area == inside_text_area
    {
        return;
    }
    self.ctx.mouse_mut().inside_text_area = inside_text_area;
    self.ctx.mouse_mut().cell_side = cell_side;
    // Update mouse state and check for URL change.
    let mouse_state = self.cursor_state();
    self.ctx.window().set_mouse_cursor(mouse_state);
    // Prompt hint highlight update.
    self.ctx.mouse_mut().hint_highlight_dirty = true;
    // Don't launch URLs if mouse has moved.
    self.ctx.mouse_mut().block_hint_launcher = true;
    if (lmb_pressed || rmb_pressed)
        && (self.ctx.modifiers().state().shift_key() || !self.ctx.mouse_mode())
    {
        // Shift (or no mouse mode) means the user is making a selection.
        self.ctx.update_selection(point, cell_side);
    } else if cell_changed
        && self.ctx.terminal().mode().intersects(TermMode::MOUSE_MOTION | TermMode::MOUSE_DRAG)
    {
        // Report drag motion with the pressed button encoded (32 + button),
        // or plain motion (35) when MOUSE_MOTION is set without a button held.
        if lmb_pressed {
            self.mouse_report(32, ElementState::Pressed);
        } else if self.ctx.mouse().middle_button_state == ElementState::Pressed {
            self.mouse_report(33, ElementState::Pressed);
        } else if self.ctx.mouse().right_button_state == ElementState::Pressed {
            self.mouse_report(34, ElementState::Pressed);
        } else if self.ctx.terminal().mode().contains(TermMode::MOUSE_MOTION) {
            self.mouse_report(35, ElementState::Pressed);
        }
    }
}
/// Check which side of a cell an X coordinate lies on.
fn cell_side(&self, x: usize) -> Side {
    let size_info = self.ctx.size_info();
    let cell_width = size_info.cell_width();

    // Horizontal offset of `x` within its cell.
    let offset_in_cell =
        x.saturating_sub(size_info.padding_x() as usize) % cell_width as usize;
    let half_cell_width = (cell_width / 2.0) as usize;

    // Leftover width which doesn't fit a whole number of cells.
    let additional_padding = (size_info.width() - size_info.padding_x() * 2.) % cell_width;
    let end_of_grid = size_info.width() - size_info.padding_x() - additional_padding;

    // Past the midpoint of the cell, or past the grid entirely (mouse left
    // the window), snap to the right side.
    if offset_in_cell > half_cell_width || x as f32 >= end_of_grid {
        Side::Right
    } else {
        Side::Left
    }
}
/// Forward a mouse event to the application.
///
/// Picks SGR or legacy encoding based on the terminal mode, after folding
/// the active keyboard modifiers into the button code.
fn mouse_report(&mut self, button: u8, state: ElementState) {
    let display_offset = self.ctx.terminal().grid().display_offset();
    let point = self.ctx.mouse().point(&self.ctx.size_info(), display_offset);

    // Assure the mouse point is not in the scrollback.
    if point.line < 0 {
        return;
    }

    // Encode modifiers into the xterm bitfield: Shift=4, Alt=8, Control=16.
    let modifiers = self.ctx.modifiers().state();
    let mods = 4 * u8::from(modifiers.shift_key())
        + 8 * u8::from(modifiers.alt_key())
        + 16 * u8::from(modifiers.control_key());

    // Report mouse events.
    if self.ctx.terminal().mode().contains(TermMode::SGR_MOUSE) {
        self.sgr_mouse_report(point, button + mods, state);
    } else if state == ElementState::Released {
        // Legacy encoding cannot express which button was released.
        self.normal_mouse_report(point, 3 + mods);
    } else {
        self.normal_mouse_report(point, button + mods);
    }
}
/// Emit a legacy (`CSI M`) encoded mouse event.
///
/// Coordinates are sent as single bytes offset by 32+1; with `UTF8_MOUSE`
/// mode set, values of 95 and above are encoded as two UTF-8 style bytes,
/// extending the addressable range.
fn normal_mouse_report(&mut self, point: Point, button: u8) {
    let Point { line, column } = point;
    let utf8 = self.ctx.terminal().mode().contains(TermMode::UTF8_MOUSE);
    // Positions beyond this cannot be encoded in the respective scheme.
    let max_point = if utf8 { 2015 } else { 223 };
    if line >= max_point || column >= max_point {
        return;
    }
    // `ESC [ M` prefix followed by the offset button byte.
    let mut msg = vec![b'\x1b', b'[', b'M', 32 + button];
    // Two-byte encoding of a coordinate, shaped like a UTF-8 sequence.
    let mouse_pos_encode = |pos: usize| -> Vec<u8> {
        let pos = 32 + 1 + pos;
        let first = 0xC0 + pos / 64;
        let second = 0x80 + (pos & 63);
        vec![first as u8, second as u8]
    };
    // From 95 on, the value no longer fits a single byte after the offset.
    if utf8 && column >= Column(95) {
        msg.append(&mut mouse_pos_encode(column.0));
    } else {
        msg.push(32 + 1 + column.0 as u8);
    }
    if utf8 && line >= 95 {
        msg.append(&mut mouse_pos_encode(line.0 as usize));
    } else {
        msg.push(32 + 1 + line.0 as u8);
    }
    self.ctx.write_to_pty(msg);
}
/// Emit an SGR (1006) encoded mouse event.
fn sgr_mouse_report(&mut self, point: Point, button: u8, state: ElementState) {
    // Press events terminate with 'M', release events with 'm'.
    let c = if state == ElementState::Pressed { 'M' } else { 'm' };
    let msg = format!("\x1b[<{};{};{}{}", button, point.column + 1, point.line + 1, c);
    self.ctx.write_to_pty(msg.into_bytes());
}
/// Handle a mouse button press.
///
/// With mouse reporting active (and Shift not held) the press is forwarded
/// to the application; otherwise the multi-click state machine is advanced
/// and a left click may begin a selection.
fn on_mouse_press(&mut self, button: MouseButton) {
    // Handle mouse mode.
    if !self.ctx.modifiers().state().shift_key() && self.ctx.mouse_mode() {
        self.ctx.mouse_mut().click_state = ClickState::None;
        let code = match button {
            MouseButton::Left => 0,
            MouseButton::Middle => 1,
            MouseButton::Right => 2,
            // Can't properly report more than three buttons..
            MouseButton::Back | MouseButton::Forward | MouseButton::Other(_) => return,
        };
        self.mouse_report(code, ElementState::Pressed);
    } else {
        // Calculate time since the last click to handle double/triple clicks.
        let now = Instant::now();
        let elapsed = now - self.ctx.mouse().last_click_timestamp;
        self.ctx.mouse_mut().last_click_timestamp = now;
        // Update multi-click state.
        self.ctx.mouse_mut().click_state = match self.ctx.mouse().click_state {
            // Reset click state if button has changed.
            _ if button != self.ctx.mouse().last_click_button => {
                self.ctx.mouse_mut().last_click_button = button;
                ClickState::Click
            },
            ClickState::Click if elapsed < CLICK_THRESHOLD => ClickState::DoubleClick,
            ClickState::DoubleClick if elapsed < CLICK_THRESHOLD => ClickState::TripleClick,
            _ => ClickState::Click,
        };
        // Load mouse point, treating message bar and padding as the closest cell.
        let display_offset = self.ctx.terminal().grid().display_offset();
        let point = self.ctx.mouse().point(&self.ctx.size_info(), display_offset);
        if let MouseButton::Left = button {
            self.on_left_click(point)
        }
    }
}
/// Handle left click selection and vi mode cursor movement.
fn on_left_click(&mut self, point: Point) {
    let side = self.ctx.mouse().cell_side;
    let control = self.ctx.modifiers().state().control_key();

    match self.ctx.mouse().click_state {
        ClickState::Click => {
            // Don't launch URLs if this click cleared the selection.
            self.ctx.mouse_mut().block_hint_launcher = !self.ctx.selection_is_empty();

            self.ctx.clear_selection();

            // Begin a fresh selection; Control switches to block selection.
            let ty = if control { SelectionType::Block } else { SelectionType::Simple };
            self.ctx.start_selection(ty, point, side);
        },
        // Double click selects semantically, triple click by line.
        ClickState::DoubleClick if !control => {
            self.ctx.mouse_mut().block_hint_launcher = true;
            self.ctx.start_selection(SelectionType::Semantic, point, side);
        },
        ClickState::TripleClick if !control => {
            self.ctx.mouse_mut().block_hint_launcher = true;
            self.ctx.start_selection(SelectionType::Lines, point, side);
        },
        _ => (),
    };

    // Move vi mode cursor to mouse click position.
    if self.ctx.terminal().mode().contains(TermMode::VI) && !self.ctx.search_active() {
        self.ctx.terminal_mut().vi_mode_cursor.point = point;
        self.ctx.mark_dirty();
    }
}
/// Handle a mouse button release.
///
/// Either forwards the release to the application (mouse mode), or triggers
/// highlighted hints, stops selection scrolling, and copies the selection.
fn on_mouse_release(&mut self, button: MouseButton) {
    if !self.ctx.modifiers().state().shift_key() && self.ctx.mouse_mode() {
        let code = match button {
            MouseButton::Left => 0,
            MouseButton::Middle => 1,
            MouseButton::Right => 2,
            // Can't properly report more than three buttons.
            MouseButton::Back | MouseButton::Forward | MouseButton::Other(_) => return,
        };
        self.mouse_report(code, ElementState::Released);
        return;
    }
    // Trigger hints highlighted by the mouse.
    // The hint is taken out and put back, so `trigger_hint` can borrow the
    // context mutably while the hint is held by value.
    let hint = self.ctx.display().highlighted_hint.take();
    if let Some(hint) = hint.as_ref().filter(|_| button == MouseButton::Left) {
        self.ctx.trigger_hint(hint);
    }
    self.ctx.display().highlighted_hint = hint;
    // Stop any in-progress selection auto-scrolling.
    let timer_id = TimerId::new(Topic::SelectionScrolling, self.ctx.window().id());
    self.ctx.scheduler_mut().unschedule(timer_id);
    if let MouseButton::Left | MouseButton::Right = button {
        // Copy selection on release, to prevent flooding the display server.
        self.ctx.copy_selection(ClipboardType::Selection);
    }
}
/// Process a mouse wheel or trackpad scroll event.
///
/// Line deltas are converted to pixels using the cell metrics; pixel deltas
/// are snapped to a single axis before being applied.
pub fn mouse_wheel_input(&mut self, delta: MouseScrollDelta, phase: TouchPhase) {
    let multiplier = self.ctx.config().scrolling.multiplier;
    match delta {
        MouseScrollDelta::LineDelta(columns, lines) => {
            // Convert line/column deltas into pixel distances.
            let new_scroll_px_x = columns * self.ctx.size_info().cell_width();
            let new_scroll_px_y = lines * self.ctx.size_info().cell_height();
            self.scroll_terminal(
                new_scroll_px_x as f64,
                new_scroll_px_y as f64,
                multiplier as f64,
            );
        },
        MouseScrollDelta::PixelDelta(mut lpos) => {
            match phase {
                TouchPhase::Started => {
                    // Reset offset to zero.
                    self.ctx.mouse_mut().accumulated_scroll = Default::default();
                },
                TouchPhase::Moved => {
                    // When the angle between (x, 0) and (x, y) is lower than ~25 degrees
                    // (cosine is larger that 0.9) we consider this scrolling as horizontal.
                    if lpos.x.abs() / lpos.x.hypot(lpos.y) > 0.9 {
                        lpos.y = 0.;
                    } else {
                        lpos.x = 0.;
                    }
                    self.scroll_terminal(lpos.x, lpos.y, multiplier as f64);
                },
                _ => (),
            }
        },
    }
}
/// Apply a pixel scroll delta to the terminal.
///
/// Deltas are accumulated across calls; whole cells are consumed and turned
/// into mouse wheel reports, arrow-key sequences (alternate scroll), or
/// display scrolling, with the sub-cell remainder kept for the next event.
fn scroll_terminal(&mut self, new_scroll_x_px: f64, new_scroll_y_px: f64, multiplier: f64) {
    // Wheel button codes for mouse reporting.
    const MOUSE_WHEEL_UP: u8 = 64;
    const MOUSE_WHEEL_DOWN: u8 = 65;
    const MOUSE_WHEEL_LEFT: u8 = 66;
    const MOUSE_WHEEL_RIGHT: u8 = 67;
    let width = f64::from(self.ctx.size_info().cell_width());
    let height = f64::from(self.ctx.size_info().cell_height());
    if self.ctx.mouse_mode() {
        // NOTE: the multiplier is deliberately not applied in mouse mode.
        self.ctx.mouse_mut().accumulated_scroll.x += new_scroll_x_px;
        self.ctx.mouse_mut().accumulated_scroll.y += new_scroll_y_px;
        // One wheel report per full cell of accumulated scroll.
        let code = if new_scroll_y_px > 0. { MOUSE_WHEEL_UP } else { MOUSE_WHEEL_DOWN };
        let lines = (self.ctx.mouse().accumulated_scroll.y / height).abs() as i32;
        for _ in 0..lines {
            self.mouse_report(code, ElementState::Pressed);
        }
        let code = if new_scroll_x_px > 0. { MOUSE_WHEEL_LEFT } else { MOUSE_WHEEL_RIGHT };
        let columns = (self.ctx.mouse().accumulated_scroll.x / width).abs() as i32;
        for _ in 0..columns {
            self.mouse_report(code, ElementState::Pressed);
        }
    } else if self
        .ctx
        .terminal()
        .mode()
        .contains(TermMode::ALT_SCREEN | TermMode::ALTERNATE_SCROLL)
        && !self.ctx.modifiers().state().shift_key()
    {
        self.ctx.mouse_mut().accumulated_scroll.x += new_scroll_x_px * multiplier;
        self.ctx.mouse_mut().accumulated_scroll.y += new_scroll_y_px * multiplier;
        // The chars here are the same as for the respective arrow keys.
        let line_cmd = if new_scroll_y_px > 0. { b'A' } else { b'B' };
        let column_cmd = if new_scroll_x_px > 0. { b'D' } else { b'C' };
        let lines = (self.ctx.mouse().accumulated_scroll.y / height).abs() as usize;
        let columns = (self.ctx.mouse().accumulated_scroll.x / width).abs() as usize;
        // Each key sequence is 3 bytes: ESC 'O' <cmd>.
        let mut content = Vec::with_capacity(3 * (lines + columns));
        for _ in 0..lines {
            content.push(0x1b);
            content.push(b'O');
            content.push(line_cmd);
        }
        for _ in 0..columns {
            content.push(0x1b);
            content.push(b'O');
            content.push(column_cmd);
        }
        self.ctx.write_to_pty(content);
    } else {
        // Plain display scrolling (history/scrollback).
        self.ctx.mouse_mut().accumulated_scroll.y += new_scroll_y_px * multiplier;
        let lines = (self.ctx.mouse().accumulated_scroll.y / height) as i32;
        if lines != 0 {
            self.ctx.scroll(Scroll::Delta(lines));
        }
    }
    // Keep only the sub-cell remainder for the next scroll event.
    self.ctx.mouse_mut().accumulated_scroll.x %= width;
    self.ctx.mouse_mut().accumulated_scroll.y %= height;
}
/// Report a window focus change to the application when focus
/// reporting (`FOCUS_IN_OUT`) is enabled.
pub fn on_focus_change(&mut self, is_focused: bool) {
    if !self.ctx.terminal().mode().contains(TermMode::FOCUS_IN_OUT) {
        return;
    }

    // "I" for focus-in, "O" for focus-out.
    let chr = if is_focused { "I" } else { "O" };
    self.ctx.write_to_pty(format!("\x1b[{chr}").into_bytes());
}
/// Handle touch input.
///
/// Dispatches the event to the start/motion/end handler for its phase.
pub fn touch(&mut self, touch: TouchEvent) {
    match touch.phase {
        TouchPhase::Moved => self.on_touch_motion(touch),
        TouchPhase::Started => self.on_touch_start(touch),
        TouchPhase::Cancelled | TouchPhase::Ended => self.on_touch_end(touch),
    }
}
/// Handle beginning of touch input.
///
/// Advances the touch purpose state machine: a first finger starts a tap, a
/// second turns it into a zoom gesture, and any further fingers (or a new
/// finger during scroll/select) invalidate the gesture.
pub fn on_touch_start(&mut self, touch: TouchEvent) {
    let touch_purpose = self.ctx.touch_purpose();
    *touch_purpose = match mem::take(touch_purpose) {
        TouchPurpose::None => TouchPurpose::Tap(touch),
        TouchPurpose::Tap(start) => TouchPurpose::Zoom(TouchZoom::new((start, touch))),
        TouchPurpose::Zoom(zoom) => TouchPurpose::Invalid(zoom.slots()),
        TouchPurpose::Scroll(event) | TouchPurpose::Select(event) => {
            // Track the active slot so the gesture only resets once all
            // fingers are lifted.
            let mut set = HashSet::default();
            set.insert(event.id);
            TouchPurpose::Invalid(set)
        },
        TouchPurpose::Invalid(mut slots) => {
            slots.insert(touch.id);
            TouchPurpose::Invalid(slots)
        },
    };
}
/// Handle touch input movement.
///
/// A tap becomes a selection or scroll gesture once the finger travels past
/// `MAX_TAP_DISTANCE`; established gestures update zoom, scrolling, or the
/// simulated mouse position.
pub fn on_touch_motion(&mut self, touch: TouchEvent) {
    let touch_purpose = self.ctx.touch_purpose();
    match touch_purpose {
        TouchPurpose::None => (),
        // Handle transition from tap to scroll/select.
        TouchPurpose::Tap(start) => {
            let delta_x = touch.location.x - start.location.x;
            let delta_y = touch.location.y - start.location.y;
            if delta_x.abs() > MAX_TAP_DISTANCE {
                // Update gesture state.
                let start_location = start.location;
                *touch_purpose = TouchPurpose::Select(*start);
                // Start simulated mouse input.
                self.mouse_moved(start_location);
                self.mouse_input(ElementState::Pressed, MouseButton::Left);
                // Apply motion since touch start (re-dispatch with new state).
                self.on_touch_motion(touch);
            } else if delta_y.abs() > MAX_TAP_DISTANCE {
                // Update gesture state.
                *touch_purpose = TouchPurpose::Scroll(*start);
                // Apply motion since touch start.
                self.on_touch_motion(touch);
            }
        },
        TouchPurpose::Zoom(zoom) => {
            let font_delta = zoom.font_delta(touch);
            self.ctx.change_font_size(font_delta);
        },
        TouchPurpose::Scroll(last_touch) => {
            // Calculate delta and update last touch position.
            let delta_y = touch.location.y - last_touch.location.y;
            *touch_purpose = TouchPurpose::Scroll(touch);
            // Use a fixed scroll factor for touchscreens, to accurately track finger motion.
            self.scroll_terminal(0., delta_y, 1.0);
        },
        TouchPurpose::Select(_) => self.mouse_moved(touch.location),
        TouchPurpose::Invalid(_) => (),
    }
}
/// Handle end of touch input.
///
/// Completes the active gesture: taps are converted into a simulated left
/// click, selections release the simulated button, and multi-finger states
/// reset once their last slot is lifted.
pub fn on_touch_end(&mut self, touch: TouchEvent) {
    // Finalize the touch motion up to the release point.
    self.on_touch_motion(touch);
    let touch_purpose = self.ctx.touch_purpose();
    match touch_purpose {
        // Simulate LMB clicks.
        TouchPurpose::Tap(start) => {
            let start_location = start.location;
            *touch_purpose = Default::default();
            self.mouse_moved(start_location);
            self.mouse_input(ElementState::Pressed, MouseButton::Left);
            self.mouse_input(ElementState::Released, MouseButton::Left);
        },
        // Invalidate zoom once a finger was released.
        TouchPurpose::Zoom(zoom) => {
            let mut slots = zoom.slots();
            slots.remove(&touch.id);
            *touch_purpose = TouchPurpose::Invalid(slots);
        },
        // Reset touch state once all slots were released.
        TouchPurpose::Invalid(slots) => {
            slots.remove(&touch.id);
            if slots.is_empty() {
                *touch_purpose = Default::default();
            }
        },
        // Release simulated LMB.
        TouchPurpose::Select(_) => {
            *touch_purpose = Default::default();
            self.mouse_input(ElementState::Released, MouseButton::Left);
        },
        // Reset touch state on scroll finish.
        TouchPurpose::Scroll(_) => *touch_purpose = Default::default(),
        TouchPurpose::None => (),
    }
}
/// Reset mouse cursor based on modifier and terminal state.
#[inline]
pub fn reset_mouse_cursor(&mut self) {
    let icon = self.cursor_state();
    self.ctx.window().set_mouse_cursor(icon);
}
/// Modifier state change.
pub fn modifiers_input(&mut self, modifiers: Modifiers) {
    *self.ctx.modifiers() = modifiers;

    // Modifier changes can affect hint highlighting, so force a refresh.
    self.ctx.mouse_mut().hint_highlight_dirty = true;

    // Recompute the cursor icon for the new modifier state.
    let icon = self.cursor_state();
    self.ctx.window().set_mouse_cursor(icon);
}
/// Process a mouse button press or release.
///
/// Tracks per-button state, intercepts clicks on the message bar close
/// button, and otherwise dispatches to press/release handling and mouse
/// bindings.
pub fn mouse_input(&mut self, state: ElementState, button: MouseButton) {
    match button {
        MouseButton::Left => self.ctx.mouse_mut().left_button_state = state,
        MouseButton::Middle => self.ctx.mouse_mut().middle_button_state = state,
        MouseButton::Right => self.ctx.mouse_mut().right_button_state = state,
        _ => (),
    }
    // Skip normal mouse events if the message bar has been clicked.
    if self.message_bar_cursor_state() == Some(CursorIcon::Pointer)
        && state == ElementState::Pressed
    {
        let size = self.ctx.size_info();
        let current_lines = self.ctx.message().map_or(0, |m| m.text(&size).len());
        self.ctx.clear_selection();
        self.ctx.pop_message();
        // Reset cursor when message bar height changed or all messages are gone.
        let new_lines = self.ctx.message().map_or(0, |m| m.text(&size).len());
        let new_icon = match current_lines.cmp(&new_lines) {
            Ordering::Less => CursorIcon::Default,
            Ordering::Equal => CursorIcon::Pointer,
            Ordering::Greater => {
                if self.ctx.mouse_mode() {
                    CursorIcon::Default
                } else {
                    CursorIcon::Text
                }
            },
        };
        self.ctx.window().set_mouse_cursor(new_icon);
    } else {
        match state {
            ElementState::Pressed => {
                // Process mouse press before bindings to update the `click_state`.
                self.on_mouse_press(button);
                self.process_mouse_bindings(button);
            },
            ElementState::Released => self.on_mouse_release(button),
        }
    }
}
/// Attempt to find a binding and execute its action.
///
/// The provided mode, mods, and key must match what is allowed by a binding
/// for its action to be executed.
fn process_mouse_bindings(&mut self, button: MouseButton) {
    let mode = BindingMode::new(self.ctx.terminal().mode(), self.ctx.search_active());
    let mouse_mode = self.ctx.mouse_mode();
    let mods = self.ctx.modifiers().state();
    // Clone the bindings so actions can mutate the context while iterating.
    let mouse_bindings = self.ctx.config().mouse_bindings().to_owned();
    // If mouse mode is active, also look for bindings without shift.
    let fallback_allowed = mouse_mode && mods.contains(ModifiersState::SHIFT);
    let mut exact_match_found = false;
    for binding in &mouse_bindings {
        // Don't trigger normal bindings in mouse mode unless Shift is pressed.
        if binding.is_triggered_by(mode, mods, &button) && (fallback_allowed || !mouse_mode) {
            binding.action.execute(&mut self.ctx);
            exact_match_found = true;
        }
    }
    // Second pass: retry with Shift stripped when no exact match fired.
    if fallback_allowed && !exact_match_found {
        let fallback_mods = mods & !ModifiersState::SHIFT;
        for binding in &mouse_bindings {
            if binding.is_triggered_by(mode, fallback_mods, &button) {
                binding.action.execute(&mut self.ctx);
            }
        }
    }
}
/// Check mouse icon state in relation to the message bar.
///
/// Returns `None` when no message is shown or the mouse is above the bar,
/// `Pointer` over the close button, and `Default` elsewhere on the bar.
fn message_bar_cursor_state(&self) -> Option<CursorIcon> {
    // Since search is above the message bar, the button is offset by search's height.
    let search_height = usize::from(self.ctx.search_active());
    // Calculate Y position of the end of the last terminal line.
    let size = self.ctx.size_info();
    let terminal_end = size.padding_y() as usize
        + size.cell_height() as usize * (size.screen_lines() + search_height);
    let mouse = self.ctx.mouse();
    let display_offset = self.ctx.terminal().grid().display_offset();
    let point = self.ctx.mouse().point(&self.ctx.size_info(), display_offset);
    if self.ctx.message().is_none() || (mouse.y <= terminal_end) {
        None
    } else if mouse.y <= terminal_end + size.cell_height() as usize
        // Close button lives in the top-right corner of the message bar.
        && point.column + message_bar::CLOSE_BUTTON_TEXT.len() >= size.columns()
    {
        Some(CursorIcon::Pointer)
    } else {
        Some(CursorIcon::Default)
    }
}
/// Icon state of the cursor.
///
/// Priority: message bar > highlighted hint > mouse-mode default > text.
fn cursor_state(&mut self) -> CursorIcon {
    let display_offset = self.ctx.terminal().grid().display_offset();
    let point = self.ctx.mouse().point(&self.ctx.size_info(), display_offset);
    let hyperlink = self.ctx.terminal().grid()[point].hyperlink();
    // Function to check if mouse is on top of a hint.
    let hint_highlighted = |hint: &HintMatch| hint.should_highlight(point, hyperlink.as_ref());
    if let Some(mouse_state) = self.message_bar_cursor_state() {
        mouse_state
    } else if self.ctx.display().highlighted_hint.as_ref().is_some_and(hint_highlighted) {
        CursorIcon::Pointer
    } else if !self.ctx.modifiers().state().shift_key() && self.ctx.mouse_mode() {
        CursorIcon::Default
    } else {
        CursorIcon::Text
    }
}
/// Handle automatic scrolling when selecting above/below the window.
///
/// Schedules a repeating scroll event whose speed grows with the distance
/// of the pointer from the scrolling region boundary; unschedules it when
/// the pointer is back inside the text area.
fn update_selection_scrolling(&mut self, mouse_y: i32) {
    let scale_factor = self.ctx.window().scale_factor;
    let size = self.ctx.size_info();
    let window_id = self.ctx.window().id();
    let scheduler = self.ctx.scheduler_mut();
    // Scale constants by DPI.
    let min_height = (MIN_SELECTION_SCROLLING_HEIGHT * scale_factor) as i32;
    let step = (SELECTION_SCROLLING_STEP * scale_factor) as i32;
    // Compute the height of the scrolling areas.
    let end_top = max(min_height, size.padding_y() as i32);
    let text_area_bottom = size.padding_y() + size.screen_lines() as f32 * size.cell_height();
    let start_bottom = min(size.height() as i32 - min_height, text_area_bottom as i32);
    // Get distance from closest window boundary.
    let delta = if mouse_y < end_top {
        end_top - mouse_y + step
    } else if mouse_y >= start_bottom {
        start_bottom - mouse_y - step
    } else {
        // Pointer is inside the text area; stop auto-scrolling.
        scheduler.unschedule(TimerId::new(Topic::SelectionScrolling, window_id));
        return;
    };
    // Scale number of lines scrolled based on distance to boundary.
    let event = Event::new(EventType::Scroll(Scroll::Delta(delta / step)), Some(window_id));
    // Schedule event.
    let timer_id = TimerId::new(Topic::SelectionScrolling, window_id);
    scheduler.unschedule(timer_id);
    scheduler.schedule(event, SELECTION_SCROLLING_INTERVAL, true, timer_id);
}
}
#[cfg(test)]
mod tests {
use super::*;
use winit::event::{DeviceId, Event as WinitEvent, WindowEvent};
use winit::keyboard::Key;
use winit::window::WindowId;
use alacritty_terminal::event::Event as TerminalEvent;
use crate::config::Binding;
use crate::message_bar::MessageBuffer;
// Key used by the binding-processing tests below.
const KEY: Key<&'static str> = Key::Character("0");

/// Event listener stub that ignores all terminal events.
struct MockEventProxy;
impl EventListener for MockEventProxy {}
/// Test double holding borrowed state for the `ActionContext` mock below.
struct ActionContext<'a, T> {
    pub terminal: &'a mut Term<T>,
    pub size_info: &'a SizeInfo,
    pub mouse: &'a mut Mouse,
    pub clipboard: &'a mut Clipboard,
    pub message_buffer: &'a mut MessageBuffer,
    pub modifiers: Modifiers,
    config: &'a UiConfig,
    inline_search_state: &'a mut InlineSearchState,
}
// Minimal mock: implements only what the click-state and binding tests
// exercise; everything the tests never reach is `unimplemented!()`.
impl<T: EventListener> super::ActionContext<T> for ActionContext<'_, T> {
    fn search_next(
        &mut self,
        _origin: Point,
        _direction: Direction,
        _side: Side,
    ) -> Option<Match> {
        None
    }
    fn search_direction(&self) -> Direction {
        Direction::Right
    }
    fn inline_search_state(&mut self) -> &mut InlineSearchState {
        self.inline_search_state
    }
    fn search_active(&self) -> bool {
        false
    }
    fn terminal(&self) -> &Term<T> {
        self.terminal
    }
    fn terminal_mut(&mut self) -> &mut Term<T> {
        self.terminal
    }
    fn size_info(&self) -> SizeInfo {
        *self.size_info
    }
    // Always empty so clicks never enter the selection-clearing path.
    fn selection_is_empty(&self) -> bool {
        true
    }
    fn scroll(&mut self, scroll: Scroll) {
        self.terminal.scroll_display(scroll);
    }
    // Mouse mode disabled, so presses go through the click-state machine.
    fn mouse_mode(&self) -> bool {
        false
    }
    #[inline]
    fn mouse_mut(&mut self) -> &mut Mouse {
        self.mouse
    }
    #[inline]
    fn mouse(&self) -> &Mouse {
        self.mouse
    }
    #[inline]
    fn touch_purpose(&mut self) -> &mut TouchPurpose {
        unimplemented!();
    }
    fn modifiers(&mut self) -> &mut Modifiers {
        &mut self.modifiers
    }
    fn window(&mut self) -> &mut Window {
        unimplemented!();
    }
    fn display(&mut self) -> &mut Display {
        unimplemented!();
    }
    fn pop_message(&mut self) {
        self.message_buffer.pop();
    }
    fn message(&self) -> Option<&Message> {
        self.message_buffer.message()
    }
    fn config(&self) -> &UiConfig {
        self.config
    }
    fn clipboard_mut(&mut self) -> &mut Clipboard {
        self.clipboard
    }
    #[cfg(target_os = "macos")]
    fn event_loop(&self) -> &ActiveEventLoop {
        unimplemented!();
    }
    fn scheduler_mut(&mut self) -> &mut Scheduler {
        unimplemented!();
    }
    fn semantic_word(&self, _point: Point) -> String {
        unimplemented!();
    }
}
// Builds a #[test] that feeds one winit mouse-input event through a fresh
// `Processor` (with an artificially aged `last_click_timestamp`) and asserts
// the resulting multi-click state.
macro_rules! test_clickstate {
    {
        name: $name:ident,
        initial_state: $initial_state:expr,
        initial_button: $initial_button:expr,
        input: $input:expr,
        end_state: $end_state:expr,
        input_delay: $input_delay:expr,
    } => {
        #[test]
        fn $name() {
            let mut clipboard = Clipboard::new_nop();
            let cfg = UiConfig::default();
            let size = SizeInfo::new(
                21.0,
                51.0,
                3.0,
                3.0,
                0.,
                0.,
                false,
            );
            let mut terminal = Term::new(cfg.term_options(), &size, MockEventProxy);
            let mut mouse = Mouse {
                click_state: $initial_state,
                last_click_button: $initial_button,
                // Backdate the last click to simulate the given delay.
                last_click_timestamp: Instant::now() - $input_delay,
                ..Mouse::default()
            };
            let mut inline_search_state = InlineSearchState::default();
            let mut message_buffer = MessageBuffer::default();
            let context = ActionContext {
                terminal: &mut terminal,
                mouse: &mut mouse,
                size_info: &size,
                clipboard: &mut clipboard,
                modifiers: Default::default(),
                message_buffer: &mut message_buffer,
                inline_search_state: &mut inline_search_state,
                config: &cfg,
            };
            let mut processor = Processor::new(context);
            let event: WinitEvent::<TerminalEvent> = $input;
            if let WinitEvent::WindowEvent {
                event: WindowEvent::MouseInput {
                    state,
                    button,
                    ..
                },
                ..
            } = event
            {
                processor.mouse_input(state, button);
            };
            assert_eq!(processor.ctx.mouse.click_state, $end_state);
        }
    }
}
// Builds a #[test] asserting whether a binding triggers for the given
// mode/modifier combination and the shared `KEY`.
macro_rules! test_process_binding {
    {
        name: $name:ident,
        binding: $binding:expr,
        triggers: $triggers:expr,
        mode: $mode:expr,
        mods: $mods:expr,
    } => {
        #[test]
        fn $name() {
            if $triggers {
                assert!($binding.is_triggered_by($mode, $mods, &KEY));
            } else {
                assert!(!$binding.is_triggered_by($mode, $mods, &KEY));
            }
        }
    }
}
// Click-state transitions: single clicks per button, double/triple clicks
// within the threshold, expiry after CLICK_THRESHOLD, and button changes.
test_clickstate! {
    name: single_click,
    initial_state: ClickState::None,
    initial_button: MouseButton::Other(0),
    input: WinitEvent::WindowEvent {
        event: WindowEvent::MouseInput {
            state: ElementState::Pressed,
            button: MouseButton::Left,
            device_id: DeviceId::dummy(),
        },
        window_id: WindowId::dummy(),
    },
    end_state: ClickState::Click,
    input_delay: Duration::ZERO,
}
test_clickstate! {
    name: single_right_click,
    initial_state: ClickState::None,
    initial_button: MouseButton::Other(0),
    input: WinitEvent::WindowEvent {
        event: WindowEvent::MouseInput {
            state: ElementState::Pressed,
            button: MouseButton::Right,
            device_id: DeviceId::dummy(),
        },
        window_id: WindowId::dummy(),
    },
    end_state: ClickState::Click,
    input_delay: Duration::ZERO,
}
test_clickstate! {
    name: single_middle_click,
    initial_state: ClickState::None,
    initial_button: MouseButton::Other(0),
    input: WinitEvent::WindowEvent {
        event: WindowEvent::MouseInput {
            state: ElementState::Pressed,
            button: MouseButton::Middle,
            device_id: DeviceId::dummy(),
        },
        window_id: WindowId::dummy(),
    },
    end_state: ClickState::Click,
    input_delay: Duration::ZERO,
}
test_clickstate! {
    name: double_click,
    initial_state: ClickState::Click,
    initial_button: MouseButton::Left,
    input: WinitEvent::WindowEvent {
        event: WindowEvent::MouseInput {
            state: ElementState::Pressed,
            button: MouseButton::Left,
            device_id: DeviceId::dummy(),
        },
        window_id: WindowId::dummy(),
    },
    end_state: ClickState::DoubleClick,
    input_delay: Duration::ZERO,
}
// A click exactly CLICK_THRESHOLD late falls back to a single click.
test_clickstate! {
    name: double_click_failed,
    initial_state: ClickState::Click,
    initial_button: MouseButton::Left,
    input: WinitEvent::WindowEvent {
        event: WindowEvent::MouseInput {
            state: ElementState::Pressed,
            button: MouseButton::Left,
            device_id: DeviceId::dummy(),
        },
        window_id: WindowId::dummy(),
    },
    end_state: ClickState::Click,
    input_delay: CLICK_THRESHOLD,
}
test_clickstate! {
    name: triple_click,
    initial_state: ClickState::DoubleClick,
    initial_button: MouseButton::Left,
    input: WinitEvent::WindowEvent {
        event: WindowEvent::MouseInput {
            state: ElementState::Pressed,
            button: MouseButton::Left,
            device_id: DeviceId::dummy(),
        },
        window_id: WindowId::dummy(),
    },
    end_state: ClickState::TripleClick,
    input_delay: Duration::ZERO,
}
test_clickstate! {
    name: triple_click_failed,
    initial_state: ClickState::DoubleClick,
    initial_button: MouseButton::Left,
    input: WinitEvent::WindowEvent {
        event: WindowEvent::MouseInput {
            state: ElementState::Pressed,
            button: MouseButton::Left,
            device_id: DeviceId::dummy(),
        },
        window_id: WindowId::dummy(),
    },
    end_state: ClickState::Click,
    input_delay: CLICK_THRESHOLD,
}
// Switching buttons resets the multi-click chain.
test_clickstate! {
    name: multi_click_separate_buttons,
    initial_state: ClickState::DoubleClick,
    initial_button: MouseButton::Left,
    input: WinitEvent::WindowEvent {
        event: WindowEvent::MouseInput {
            state: ElementState::Pressed,
            button: MouseButton::Right,
            device_id: DeviceId::dummy(),
        },
        window_id: WindowId::dummy(),
    },
    end_state: ClickState::Click,
    input_delay: Duration::ZERO,
}
// Binding trigger checks across modifier and mode combinations.
test_process_binding! {
    name: process_binding_nomode_shiftmod_require_shift,
    binding: Binding { trigger: KEY, mods: ModifiersState::SHIFT, action: Action::from("\x1b[1;2D"), mode: BindingMode::empty(), notmode: BindingMode::empty() },
    triggers: true,
    mode: BindingMode::empty(),
    mods: ModifiersState::SHIFT,
}
test_process_binding! {
    name: process_binding_nomode_nomod_require_shift,
    binding: Binding { trigger: KEY, mods: ModifiersState::SHIFT, action: Action::from("\x1b[1;2D"), mode: BindingMode::empty(), notmode: BindingMode::empty() },
    triggers: false,
    mode: BindingMode::empty(),
    mods: ModifiersState::empty(),
}
test_process_binding! {
    name: process_binding_nomode_controlmod,
    binding: Binding { trigger: KEY, mods: ModifiersState::CONTROL, action: Action::from("\x1b[1;5D"), mode: BindingMode::empty(), notmode: BindingMode::empty() },
    triggers: true,
    mode: BindingMode::empty(),
    mods: ModifiersState::CONTROL,
}
test_process_binding! {
    name: process_binding_nomode_nomod_require_not_appcursor,
    binding: Binding { trigger: KEY, mods: ModifiersState::empty(), action: Action::from("\x1b[D"), mode: BindingMode::empty(), notmode: BindingMode::APP_CURSOR },
    triggers: true,
    mode: BindingMode::empty(),
    mods: ModifiersState::empty(),
}
test_process_binding! {
    name: process_binding_appcursormode_nomod_require_appcursor,
    binding: Binding { trigger: KEY, mods: ModifiersState::empty(), action: Action::from("\x1bOD"), mode: BindingMode::APP_CURSOR, notmode: BindingMode::empty() },
    triggers: true,
    mode: BindingMode::APP_CURSOR,
    mods: ModifiersState::empty(),
}
test_process_binding! {
    name: process_binding_nomode_nomod_require_appcursor,
    binding: Binding { trigger: KEY, mods: ModifiersState::empty(), action: Action::from("\x1bOD"), mode: BindingMode::APP_CURSOR, notmode: BindingMode::empty() },
    triggers: false,
    mode: BindingMode::empty(),
    mods: ModifiersState::empty(),
}
test_process_binding! {
    name: process_binding_appcursormode_appkeypadmode_nomod_require_appcursor,
    binding: Binding { trigger: KEY, mods: ModifiersState::empty(), action: Action::from("\x1bOD"), mode: BindingMode::APP_CURSOR, notmode: BindingMode::empty() },
    triggers: true,
    mode: BindingMode::APP_CURSOR | BindingMode::APP_KEYPAD,
    mods: ModifiersState::empty(),
}
// Extra held modifiers beyond the binding's requirement must not trigger it.
test_process_binding! {
    name: process_binding_fail_with_extra_mods,
    binding: Binding { trigger: KEY, mods: ModifiersState::SUPER, action: Action::from("arst"), mode: BindingMode::empty(), notmode: BindingMode::empty() },
    triggers: false,
    mode: BindingMode::empty(),
    mods: ModifiersState::ALT | ModifiersState::SUPER,
}
}
| rust | {
"argument_definitions": [],
"end_line": 537,
"name": "cell_side",
"signature": "fn cell_side(&self, x: usize) -> Side",
"start_line": 518
} | {
"class_name": "impl<T: EventListener, A: ActionContext<T>> Processor<T, A> {\n pub fn new(ctx: A) -> Self {\n Self { ctx, _phantom: Default::default() }\n }\n\n #[inline]\n pub fn mouse_moved(&mut self, position: PhysicalPosition<f64>) {\n let size_info = self.ctx.size_info();\n\n let (x, y) = position.into();\n\n let lmb_pressed = self.ctx.mouse().left_button_state == ElementState::Pressed;\n let rmb_pressed = self.ctx.mouse().right_button_state == ElementState::Pressed;\n if !self.ctx.selection_is_empty() && (lmb_pressed || rmb_pressed) {\n self.update_selection_scrolling(y);\n }\n\n let display_offset = self.ctx.terminal().grid().display_offset();\n let old_point = self.ctx.mouse().point(&size_info, display_offset);\n\n let x = x.clamp(0, size_info.width() as i32 - 1) as usize;\n let y = y.clamp(0, size_info.height() as i32 - 1) as usize;\n self.ctx.mouse_mut().x = x;\n self.ctx.mouse_mut().y = y;\n\n let inside_text_area = size_info.contains_point(x, y);\n let cell_side = self.cell_side(x);\n\n let point = self.ctx.mouse().point(&size_info, display_offset);\n let cell_changed = old_point != point;\n\n // If the mouse hasn't changed cells, do nothing.\n if !cell_changed\n && self.ctx.mouse().cell_side == cell_side\n && self.ctx.mouse().inside_text_area == inside_text_area\n {\n return;\n }\n\n self.ctx.mouse_mut().inside_text_area = inside_text_area;\n self.ctx.mouse_mut().cell_side = cell_side;\n\n // Update mouse state and check for URL change.\n let mouse_state = self.cursor_state();\n self.ctx.window().set_mouse_cursor(mouse_state);\n\n // Prompt hint highlight update.\n self.ctx.mouse_mut().hint_highlight_dirty = true;\n\n // Don't launch URLs if mouse has moved.\n self.ctx.mouse_mut().block_hint_launcher = true;\n\n if (lmb_pressed || rmb_pressed)\n && (self.ctx.modifiers().state().shift_key() || !self.ctx.mouse_mode())\n {\n self.ctx.update_selection(point, cell_side);\n } else if cell_changed\n && 
self.ctx.terminal().mode().intersects(TermMode::MOUSE_MOTION | TermMode::MOUSE_DRAG)\n {\n if lmb_pressed {\n self.mouse_report(32, ElementState::Pressed);\n } else if self.ctx.mouse().middle_button_state == ElementState::Pressed {\n self.mouse_report(33, ElementState::Pressed);\n } else if self.ctx.mouse().right_button_state == ElementState::Pressed {\n self.mouse_report(34, ElementState::Pressed);\n } else if self.ctx.terminal().mode().contains(TermMode::MOUSE_MOTION) {\n self.mouse_report(35, ElementState::Pressed);\n }\n }\n }\n\n /// Check which side of a cell an X coordinate lies on.\n fn cell_side(&self, x: usize) -> Side {\n let size_info = self.ctx.size_info();\n\n let cell_x =\n x.saturating_sub(size_info.padding_x() as usize) % size_info.cell_width() as usize;\n let half_cell_width = (size_info.cell_width() / 2.0) as usize;\n\n let additional_padding =\n (size_info.width() - size_info.padding_x() * 2.) % size_info.cell_width();\n let end_of_grid = size_info.width() - size_info.padding_x() - additional_padding;\n\n if cell_x > half_cell_width\n // Edge case when mouse leaves the window.\n || x as f32 >= end_of_grid\n {\n Side::Right\n } else {\n Side::Left\n }\n }\n\n fn mouse_report(&mut self, button: u8, state: ElementState) {\n let display_offset = self.ctx.terminal().grid().display_offset();\n let point = self.ctx.mouse().point(&self.ctx.size_info(), display_offset);\n\n // Assure the mouse point is not in the scrollback.\n if point.line < 0 {\n return;\n }\n\n // Calculate modifiers value.\n let mut mods = 0;\n let modifiers = self.ctx.modifiers().state();\n if modifiers.shift_key() {\n mods += 4;\n }\n if modifiers.alt_key() {\n mods += 8;\n }\n if modifiers.control_key() {\n mods += 16;\n }\n\n // Report mouse events.\n if self.ctx.terminal().mode().contains(TermMode::SGR_MOUSE) {\n self.sgr_mouse_report(point, button + mods, state);\n } else if let ElementState::Released = state {\n self.normal_mouse_report(point, 3 + mods);\n } else {\n 
self.normal_mouse_report(point, button + mods);\n }\n }\n\n fn normal_mouse_report(&mut self, point: Point, button: u8) {\n let Point { line, column } = point;\n let utf8 = self.ctx.terminal().mode().contains(TermMode::UTF8_MOUSE);\n\n let max_point = if utf8 { 2015 } else { 223 };\n\n if line >= max_point || column >= max_point {\n return;\n }\n\n let mut msg = vec![b'\\x1b', b'[', b'M', 32 + button];\n\n let mouse_pos_encode = |pos: usize| -> Vec<u8> {\n let pos = 32 + 1 + pos;\n let first = 0xC0 + pos / 64;\n let second = 0x80 + (pos & 63);\n vec![first as u8, second as u8]\n };\n\n if utf8 && column >= Column(95) {\n msg.append(&mut mouse_pos_encode(column.0));\n } else {\n msg.push(32 + 1 + column.0 as u8);\n }\n\n if utf8 && line >= 95 {\n msg.append(&mut mouse_pos_encode(line.0 as usize));\n } else {\n msg.push(32 + 1 + line.0 as u8);\n }\n\n self.ctx.write_to_pty(msg);\n }\n\n fn sgr_mouse_report(&mut self, point: Point, button: u8, state: ElementState) {\n let c = match state {\n ElementState::Pressed => 'M',\n ElementState::Released => 'm',\n };\n\n let msg = format!(\"\\x1b[<{};{};{}{}\", button, point.column + 1, point.line + 1, c);\n self.ctx.write_to_pty(msg.into_bytes());\n }\n\n fn on_mouse_press(&mut self, button: MouseButton) {\n // Handle mouse mode.\n if !self.ctx.modifiers().state().shift_key() && self.ctx.mouse_mode() {\n self.ctx.mouse_mut().click_state = ClickState::None;\n\n let code = match button {\n MouseButton::Left => 0,\n MouseButton::Middle => 1,\n MouseButton::Right => 2,\n // Can't properly report more than three buttons..\n MouseButton::Back | MouseButton::Forward | MouseButton::Other(_) => return,\n };\n\n self.mouse_report(code, ElementState::Pressed);\n } else {\n // Calculate time since the last click to handle double/triple clicks.\n let now = Instant::now();\n let elapsed = now - self.ctx.mouse().last_click_timestamp;\n self.ctx.mouse_mut().last_click_timestamp = now;\n\n // Update multi-click state.\n 
self.ctx.mouse_mut().click_state = match self.ctx.mouse().click_state {\n // Reset click state if button has changed.\n _ if button != self.ctx.mouse().last_click_button => {\n self.ctx.mouse_mut().last_click_button = button;\n ClickState::Click\n },\n ClickState::Click if elapsed < CLICK_THRESHOLD => ClickState::DoubleClick,\n ClickState::DoubleClick if elapsed < CLICK_THRESHOLD => ClickState::TripleClick,\n _ => ClickState::Click,\n };\n\n // Load mouse point, treating message bar and padding as the closest cell.\n let display_offset = self.ctx.terminal().grid().display_offset();\n let point = self.ctx.mouse().point(&self.ctx.size_info(), display_offset);\n\n if let MouseButton::Left = button {\n self.on_left_click(point)\n }\n }\n }\n\n /// Handle left click selection and vi mode cursor movement.\n fn on_left_click(&mut self, point: Point) {\n let side = self.ctx.mouse().cell_side;\n let control = self.ctx.modifiers().state().control_key();\n\n match self.ctx.mouse().click_state {\n ClickState::Click => {\n // Don't launch URLs if this click cleared the selection.\n self.ctx.mouse_mut().block_hint_launcher = !self.ctx.selection_is_empty();\n\n self.ctx.clear_selection();\n\n // Start new empty selection.\n if control {\n self.ctx.start_selection(SelectionType::Block, point, side);\n } else {\n self.ctx.start_selection(SelectionType::Simple, point, side);\n }\n },\n ClickState::DoubleClick if !control => {\n self.ctx.mouse_mut().block_hint_launcher = true;\n self.ctx.start_selection(SelectionType::Semantic, point, side);\n },\n ClickState::TripleClick if !control => {\n self.ctx.mouse_mut().block_hint_launcher = true;\n self.ctx.start_selection(SelectionType::Lines, point, side);\n },\n _ => (),\n };\n\n // Move vi mode cursor to mouse click position.\n if self.ctx.terminal().mode().contains(TermMode::VI) && !self.ctx.search_active() {\n self.ctx.terminal_mut().vi_mode_cursor.point = point;\n self.ctx.mark_dirty();\n }\n }\n\n fn on_mouse_release(&mut self, 
button: MouseButton) {\n if !self.ctx.modifiers().state().shift_key() && self.ctx.mouse_mode() {\n let code = match button {\n MouseButton::Left => 0,\n MouseButton::Middle => 1,\n MouseButton::Right => 2,\n // Can't properly report more than three buttons.\n MouseButton::Back | MouseButton::Forward | MouseButton::Other(_) => return,\n };\n self.mouse_report(code, ElementState::Released);\n return;\n }\n\n // Trigger hints highlighted by the mouse.\n let hint = self.ctx.display().highlighted_hint.take();\n if let Some(hint) = hint.as_ref().filter(|_| button == MouseButton::Left) {\n self.ctx.trigger_hint(hint);\n }\n self.ctx.display().highlighted_hint = hint;\n\n let timer_id = TimerId::new(Topic::SelectionScrolling, self.ctx.window().id());\n self.ctx.scheduler_mut().unschedule(timer_id);\n\n if let MouseButton::Left | MouseButton::Right = button {\n // Copy selection on release, to prevent flooding the display server.\n self.ctx.copy_selection(ClipboardType::Selection);\n }\n }\n\n pub fn mouse_wheel_input(&mut self, delta: MouseScrollDelta, phase: TouchPhase) {\n let multiplier = self.ctx.config().scrolling.multiplier;\n match delta {\n MouseScrollDelta::LineDelta(columns, lines) => {\n let new_scroll_px_x = columns * self.ctx.size_info().cell_width();\n let new_scroll_px_y = lines * self.ctx.size_info().cell_height();\n self.scroll_terminal(\n new_scroll_px_x as f64,\n new_scroll_px_y as f64,\n multiplier as f64,\n );\n },\n MouseScrollDelta::PixelDelta(mut lpos) => {\n match phase {\n TouchPhase::Started => {\n // Reset offset to zero.\n self.ctx.mouse_mut().accumulated_scroll = Default::default();\n },\n TouchPhase::Moved => {\n // When the angle between (x, 0) and (x, y) is lower than ~25 degrees\n // (cosine is larger that 0.9) we consider this scrolling as horizontal.\n if lpos.x.abs() / lpos.x.hypot(lpos.y) > 0.9 {\n lpos.y = 0.;\n } else {\n lpos.x = 0.;\n }\n\n self.scroll_terminal(lpos.x, lpos.y, multiplier as f64);\n },\n _ => (),\n }\n },\n }\n 
}\n\n fn scroll_terminal(&mut self, new_scroll_x_px: f64, new_scroll_y_px: f64, multiplier: f64) {\n const MOUSE_WHEEL_UP: u8 = 64;\n const MOUSE_WHEEL_DOWN: u8 = 65;\n const MOUSE_WHEEL_LEFT: u8 = 66;\n const MOUSE_WHEEL_RIGHT: u8 = 67;\n\n let width = f64::from(self.ctx.size_info().cell_width());\n let height = f64::from(self.ctx.size_info().cell_height());\n\n if self.ctx.mouse_mode() {\n self.ctx.mouse_mut().accumulated_scroll.x += new_scroll_x_px;\n self.ctx.mouse_mut().accumulated_scroll.y += new_scroll_y_px;\n\n let code = if new_scroll_y_px > 0. { MOUSE_WHEEL_UP } else { MOUSE_WHEEL_DOWN };\n let lines = (self.ctx.mouse().accumulated_scroll.y / height).abs() as i32;\n\n for _ in 0..lines {\n self.mouse_report(code, ElementState::Pressed);\n }\n\n let code = if new_scroll_x_px > 0. { MOUSE_WHEEL_LEFT } else { MOUSE_WHEEL_RIGHT };\n let columns = (self.ctx.mouse().accumulated_scroll.x / width).abs() as i32;\n\n for _ in 0..columns {\n self.mouse_report(code, ElementState::Pressed);\n }\n } else if self\n .ctx\n .terminal()\n .mode()\n .contains(TermMode::ALT_SCREEN | TermMode::ALTERNATE_SCROLL)\n && !self.ctx.modifiers().state().shift_key()\n {\n self.ctx.mouse_mut().accumulated_scroll.x += new_scroll_x_px * multiplier;\n self.ctx.mouse_mut().accumulated_scroll.y += new_scroll_y_px * multiplier;\n\n // The chars here are the same as for the respective arrow keys.\n let line_cmd = if new_scroll_y_px > 0. { b'A' } else { b'B' };\n let column_cmd = if new_scroll_x_px > 0. 
{ b'D' } else { b'C' };\n\n let lines = (self.ctx.mouse().accumulated_scroll.y / height).abs() as usize;\n let columns = (self.ctx.mouse().accumulated_scroll.x / width).abs() as usize;\n\n let mut content = Vec::with_capacity(3 * (lines + columns));\n\n for _ in 0..lines {\n content.push(0x1b);\n content.push(b'O');\n content.push(line_cmd);\n }\n\n for _ in 0..columns {\n content.push(0x1b);\n content.push(b'O');\n content.push(column_cmd);\n }\n\n self.ctx.write_to_pty(content);\n } else {\n self.ctx.mouse_mut().accumulated_scroll.y += new_scroll_y_px * multiplier;\n\n let lines = (self.ctx.mouse().accumulated_scroll.y / height) as i32;\n\n if lines != 0 {\n self.ctx.scroll(Scroll::Delta(lines));\n }\n }\n\n self.ctx.mouse_mut().accumulated_scroll.x %= width;\n self.ctx.mouse_mut().accumulated_scroll.y %= height;\n }\n\n pub fn on_focus_change(&mut self, is_focused: bool) {\n if self.ctx.terminal().mode().contains(TermMode::FOCUS_IN_OUT) {\n let chr = if is_focused { \"I\" } else { \"O\" };\n\n let msg = format!(\"\\x1b[{chr}\");\n self.ctx.write_to_pty(msg.into_bytes());\n }\n }\n\n /// Handle touch input.\n pub fn touch(&mut self, touch: TouchEvent) {\n match touch.phase {\n TouchPhase::Started => self.on_touch_start(touch),\n TouchPhase::Moved => self.on_touch_motion(touch),\n TouchPhase::Ended | TouchPhase::Cancelled => self.on_touch_end(touch),\n }\n }\n\n /// Handle beginning of touch input.\n pub fn on_touch_start(&mut self, touch: TouchEvent) {\n let touch_purpose = self.ctx.touch_purpose();\n *touch_purpose = match mem::take(touch_purpose) {\n TouchPurpose::None => TouchPurpose::Tap(touch),\n TouchPurpose::Tap(start) => TouchPurpose::Zoom(TouchZoom::new((start, touch))),\n TouchPurpose::Zoom(zoom) => TouchPurpose::Invalid(zoom.slots()),\n TouchPurpose::Scroll(event) | TouchPurpose::Select(event) => {\n let mut set = HashSet::default();\n set.insert(event.id);\n TouchPurpose::Invalid(set)\n },\n TouchPurpose::Invalid(mut slots) => {\n 
slots.insert(touch.id);\n TouchPurpose::Invalid(slots)\n },\n };\n }\n\n /// Handle touch input movement.\n pub fn on_touch_motion(&mut self, touch: TouchEvent) {\n let touch_purpose = self.ctx.touch_purpose();\n match touch_purpose {\n TouchPurpose::None => (),\n // Handle transition from tap to scroll/select.\n TouchPurpose::Tap(start) => {\n let delta_x = touch.location.x - start.location.x;\n let delta_y = touch.location.y - start.location.y;\n if delta_x.abs() > MAX_TAP_DISTANCE {\n // Update gesture state.\n let start_location = start.location;\n *touch_purpose = TouchPurpose::Select(*start);\n\n // Start simulated mouse input.\n self.mouse_moved(start_location);\n self.mouse_input(ElementState::Pressed, MouseButton::Left);\n\n // Apply motion since touch start.\n self.on_touch_motion(touch);\n } else if delta_y.abs() > MAX_TAP_DISTANCE {\n // Update gesture state.\n *touch_purpose = TouchPurpose::Scroll(*start);\n\n // Apply motion since touch start.\n self.on_touch_motion(touch);\n }\n },\n TouchPurpose::Zoom(zoom) => {\n let font_delta = zoom.font_delta(touch);\n self.ctx.change_font_size(font_delta);\n },\n TouchPurpose::Scroll(last_touch) => {\n // Calculate delta and update last touch position.\n let delta_y = touch.location.y - last_touch.location.y;\n *touch_purpose = TouchPurpose::Scroll(touch);\n\n // Use a fixed scroll factor for touchscreens, to accurately track finger motion.\n self.scroll_terminal(0., delta_y, 1.0);\n },\n TouchPurpose::Select(_) => self.mouse_moved(touch.location),\n TouchPurpose::Invalid(_) => (),\n }\n }\n\n /// Handle end of touch input.\n pub fn on_touch_end(&mut self, touch: TouchEvent) {\n // Finalize the touch motion up to the release point.\n self.on_touch_motion(touch);\n\n let touch_purpose = self.ctx.touch_purpose();\n match touch_purpose {\n // Simulate LMB clicks.\n TouchPurpose::Tap(start) => {\n let start_location = start.location;\n *touch_purpose = Default::default();\n\n self.mouse_moved(start_location);\n 
self.mouse_input(ElementState::Pressed, MouseButton::Left);\n self.mouse_input(ElementState::Released, MouseButton::Left);\n },\n // Invalidate zoom once a finger was released.\n TouchPurpose::Zoom(zoom) => {\n let mut slots = zoom.slots();\n slots.remove(&touch.id);\n *touch_purpose = TouchPurpose::Invalid(slots);\n },\n // Reset touch state once all slots were released.\n TouchPurpose::Invalid(slots) => {\n slots.remove(&touch.id);\n if slots.is_empty() {\n *touch_purpose = Default::default();\n }\n },\n // Release simulated LMB.\n TouchPurpose::Select(_) => {\n *touch_purpose = Default::default();\n self.mouse_input(ElementState::Released, MouseButton::Left);\n },\n // Reset touch state on scroll finish.\n TouchPurpose::Scroll(_) => *touch_purpose = Default::default(),\n TouchPurpose::None => (),\n }\n }\n\n /// Reset mouse cursor based on modifier and terminal state.\n #[inline]\n pub fn reset_mouse_cursor(&mut self) {\n let mouse_state = self.cursor_state();\n self.ctx.window().set_mouse_cursor(mouse_state);\n }\n\n /// Modifier state change.\n pub fn modifiers_input(&mut self, modifiers: Modifiers) {\n *self.ctx.modifiers() = modifiers;\n\n // Prompt hint highlight update.\n self.ctx.mouse_mut().hint_highlight_dirty = true;\n\n // Update mouse state and check for URL change.\n let mouse_state = self.cursor_state();\n self.ctx.window().set_mouse_cursor(mouse_state);\n }\n\n pub fn mouse_input(&mut self, state: ElementState, button: MouseButton) {\n match button {\n MouseButton::Left => self.ctx.mouse_mut().left_button_state = state,\n MouseButton::Middle => self.ctx.mouse_mut().middle_button_state = state,\n MouseButton::Right => self.ctx.mouse_mut().right_button_state = state,\n _ => (),\n }\n\n // Skip normal mouse events if the message bar has been clicked.\n if self.message_bar_cursor_state() == Some(CursorIcon::Pointer)\n && state == ElementState::Pressed\n {\n let size = self.ctx.size_info();\n\n let current_lines = self.ctx.message().map_or(0, |m| 
m.text(&size).len());\n\n self.ctx.clear_selection();\n self.ctx.pop_message();\n\n // Reset cursor when message bar height changed or all messages are gone.\n let new_lines = self.ctx.message().map_or(0, |m| m.text(&size).len());\n\n let new_icon = match current_lines.cmp(&new_lines) {\n Ordering::Less => CursorIcon::Default,\n Ordering::Equal => CursorIcon::Pointer,\n Ordering::Greater => {\n if self.ctx.mouse_mode() {\n CursorIcon::Default\n } else {\n CursorIcon::Text\n }\n },\n };\n\n self.ctx.window().set_mouse_cursor(new_icon);\n } else {\n match state {\n ElementState::Pressed => {\n // Process mouse press before bindings to update the `click_state`.\n self.on_mouse_press(button);\n self.process_mouse_bindings(button);\n },\n ElementState::Released => self.on_mouse_release(button),\n }\n }\n }\n\n /// Attempt to find a binding and execute its action.\n ///\n /// The provided mode, mods, and key must match what is allowed by a binding\n /// for its action to be executed.\n fn process_mouse_bindings(&mut self, button: MouseButton) {\n let mode = BindingMode::new(self.ctx.terminal().mode(), self.ctx.search_active());\n let mouse_mode = self.ctx.mouse_mode();\n let mods = self.ctx.modifiers().state();\n let mouse_bindings = self.ctx.config().mouse_bindings().to_owned();\n\n // If mouse mode is active, also look for bindings without shift.\n let fallback_allowed = mouse_mode && mods.contains(ModifiersState::SHIFT);\n let mut exact_match_found = false;\n\n for binding in &mouse_bindings {\n // Don't trigger normal bindings in mouse mode unless Shift is pressed.\n if binding.is_triggered_by(mode, mods, &button) && (fallback_allowed || !mouse_mode) {\n binding.action.execute(&mut self.ctx);\n exact_match_found = true;\n }\n }\n\n if fallback_allowed && !exact_match_found {\n let fallback_mods = mods & !ModifiersState::SHIFT;\n for binding in &mouse_bindings {\n if binding.is_triggered_by(mode, fallback_mods, &button) {\n binding.action.execute(&mut self.ctx);\n }\n 
}\n }\n }\n\n /// Check mouse icon state in relation to the message bar.\n fn message_bar_cursor_state(&self) -> Option<CursorIcon> {\n // Since search is above the message bar, the button is offset by search's height.\n let search_height = usize::from(self.ctx.search_active());\n\n // Calculate Y position of the end of the last terminal line.\n let size = self.ctx.size_info();\n let terminal_end = size.padding_y() as usize\n + size.cell_height() as usize * (size.screen_lines() + search_height);\n\n let mouse = self.ctx.mouse();\n let display_offset = self.ctx.terminal().grid().display_offset();\n let point = self.ctx.mouse().point(&self.ctx.size_info(), display_offset);\n\n if self.ctx.message().is_none() || (mouse.y <= terminal_end) {\n None\n } else if mouse.y <= terminal_end + size.cell_height() as usize\n && point.column + message_bar::CLOSE_BUTTON_TEXT.len() >= size.columns()\n {\n Some(CursorIcon::Pointer)\n } else {\n Some(CursorIcon::Default)\n }\n }\n\n /// Icon state of the cursor.\n fn cursor_state(&mut self) -> CursorIcon {\n let display_offset = self.ctx.terminal().grid().display_offset();\n let point = self.ctx.mouse().point(&self.ctx.size_info(), display_offset);\n let hyperlink = self.ctx.terminal().grid()[point].hyperlink();\n\n // Function to check if mouse is on top of a hint.\n let hint_highlighted = |hint: &HintMatch| hint.should_highlight(point, hyperlink.as_ref());\n\n if let Some(mouse_state) = self.message_bar_cursor_state() {\n mouse_state\n } else if self.ctx.display().highlighted_hint.as_ref().is_some_and(hint_highlighted) {\n CursorIcon::Pointer\n } else if !self.ctx.modifiers().state().shift_key() && self.ctx.mouse_mode() {\n CursorIcon::Default\n } else {\n CursorIcon::Text\n }\n }\n\n /// Handle automatic scrolling when selecting above/below the window.\n fn update_selection_scrolling(&mut self, mouse_y: i32) {\n let scale_factor = self.ctx.window().scale_factor;\n let size = self.ctx.size_info();\n let window_id = 
self.ctx.window().id();\n let scheduler = self.ctx.scheduler_mut();\n\n // Scale constants by DPI.\n let min_height = (MIN_SELECTION_SCROLLING_HEIGHT * scale_factor) as i32;\n let step = (SELECTION_SCROLLING_STEP * scale_factor) as i32;\n\n // Compute the height of the scrolling areas.\n let end_top = max(min_height, size.padding_y() as i32);\n let text_area_bottom = size.padding_y() + size.screen_lines() as f32 * size.cell_height();\n let start_bottom = min(size.height() as i32 - min_height, text_area_bottom as i32);\n\n // Get distance from closest window boundary.\n let delta = if mouse_y < end_top {\n end_top - mouse_y + step\n } else if mouse_y >= start_bottom {\n start_bottom - mouse_y - step\n } else {\n scheduler.unschedule(TimerId::new(Topic::SelectionScrolling, window_id));\n return;\n };\n\n // Scale number of lines scrolled based on distance to boundary.\n let event = Event::new(EventType::Scroll(Scroll::Delta(delta / step)), Some(window_id));\n\n // Schedule event.\n let timer_id = TimerId::new(Topic::SelectionScrolling, window_id);\n scheduler.unschedule(timer_id);\n scheduler.schedule(event, SELECTION_SCROLLING_INTERVAL, true, timer_id);\n }\n}",
"class_signature": "impl<T: EventListener, A: ActionContext<T>> Processor<T, A>"
} |
gelu | burn-main/crates/burn-autodiff/src/ops/activation.rs | fn gelu(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
#[derive(Debug)]
struct Gelu;
retro_unary!(RetroGelu, B::gelu);
impl<B: Backend> Backward<B, 1> for Gelu {
type State = NodeID;
fn backward(
self,
ops: Ops<Self::State, 1>,
grads: &mut Gradients,
checkpointer: &mut Checkpointer,
) {
let input = checkpointer.retrieve_node_output(ops.state);
unary::<B, _>(ops.parents, ops.node, grads, |grad| {
B::gelu_backward(input, grad)
});
}
}
match Gelu
.prepare::<C>([tensor.node.clone()])
.memory_bound()
.retro_forward(RetroGelu::<B>::new(tensor.node.id))
.parents([&tensor])
.stateful()
{
OpsKind::Tracked(mut prep) => {
let state = prep.checkpoint(&tensor);
prep.finish(state, B::gelu(tensor.primitive.clone()))
}
OpsKind::UnTracked(prep) => prep.finish(B::gelu(tensor.primitive)),
}
} | use core::marker::PhantomData;
use crate::{
Autodiff,
checkpoint::{
base::Checkpointer, retro_forward::RetroForward, state::BackwardStates,
strategy::CheckpointStrategy,
},
grads::Gradients,
graph::NodeID,
ops::{Backward, Ops, OpsKind, unary},
retro_unary,
};
use burn_tensor::{
backend::Backend,
ops::{ActivationOps, FloatTensor},
};
impl<B: Backend, C: CheckpointStrategy> ActivationOps<Autodiff<B, C>> for Autodiff<B, C> {
fn gelu(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
#[derive(Debug)]
struct Gelu;
retro_unary!(RetroGelu, B::gelu);
impl<B: Backend> Backward<B, 1> for Gelu {
type State = NodeID;
fn backward(
self,
ops: Ops<Self::State, 1>,
grads: &mut Gradients,
checkpointer: &mut Checkpointer,
) {
let input = checkpointer.retrieve_node_output(ops.state);
unary::<B, _>(ops.parents, ops.node, grads, |grad| {
B::gelu_backward(input, grad)
});
}
}
match Gelu
.prepare::<C>([tensor.node.clone()])
.memory_bound()
.retro_forward(RetroGelu::<B>::new(tensor.node.id))
.parents([&tensor])
.stateful()
{
OpsKind::Tracked(mut prep) => {
let state = prep.checkpoint(&tensor);
prep.finish(state, B::gelu(tensor.primitive.clone()))
}
OpsKind::UnTracked(prep) => prep.finish(B::gelu(tensor.primitive)),
}
}
fn relu(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
#[derive(Debug)]
struct Relu;
retro_unary!(RetroRelu, B::relu);
impl<B: Backend> Backward<B, 1> for Relu {
type State = NodeID;
fn backward(
self,
ops: Ops<Self::State, 1>,
grads: &mut Gradients,
checkpointer: &mut Checkpointer,
) {
let state = checkpointer.retrieve_node_output(ops.state);
unary::<B, _>(ops.parents, ops.node, grads, |grad| {
B::relu_backward(state, grad)
});
}
}
match Relu
.prepare::<C>([tensor.node.clone()])
.memory_bound()
.retro_forward(RetroRelu::<B>::new(tensor.node.id))
.parents([&tensor])
.stateful()
{
OpsKind::Tracked(mut prep) => {
let state = prep.checkpoint(&tensor);
prep.finish(state, B::relu(tensor.primitive))
}
OpsKind::UnTracked(prep) => prep.finish(B::relu(tensor.primitive)),
}
}
fn sigmoid(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
#[derive(Debug)]
struct Sigmoid;
retro_unary!(RetroSigmoid, B::sigmoid);
impl<B: Backend> Backward<B, 1> for Sigmoid {
type State = NodeID;
fn backward(
self,
ops: Ops<Self::State, 1>,
grads: &mut Gradients,
checkpointer: &mut Checkpointer,
) {
let input = checkpointer.retrieve_node_output(ops.state);
let output = B::sigmoid(input);
unary::<B, _>(ops.parents, ops.node, grads, |grad| {
B::sigmoid_backward(output, grad)
});
}
}
match Sigmoid
.prepare::<C>([tensor.node.clone()])
.memory_bound()
.retro_forward(RetroSigmoid::<B>::new(tensor.node.id))
.parents([&tensor])
.stateful()
{
OpsKind::Tracked(mut prep) => {
let state = prep.checkpoint(&tensor);
prep.finish(state, B::sigmoid(tensor.primitive))
}
OpsKind::UnTracked(prep) => prep.finish(B::sigmoid(tensor.primitive)),
}
}
fn log_sigmoid(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
#[derive(Debug)]
struct LogSigmoid;
retro_unary!(RetroLogSigmoid, B::log_sigmoid);
impl<B: Backend> Backward<B, 1> for LogSigmoid {
type State = NodeID;
fn backward(
self,
ops: Ops<Self::State, 1>,
grads: &mut Gradients,
checkpointer: &mut Checkpointer,
) {
let input = checkpointer.retrieve_node_output(ops.state);
unary::<B, _>(ops.parents, ops.node, grads, |grad| {
B::log_sigmoid_backward(input, grad)
});
}
}
match LogSigmoid
.prepare::<C>([tensor.node.clone()])
.memory_bound()
.retro_forward(RetroLogSigmoid::<B>::new(tensor.node.id))
.parents([&tensor])
.stateful()
{
OpsKind::Tracked(mut prep) => {
let state = prep.checkpoint(&tensor);
prep.finish(state, B::log_sigmoid(tensor.primitive.clone()))
}
OpsKind::UnTracked(prep) => prep.finish(B::log_sigmoid(tensor.primitive)),
}
}
}
| rust | {
"argument_definitions": [
{
"definitions": [
"impl<B: Backend, C: CheckpointStrategy> ActivationOps<Autodiff<B, C>> for Autodiff<B, C> {\n fn gelu(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Gelu;\n\n retro_unary!(RetroGelu, B::gelu);\n\n impl<B: Backend> Backward<B, 1> for Gelu {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::gelu_backward(input, grad)\n });\n }\n }\n\n match Gelu\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroGelu::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::gelu(tensor.primitive.clone()))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::gelu(tensor.primitive)),\n }\n }\n\n fn relu(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Relu;\n\n retro_unary!(RetroRelu, B::relu);\n\n impl<B: Backend> Backward<B, 1> for Relu {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let state = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::relu_backward(state, grad)\n });\n }\n }\n\n match Relu\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroRelu::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::relu(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::relu(tensor.primitive)),\n }\n }\n\n fn sigmoid(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sigmoid;\n\n retro_unary!(RetroSigmoid, B::sigmoid);\n\n impl<B: Backend> Backward<B, 1> for 
Sigmoid {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n let output = B::sigmoid(input);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::sigmoid_backward(output, grad)\n });\n }\n }\n\n match Sigmoid\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSigmoid::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::sigmoid(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::sigmoid(tensor.primitive)),\n }\n }\n\n fn log_sigmoid(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct LogSigmoid;\n\n retro_unary!(RetroLogSigmoid, B::log_sigmoid);\n\n impl<B: Backend> Backward<B, 1> for LogSigmoid {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::log_sigmoid_backward(input, grad)\n });\n }\n }\n\n match LogSigmoid\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroLogSigmoid::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::log_sigmoid(tensor.primitive.clone()))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::log_sigmoid(tensor.primitive)),\n }\n }\n}"
],
"name": "tensor",
"type": "FloatTensor<Self>"
}
],
"end_line": 56,
"name": "gelu",
"signature": "fn gelu(tensor: FloatTensor<Self>) -> FloatTensor<Self>",
"start_line": 20
} | {
"class_name": "impl<B: Backend, C: CheckpointStrategy> ActivationOps<Autodiff<B, C>> for Autodiff<B, C> {\n fn gelu(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Gelu;\n\n retro_unary!(RetroGelu, B::gelu);\n\n impl<B: Backend> Backward<B, 1> for Gelu {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::gelu_backward(input, grad)\n });\n }\n }\n\n match Gelu\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroGelu::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::gelu(tensor.primitive.clone()))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::gelu(tensor.primitive)),\n }\n }\n\n fn relu(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Relu;\n\n retro_unary!(RetroRelu, B::relu);\n\n impl<B: Backend> Backward<B, 1> for Relu {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let state = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::relu_backward(state, grad)\n });\n }\n }\n\n match Relu\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroRelu::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::relu(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::relu(tensor.primitive)),\n }\n }\n\n fn sigmoid(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sigmoid;\n\n retro_unary!(RetroSigmoid, B::sigmoid);\n\n impl<B: Backend> 
Backward<B, 1> for Sigmoid {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n let output = B::sigmoid(input);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::sigmoid_backward(output, grad)\n });\n }\n }\n\n match Sigmoid\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSigmoid::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::sigmoid(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::sigmoid(tensor.primitive)),\n }\n }\n\n fn log_sigmoid(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct LogSigmoid;\n\n retro_unary!(RetroLogSigmoid, B::log_sigmoid);\n\n impl<B: Backend> Backward<B, 1> for LogSigmoid {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::log_sigmoid_backward(input, grad)\n });\n }\n }\n\n match LogSigmoid\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroLogSigmoid::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::log_sigmoid(tensor.primitive.clone()))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::log_sigmoid(tensor.primitive)),\n }\n }\n}",
"class_signature": "impl<B: Backend, C: CheckpointStrategy> ActivationOps<Autodiff<B, C>> for Autodiff<B, C>"
} |
relu | burn-main/crates/burn-autodiff/src/ops/activation.rs | fn relu(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
#[derive(Debug)]
struct Relu;
retro_unary!(RetroRelu, B::relu);
impl<B: Backend> Backward<B, 1> for Relu {
type State = NodeID;
fn backward(
self,
ops: Ops<Self::State, 1>,
grads: &mut Gradients,
checkpointer: &mut Checkpointer,
) {
let state = checkpointer.retrieve_node_output(ops.state);
unary::<B, _>(ops.parents, ops.node, grads, |grad| {
B::relu_backward(state, grad)
});
}
}
match Relu
.prepare::<C>([tensor.node.clone()])
.memory_bound()
.retro_forward(RetroRelu::<B>::new(tensor.node.id))
.parents([&tensor])
.stateful()
{
OpsKind::Tracked(mut prep) => {
let state = prep.checkpoint(&tensor);
prep.finish(state, B::relu(tensor.primitive))
}
OpsKind::UnTracked(prep) => prep.finish(B::relu(tensor.primitive)),
}
} | use core::marker::PhantomData;
use crate::{
Autodiff,
checkpoint::{
base::Checkpointer, retro_forward::RetroForward, state::BackwardStates,
strategy::CheckpointStrategy,
},
grads::Gradients,
graph::NodeID,
ops::{Backward, Ops, OpsKind, unary},
retro_unary,
};
use burn_tensor::{
backend::Backend,
ops::{ActivationOps, FloatTensor},
};
impl<B: Backend, C: CheckpointStrategy> ActivationOps<Autodiff<B, C>> for Autodiff<B, C> {
fn gelu(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
#[derive(Debug)]
struct Gelu;
retro_unary!(RetroGelu, B::gelu);
impl<B: Backend> Backward<B, 1> for Gelu {
type State = NodeID;
fn backward(
self,
ops: Ops<Self::State, 1>,
grads: &mut Gradients,
checkpointer: &mut Checkpointer,
) {
let input = checkpointer.retrieve_node_output(ops.state);
unary::<B, _>(ops.parents, ops.node, grads, |grad| {
B::gelu_backward(input, grad)
});
}
}
match Gelu
.prepare::<C>([tensor.node.clone()])
.memory_bound()
.retro_forward(RetroGelu::<B>::new(tensor.node.id))
.parents([&tensor])
.stateful()
{
OpsKind::Tracked(mut prep) => {
let state = prep.checkpoint(&tensor);
prep.finish(state, B::gelu(tensor.primitive.clone()))
}
OpsKind::UnTracked(prep) => prep.finish(B::gelu(tensor.primitive)),
}
}
fn relu(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
#[derive(Debug)]
struct Relu;
retro_unary!(RetroRelu, B::relu);
impl<B: Backend> Backward<B, 1> for Relu {
type State = NodeID;
fn backward(
self,
ops: Ops<Self::State, 1>,
grads: &mut Gradients,
checkpointer: &mut Checkpointer,
) {
let state = checkpointer.retrieve_node_output(ops.state);
unary::<B, _>(ops.parents, ops.node, grads, |grad| {
B::relu_backward(state, grad)
});
}
}
match Relu
.prepare::<C>([tensor.node.clone()])
.memory_bound()
.retro_forward(RetroRelu::<B>::new(tensor.node.id))
.parents([&tensor])
.stateful()
{
OpsKind::Tracked(mut prep) => {
let state = prep.checkpoint(&tensor);
prep.finish(state, B::relu(tensor.primitive))
}
OpsKind::UnTracked(prep) => prep.finish(B::relu(tensor.primitive)),
}
}
fn sigmoid(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
#[derive(Debug)]
struct Sigmoid;
retro_unary!(RetroSigmoid, B::sigmoid);
impl<B: Backend> Backward<B, 1> for Sigmoid {
type State = NodeID;
fn backward(
self,
ops: Ops<Self::State, 1>,
grads: &mut Gradients,
checkpointer: &mut Checkpointer,
) {
let input = checkpointer.retrieve_node_output(ops.state);
let output = B::sigmoid(input);
unary::<B, _>(ops.parents, ops.node, grads, |grad| {
B::sigmoid_backward(output, grad)
});
}
}
match Sigmoid
.prepare::<C>([tensor.node.clone()])
.memory_bound()
.retro_forward(RetroSigmoid::<B>::new(tensor.node.id))
.parents([&tensor])
.stateful()
{
OpsKind::Tracked(mut prep) => {
let state = prep.checkpoint(&tensor);
prep.finish(state, B::sigmoid(tensor.primitive))
}
OpsKind::UnTracked(prep) => prep.finish(B::sigmoid(tensor.primitive)),
}
}
fn log_sigmoid(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
#[derive(Debug)]
struct LogSigmoid;
retro_unary!(RetroLogSigmoid, B::log_sigmoid);
impl<B: Backend> Backward<B, 1> for LogSigmoid {
type State = NodeID;
fn backward(
self,
ops: Ops<Self::State, 1>,
grads: &mut Gradients,
checkpointer: &mut Checkpointer,
) {
let input = checkpointer.retrieve_node_output(ops.state);
unary::<B, _>(ops.parents, ops.node, grads, |grad| {
B::log_sigmoid_backward(input, grad)
});
}
}
match LogSigmoid
.prepare::<C>([tensor.node.clone()])
.memory_bound()
.retro_forward(RetroLogSigmoid::<B>::new(tensor.node.id))
.parents([&tensor])
.stateful()
{
OpsKind::Tracked(mut prep) => {
let state = prep.checkpoint(&tensor);
prep.finish(state, B::log_sigmoid(tensor.primitive.clone()))
}
OpsKind::UnTracked(prep) => prep.finish(B::log_sigmoid(tensor.primitive)),
}
}
}
| rust | {
"argument_definitions": [
{
"definitions": [
"impl<B: Backend, C: CheckpointStrategy> ActivationOps<Autodiff<B, C>> for Autodiff<B, C> {\n fn gelu(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Gelu;\n\n retro_unary!(RetroGelu, B::gelu);\n\n impl<B: Backend> Backward<B, 1> for Gelu {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::gelu_backward(input, grad)\n });\n }\n }\n\n match Gelu\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroGelu::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::gelu(tensor.primitive.clone()))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::gelu(tensor.primitive)),\n }\n }\n\n fn relu(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Relu;\n\n retro_unary!(RetroRelu, B::relu);\n\n impl<B: Backend> Backward<B, 1> for Relu {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let state = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::relu_backward(state, grad)\n });\n }\n }\n\n match Relu\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroRelu::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::relu(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::relu(tensor.primitive)),\n }\n }\n\n fn sigmoid(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sigmoid;\n\n retro_unary!(RetroSigmoid, B::sigmoid);\n\n impl<B: Backend> Backward<B, 1> for 
Sigmoid {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n let output = B::sigmoid(input);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::sigmoid_backward(output, grad)\n });\n }\n }\n\n match Sigmoid\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSigmoid::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::sigmoid(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::sigmoid(tensor.primitive)),\n }\n }\n\n fn log_sigmoid(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct LogSigmoid;\n\n retro_unary!(RetroLogSigmoid, B::log_sigmoid);\n\n impl<B: Backend> Backward<B, 1> for LogSigmoid {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::log_sigmoid_backward(input, grad)\n });\n }\n }\n\n match LogSigmoid\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroLogSigmoid::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::log_sigmoid(tensor.primitive.clone()))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::log_sigmoid(tensor.primitive)),\n }\n }\n}"
],
"name": "tensor",
"type": "FloatTensor<Self>"
}
],
"end_line": 93,
"name": "relu",
"signature": "fn relu(tensor: FloatTensor<Self>) -> FloatTensor<Self>",
"start_line": 58
} | {
"class_name": "impl<B: Backend, C: CheckpointStrategy> ActivationOps<Autodiff<B, C>> for Autodiff<B, C> {\n fn gelu(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Gelu;\n\n retro_unary!(RetroGelu, B::gelu);\n\n impl<B: Backend> Backward<B, 1> for Gelu {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::gelu_backward(input, grad)\n });\n }\n }\n\n match Gelu\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroGelu::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::gelu(tensor.primitive.clone()))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::gelu(tensor.primitive)),\n }\n }\n\n fn relu(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Relu;\n\n retro_unary!(RetroRelu, B::relu);\n\n impl<B: Backend> Backward<B, 1> for Relu {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let state = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::relu_backward(state, grad)\n });\n }\n }\n\n match Relu\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroRelu::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::relu(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::relu(tensor.primitive)),\n }\n }\n\n fn sigmoid(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sigmoid;\n\n retro_unary!(RetroSigmoid, B::sigmoid);\n\n impl<B: Backend> 
Backward<B, 1> for Sigmoid {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n let output = B::sigmoid(input);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::sigmoid_backward(output, grad)\n });\n }\n }\n\n match Sigmoid\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSigmoid::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::sigmoid(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::sigmoid(tensor.primitive)),\n }\n }\n\n fn log_sigmoid(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct LogSigmoid;\n\n retro_unary!(RetroLogSigmoid, B::log_sigmoid);\n\n impl<B: Backend> Backward<B, 1> for LogSigmoid {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::log_sigmoid_backward(input, grad)\n });\n }\n }\n\n match LogSigmoid\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroLogSigmoid::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::log_sigmoid(tensor.primitive.clone()))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::log_sigmoid(tensor.primitive)),\n }\n }\n}",
"class_signature": "impl<B: Backend, C: CheckpointStrategy> ActivationOps<Autodiff<B, C>> for Autodiff<B, C>"
} |
sigmoid | burn-main/crates/burn-autodiff/src/ops/activation.rs | fn sigmoid(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
#[derive(Debug)]
struct Sigmoid;
retro_unary!(RetroSigmoid, B::sigmoid);
impl<B: Backend> Backward<B, 1> for Sigmoid {
type State = NodeID;
fn backward(
self,
ops: Ops<Self::State, 1>,
grads: &mut Gradients,
checkpointer: &mut Checkpointer,
) {
let input = checkpointer.retrieve_node_output(ops.state);
let output = B::sigmoid(input);
unary::<B, _>(ops.parents, ops.node, grads, |grad| {
B::sigmoid_backward(output, grad)
});
}
}
match Sigmoid
.prepare::<C>([tensor.node.clone()])
.memory_bound()
.retro_forward(RetroSigmoid::<B>::new(tensor.node.id))
.parents([&tensor])
.stateful()
{
OpsKind::Tracked(mut prep) => {
let state = prep.checkpoint(&tensor);
prep.finish(state, B::sigmoid(tensor.primitive))
}
OpsKind::UnTracked(prep) => prep.finish(B::sigmoid(tensor.primitive)),
}
} | use core::marker::PhantomData;
use crate::{
Autodiff,
checkpoint::{
base::Checkpointer, retro_forward::RetroForward, state::BackwardStates,
strategy::CheckpointStrategy,
},
grads::Gradients,
graph::NodeID,
ops::{Backward, Ops, OpsKind, unary},
retro_unary,
};
use burn_tensor::{
backend::Backend,
ops::{ActivationOps, FloatTensor},
};
impl<B: Backend, C: CheckpointStrategy> ActivationOps<Autodiff<B, C>> for Autodiff<B, C> {
fn gelu(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
#[derive(Debug)]
struct Gelu;
retro_unary!(RetroGelu, B::gelu);
impl<B: Backend> Backward<B, 1> for Gelu {
type State = NodeID;
fn backward(
self,
ops: Ops<Self::State, 1>,
grads: &mut Gradients,
checkpointer: &mut Checkpointer,
) {
let input = checkpointer.retrieve_node_output(ops.state);
unary::<B, _>(ops.parents, ops.node, grads, |grad| {
B::gelu_backward(input, grad)
});
}
}
match Gelu
.prepare::<C>([tensor.node.clone()])
.memory_bound()
.retro_forward(RetroGelu::<B>::new(tensor.node.id))
.parents([&tensor])
.stateful()
{
OpsKind::Tracked(mut prep) => {
let state = prep.checkpoint(&tensor);
prep.finish(state, B::gelu(tensor.primitive.clone()))
}
OpsKind::UnTracked(prep) => prep.finish(B::gelu(tensor.primitive)),
}
}
fn relu(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
#[derive(Debug)]
struct Relu;
retro_unary!(RetroRelu, B::relu);
impl<B: Backend> Backward<B, 1> for Relu {
type State = NodeID;
fn backward(
self,
ops: Ops<Self::State, 1>,
grads: &mut Gradients,
checkpointer: &mut Checkpointer,
) {
let state = checkpointer.retrieve_node_output(ops.state);
unary::<B, _>(ops.parents, ops.node, grads, |grad| {
B::relu_backward(state, grad)
});
}
}
match Relu
.prepare::<C>([tensor.node.clone()])
.memory_bound()
.retro_forward(RetroRelu::<B>::new(tensor.node.id))
.parents([&tensor])
.stateful()
{
OpsKind::Tracked(mut prep) => {
let state = prep.checkpoint(&tensor);
prep.finish(state, B::relu(tensor.primitive))
}
OpsKind::UnTracked(prep) => prep.finish(B::relu(tensor.primitive)),
}
}
fn sigmoid(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
#[derive(Debug)]
struct Sigmoid;
retro_unary!(RetroSigmoid, B::sigmoid);
impl<B: Backend> Backward<B, 1> for Sigmoid {
type State = NodeID;
fn backward(
self,
ops: Ops<Self::State, 1>,
grads: &mut Gradients,
checkpointer: &mut Checkpointer,
) {
let input = checkpointer.retrieve_node_output(ops.state);
let output = B::sigmoid(input);
unary::<B, _>(ops.parents, ops.node, grads, |grad| {
B::sigmoid_backward(output, grad)
});
}
}
match Sigmoid
.prepare::<C>([tensor.node.clone()])
.memory_bound()
.retro_forward(RetroSigmoid::<B>::new(tensor.node.id))
.parents([&tensor])
.stateful()
{
OpsKind::Tracked(mut prep) => {
let state = prep.checkpoint(&tensor);
prep.finish(state, B::sigmoid(tensor.primitive))
}
OpsKind::UnTracked(prep) => prep.finish(B::sigmoid(tensor.primitive)),
}
}
fn log_sigmoid(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
#[derive(Debug)]
struct LogSigmoid;
retro_unary!(RetroLogSigmoid, B::log_sigmoid);
impl<B: Backend> Backward<B, 1> for LogSigmoid {
type State = NodeID;
fn backward(
self,
ops: Ops<Self::State, 1>,
grads: &mut Gradients,
checkpointer: &mut Checkpointer,
) {
let input = checkpointer.retrieve_node_output(ops.state);
unary::<B, _>(ops.parents, ops.node, grads, |grad| {
B::log_sigmoid_backward(input, grad)
});
}
}
match LogSigmoid
.prepare::<C>([tensor.node.clone()])
.memory_bound()
.retro_forward(RetroLogSigmoid::<B>::new(tensor.node.id))
.parents([&tensor])
.stateful()
{
OpsKind::Tracked(mut prep) => {
let state = prep.checkpoint(&tensor);
prep.finish(state, B::log_sigmoid(tensor.primitive.clone()))
}
OpsKind::UnTracked(prep) => prep.finish(B::log_sigmoid(tensor.primitive)),
}
}
}
| rust | {
"argument_definitions": [
{
"definitions": [
"impl<B: Backend, C: CheckpointStrategy> ActivationOps<Autodiff<B, C>> for Autodiff<B, C> {\n fn gelu(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Gelu;\n\n retro_unary!(RetroGelu, B::gelu);\n\n impl<B: Backend> Backward<B, 1> for Gelu {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::gelu_backward(input, grad)\n });\n }\n }\n\n match Gelu\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroGelu::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::gelu(tensor.primitive.clone()))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::gelu(tensor.primitive)),\n }\n }\n\n fn relu(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Relu;\n\n retro_unary!(RetroRelu, B::relu);\n\n impl<B: Backend> Backward<B, 1> for Relu {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let state = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::relu_backward(state, grad)\n });\n }\n }\n\n match Relu\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroRelu::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::relu(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::relu(tensor.primitive)),\n }\n }\n\n fn sigmoid(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sigmoid;\n\n retro_unary!(RetroSigmoid, B::sigmoid);\n\n impl<B: Backend> Backward<B, 1> for 
Sigmoid {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n let output = B::sigmoid(input);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::sigmoid_backward(output, grad)\n });\n }\n }\n\n match Sigmoid\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSigmoid::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::sigmoid(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::sigmoid(tensor.primitive)),\n }\n }\n\n fn log_sigmoid(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct LogSigmoid;\n\n retro_unary!(RetroLogSigmoid, B::log_sigmoid);\n\n impl<B: Backend> Backward<B, 1> for LogSigmoid {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::log_sigmoid_backward(input, grad)\n });\n }\n }\n\n match LogSigmoid\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroLogSigmoid::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::log_sigmoid(tensor.primitive.clone()))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::log_sigmoid(tensor.primitive)),\n }\n }\n}"
],
"name": "tensor",
"type": "FloatTensor<Self>"
}
],
"end_line": 131,
"name": "sigmoid",
"signature": "fn sigmoid(tensor: FloatTensor<Self>) -> FloatTensor<Self>",
"start_line": 95
} | {
"class_name": "impl<B: Backend, C: CheckpointStrategy> ActivationOps<Autodiff<B, C>> for Autodiff<B, C> {\n fn gelu(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Gelu;\n\n retro_unary!(RetroGelu, B::gelu);\n\n impl<B: Backend> Backward<B, 1> for Gelu {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::gelu_backward(input, grad)\n });\n }\n }\n\n match Gelu\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroGelu::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::gelu(tensor.primitive.clone()))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::gelu(tensor.primitive)),\n }\n }\n\n fn relu(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Relu;\n\n retro_unary!(RetroRelu, B::relu);\n\n impl<B: Backend> Backward<B, 1> for Relu {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let state = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::relu_backward(state, grad)\n });\n }\n }\n\n match Relu\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroRelu::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::relu(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::relu(tensor.primitive)),\n }\n }\n\n fn sigmoid(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sigmoid;\n\n retro_unary!(RetroSigmoid, B::sigmoid);\n\n impl<B: Backend> 
Backward<B, 1> for Sigmoid {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n let output = B::sigmoid(input);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::sigmoid_backward(output, grad)\n });\n }\n }\n\n match Sigmoid\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSigmoid::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::sigmoid(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::sigmoid(tensor.primitive)),\n }\n }\n\n fn log_sigmoid(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct LogSigmoid;\n\n retro_unary!(RetroLogSigmoid, B::log_sigmoid);\n\n impl<B: Backend> Backward<B, 1> for LogSigmoid {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::log_sigmoid_backward(input, grad)\n });\n }\n }\n\n match LogSigmoid\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroLogSigmoid::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::log_sigmoid(tensor.primitive.clone()))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::log_sigmoid(tensor.primitive)),\n }\n }\n}",
"class_signature": "impl<B: Backend, C: CheckpointStrategy> ActivationOps<Autodiff<B, C>> for Autodiff<B, C>"
} |
log_sigmoid | burn-main/crates/burn-autodiff/src/ops/activation.rs | fn log_sigmoid(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
#[derive(Debug)]
struct LogSigmoid;
retro_unary!(RetroLogSigmoid, B::log_sigmoid);
impl<B: Backend> Backward<B, 1> for LogSigmoid {
type State = NodeID;
fn backward(
self,
ops: Ops<Self::State, 1>,
grads: &mut Gradients,
checkpointer: &mut Checkpointer,
) {
let input = checkpointer.retrieve_node_output(ops.state);
unary::<B, _>(ops.parents, ops.node, grads, |grad| {
B::log_sigmoid_backward(input, grad)
});
}
}
match LogSigmoid
.prepare::<C>([tensor.node.clone()])
.memory_bound()
.retro_forward(RetroLogSigmoid::<B>::new(tensor.node.id))
.parents([&tensor])
.stateful()
{
OpsKind::Tracked(mut prep) => {
let state = prep.checkpoint(&tensor);
prep.finish(state, B::log_sigmoid(tensor.primitive.clone()))
}
OpsKind::UnTracked(prep) => prep.finish(B::log_sigmoid(tensor.primitive)),
}
} | use core::marker::PhantomData;
use crate::{
Autodiff,
checkpoint::{
base::Checkpointer, retro_forward::RetroForward, state::BackwardStates,
strategy::CheckpointStrategy,
},
grads::Gradients,
graph::NodeID,
ops::{Backward, Ops, OpsKind, unary},
retro_unary,
};
use burn_tensor::{
backend::Backend,
ops::{ActivationOps, FloatTensor},
};
impl<B: Backend, C: CheckpointStrategy> ActivationOps<Autodiff<B, C>> for Autodiff<B, C> {
fn gelu(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
#[derive(Debug)]
struct Gelu;
retro_unary!(RetroGelu, B::gelu);
impl<B: Backend> Backward<B, 1> for Gelu {
type State = NodeID;
fn backward(
self,
ops: Ops<Self::State, 1>,
grads: &mut Gradients,
checkpointer: &mut Checkpointer,
) {
let input = checkpointer.retrieve_node_output(ops.state);
unary::<B, _>(ops.parents, ops.node, grads, |grad| {
B::gelu_backward(input, grad)
});
}
}
match Gelu
.prepare::<C>([tensor.node.clone()])
.memory_bound()
.retro_forward(RetroGelu::<B>::new(tensor.node.id))
.parents([&tensor])
.stateful()
{
OpsKind::Tracked(mut prep) => {
let state = prep.checkpoint(&tensor);
prep.finish(state, B::gelu(tensor.primitive.clone()))
}
OpsKind::UnTracked(prep) => prep.finish(B::gelu(tensor.primitive)),
}
}
fn relu(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
#[derive(Debug)]
struct Relu;
retro_unary!(RetroRelu, B::relu);
impl<B: Backend> Backward<B, 1> for Relu {
type State = NodeID;
fn backward(
self,
ops: Ops<Self::State, 1>,
grads: &mut Gradients,
checkpointer: &mut Checkpointer,
) {
let state = checkpointer.retrieve_node_output(ops.state);
unary::<B, _>(ops.parents, ops.node, grads, |grad| {
B::relu_backward(state, grad)
});
}
}
match Relu
.prepare::<C>([tensor.node.clone()])
.memory_bound()
.retro_forward(RetroRelu::<B>::new(tensor.node.id))
.parents([&tensor])
.stateful()
{
OpsKind::Tracked(mut prep) => {
let state = prep.checkpoint(&tensor);
prep.finish(state, B::relu(tensor.primitive))
}
OpsKind::UnTracked(prep) => prep.finish(B::relu(tensor.primitive)),
}
}
fn sigmoid(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
#[derive(Debug)]
struct Sigmoid;
retro_unary!(RetroSigmoid, B::sigmoid);
impl<B: Backend> Backward<B, 1> for Sigmoid {
type State = NodeID;
fn backward(
self,
ops: Ops<Self::State, 1>,
grads: &mut Gradients,
checkpointer: &mut Checkpointer,
) {
let input = checkpointer.retrieve_node_output(ops.state);
let output = B::sigmoid(input);
unary::<B, _>(ops.parents, ops.node, grads, |grad| {
B::sigmoid_backward(output, grad)
});
}
}
match Sigmoid
.prepare::<C>([tensor.node.clone()])
.memory_bound()
.retro_forward(RetroSigmoid::<B>::new(tensor.node.id))
.parents([&tensor])
.stateful()
{
OpsKind::Tracked(mut prep) => {
let state = prep.checkpoint(&tensor);
prep.finish(state, B::sigmoid(tensor.primitive))
}
OpsKind::UnTracked(prep) => prep.finish(B::sigmoid(tensor.primitive)),
}
}
    fn log_sigmoid(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        // LogSigmoid activation with autodiff support.
        //
        // The backward pass feeds the checkpointed *input* straight into
        // `B::log_sigmoid_backward`; the output is never needed.
        #[derive(Debug)]
        struct LogSigmoid;
        // Retro-forward so the memory-bound checkpoint strategy can re-run
        // this op instead of storing its output.
        retro_unary!(RetroLogSigmoid, B::log_sigmoid);
        impl<B: Backend> Backward<B, 1> for LogSigmoid {
            // Checkpointed node id of the input tensor.
            type State = NodeID;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                let input = checkpointer.retrieve_node_output(ops.state);
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    B::log_sigmoid_backward(input, grad)
                });
            }
        }
        match LogSigmoid
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroLogSigmoid::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                let state = prep.checkpoint(&tensor);
                // NOTE(review): the `.clone()` mirrors `gelu`, but `sigmoid`
                // above passes the primitive by value — confirm it is needed.
                prep.finish(state, B::log_sigmoid(tensor.primitive.clone()))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::log_sigmoid(tensor.primitive)),
        }
    }
}
| rust | {
"argument_definitions": [
{
"definitions": [
"impl<B: Backend, C: CheckpointStrategy> ActivationOps<Autodiff<B, C>> for Autodiff<B, C> {\n fn gelu(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Gelu;\n\n retro_unary!(RetroGelu, B::gelu);\n\n impl<B: Backend> Backward<B, 1> for Gelu {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::gelu_backward(input, grad)\n });\n }\n }\n\n match Gelu\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroGelu::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::gelu(tensor.primitive.clone()))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::gelu(tensor.primitive)),\n }\n }\n\n fn relu(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Relu;\n\n retro_unary!(RetroRelu, B::relu);\n\n impl<B: Backend> Backward<B, 1> for Relu {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let state = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::relu_backward(state, grad)\n });\n }\n }\n\n match Relu\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroRelu::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::relu(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::relu(tensor.primitive)),\n }\n }\n\n fn sigmoid(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sigmoid;\n\n retro_unary!(RetroSigmoid, B::sigmoid);\n\n impl<B: Backend> Backward<B, 1> for 
Sigmoid {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n let output = B::sigmoid(input);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::sigmoid_backward(output, grad)\n });\n }\n }\n\n match Sigmoid\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSigmoid::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::sigmoid(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::sigmoid(tensor.primitive)),\n }\n }\n\n fn log_sigmoid(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct LogSigmoid;\n\n retro_unary!(RetroLogSigmoid, B::log_sigmoid);\n\n impl<B: Backend> Backward<B, 1> for LogSigmoid {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::log_sigmoid_backward(input, grad)\n });\n }\n }\n\n match LogSigmoid\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroLogSigmoid::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::log_sigmoid(tensor.primitive.clone()))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::log_sigmoid(tensor.primitive)),\n }\n }\n}"
],
"name": "tensor",
"type": "FloatTensor<Self>"
}
],
"end_line": 169,
"name": "log_sigmoid",
"signature": "fn log_sigmoid(tensor: FloatTensor<Self>) -> FloatTensor<Self>",
"start_line": 133
} | {
"class_name": "impl<B: Backend, C: CheckpointStrategy> ActivationOps<Autodiff<B, C>> for Autodiff<B, C> {\n fn gelu(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Gelu;\n\n retro_unary!(RetroGelu, B::gelu);\n\n impl<B: Backend> Backward<B, 1> for Gelu {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::gelu_backward(input, grad)\n });\n }\n }\n\n match Gelu\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroGelu::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::gelu(tensor.primitive.clone()))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::gelu(tensor.primitive)),\n }\n }\n\n fn relu(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Relu;\n\n retro_unary!(RetroRelu, B::relu);\n\n impl<B: Backend> Backward<B, 1> for Relu {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let state = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::relu_backward(state, grad)\n });\n }\n }\n\n match Relu\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroRelu::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::relu(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::relu(tensor.primitive)),\n }\n }\n\n fn sigmoid(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sigmoid;\n\n retro_unary!(RetroSigmoid, B::sigmoid);\n\n impl<B: Backend> 
Backward<B, 1> for Sigmoid {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n let output = B::sigmoid(input);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::sigmoid_backward(output, grad)\n });\n }\n }\n\n match Sigmoid\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSigmoid::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::sigmoid(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::sigmoid(tensor.primitive)),\n }\n }\n\n fn log_sigmoid(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct LogSigmoid;\n\n retro_unary!(RetroLogSigmoid, B::log_sigmoid);\n\n impl<B: Backend> Backward<B, 1> for LogSigmoid {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::log_sigmoid_backward(input, grad)\n });\n }\n }\n\n match LogSigmoid\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroLogSigmoid::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::log_sigmoid(tensor.primitive.clone()))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::log_sigmoid(tensor.primitive)),\n }\n }\n}",
"class_signature": "impl<B: Backend, C: CheckpointStrategy> ActivationOps<Autodiff<B, C>> for Autodiff<B, C>"
} |
float_to_device | burn-main/crates/burn-autodiff/src/ops/tensor.rs | fn float_to_device(tensor: FloatTensor<Self>, device: &Device<Self>) -> FloatTensor<Self> {
#[derive(Debug)]
struct ToDevice;
impl<B: Backend> Backward<B, 1> for ToDevice {
type State = B::Device;
fn backward(
self,
ops: Ops<Self::State, 1>,
grads: &mut Gradients,
_checkpointer: &mut Checkpointer,
) {
unary::<B, _>(ops.parents, ops.node, grads, |grad| {
B::float_to_device(grad, &ops.state)
});
}
}
match ToDevice
.prepare::<C>([tensor.node])
.compute_bound()
.stateful()
{
OpsKind::Tracked(prep) => {
let device_old = B::float_device(&tensor.primitive);
prep.finish(device_old, B::float_to_device(tensor.primitive, device))
}
OpsKind::UnTracked(prep) => prep.finish(B::float_to_device(tensor.primitive, device)),
}
} | use alloc::{boxed::Box, vec, vec::Vec};
use core::marker::PhantomData;
#[cfg(not(feature = "std"))]
#[allow(unused_imports, reason = "required on aarch64, unused on x86_64")]
use num_traits::float::Float;
use crate::{
Autodiff,
checkpoint::{
base::Checkpointer, builder::CheckpointerBuilder, retro_forward::RetroForward,
state::BackwardStates, strategy::CheckpointStrategy,
},
grads::Gradients,
graph::{ComputingProperty, NodeID, NodeRef, Requirement, Step},
ops::{Backward, Ops, OpsKind, binary, broadcast_shape, unary},
retro_binary, retro_unary, retro_unary_scalar,
tensor::AutodiffTensor,
utils::duplicate,
};
use burn_tensor::{
Device, ElementConversion, Shape, TensorData, TensorMetadata,
backend::Backend,
ops::{BoolTensor, FloatElem, FloatTensor, FloatTensorOps, IntTensor},
};
use super::maxmin::MaxMinDim;
// Unsqueeze op on primitive.
// Unsqueeze op on primitive.
//
// Left-pads the tensor's shape with size-1 dims until it has as many dims
// as `shape`, then reshapes: a `[3, 4]` tensor unsqueezed against a 4-dim
// target becomes `[1, 1, 3, 4]`. Only the *rank* of `shape` is used; its
// dim sizes are ignored.
//
// Panics (via underflow) if `shape` has fewer dims than `tensor`.
fn unsqueeze_like<B: Backend>(
    tensor: B::FloatTensorPrimitive,
    shape: Shape,
) -> B::FloatTensorPrimitive {
    let ndims_out = shape.num_dims();
    let shape = tensor.shape();
    let ndims_in = shape.num_dims();
    // Start with all-ones, then copy the input dims into the trailing slots.
    let mut dims = vec![1; ndims_out];
    let num_ones = ndims_out - ndims_in;
    dims[num_ones..(ndims_in + num_ones)].copy_from_slice(&shape.dims[..ndims_in]);
    B::float_reshape(tensor, Shape::from(dims))
}
impl<B: Backend, C: CheckpointStrategy> FloatTensorOps<Self> for Autodiff<B, C> {
fn float_from_data(data: TensorData, device: &Device<Self>) -> FloatTensor<Self> {
AutodiffTensor::new(B::float_from_data(data, device))
}
fn float_random(
shape: Shape,
distribution: burn_tensor::Distribution,
device: &Device<Self>,
) -> FloatTensor<Self> {
AutodiffTensor::new(B::float_random(shape, distribution, device))
}
fn float_zeros(shape: Shape, device: &Device<Self>) -> FloatTensor<Self> {
AutodiffTensor::new(B::float_zeros(shape, device))
}
fn float_ones(shape: Shape, device: &Device<Self>) -> FloatTensor<Self> {
AutodiffTensor::new(B::float_ones(shape, device))
}
async fn float_into_data(tensor: FloatTensor<Self>) -> TensorData {
B::float_into_data(tensor.primitive).await
}
fn float_device(tensor: &FloatTensor<Self>) -> Device<Self> {
B::float_device(&tensor.primitive)
}
    fn float_to_device(tensor: FloatTensor<Self>, device: &Device<Self>) -> FloatTensor<Self> {
        // Moves the tensor to `device`. Gradients must flow back to where
        // the parent lives, so backward stores the *source* device and moves
        // incoming gradients back onto it.
        #[derive(Debug)]
        struct ToDevice;
        impl<B: Backend> Backward<B, 1> for ToDevice {
            // Device the tensor lived on before the move.
            type State = B::Device;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    B::float_to_device(grad, &ops.state)
                });
            }
        }
        match ToDevice
            .prepare::<C>([tensor.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => {
                // Capture the original device before the primitive is moved.
                let device_old = B::float_device(&tensor.primitive);
                prep.finish(device_old, B::float_to_device(tensor.primitive, device))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_to_device(tensor.primitive, device)),
        }
    }
fn float_empty(shape: Shape, device: &Device<Self>) -> FloatTensor<Self> {
AutodiffTensor::new(B::float_empty(shape, device))
}
    fn float_add(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {
        // Element-wise addition. d(lhs + rhs)/d{lhs,rhs} = 1, so each grad
        // passes through unchanged except for undoing any broadcasting.
        #[derive(Debug)]
        struct Add;
        retro_binary!(RetroAdd, B::float_add);
        impl<B: Backend> Backward<B, 2> for Add {
            // Original shapes of both operands, needed to reduce a
            // broadcast gradient back to each operand's shape.
            type State = (Shape, Shape);
            fn backward(
                self,
                ops: Ops<Self::State, 2>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (shape_lhs, shape_rhs) = ops.state;
                binary::<B, _, _>(
                    ops.parents,
                    ops.node,
                    grads,
                    |grad| broadcast_shape::<B>(grad, &shape_lhs),
                    |grad| broadcast_shape::<B>(grad, &shape_rhs),
                );
            }
        }
        match Add
            .prepare::<C>([lhs.node.clone(), rhs.node.clone()])
            .memory_bound()
            .retro_forward(RetroAdd::<B>::new(lhs.node.id, rhs.node.id))
            .parents([&lhs, &rhs])
            .stateful()
        {
            OpsKind::Tracked(preps) => preps.finish(
                (lhs.primitive.shape(), rhs.primitive.shape()),
                B::float_add(lhs.primitive, rhs.primitive),
            ),
            OpsKind::UnTracked(preps) => preps.finish(B::float_add(lhs.primitive, rhs.primitive)),
        }
    }
    fn float_add_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {
        // Adds a scalar. d(lhs + c)/dlhs = 1: the gradient is the identity,
        // so no state is needed and the op can be registered stateless.
        #[derive(Debug)]
        struct AddScalar;
        retro_unary_scalar!(RetroAddScalar, B::float_add_scalar);
        impl<B: Backend> Backward<B, 1> for AddScalar {
            type State = ();
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                // Pass the gradient through unchanged.
                unary::<B, _>(ops.parents, ops.node, grads, |grad| grad);
            }
        }
        AddScalar
            .prepare::<C>([lhs.node.clone()])
            .memory_bound()
            .retro_forward(RetroAddScalar::<B>::new(lhs.node.id, rhs))
            .parents([&lhs])
            .stateless(B::float_add_scalar(lhs.primitive, rhs))
    }
    fn float_sub(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {
        // Element-wise subtraction. d/dlhs = 1, d/drhs = -1, so the rhs
        // gradient is negated; broadcasting is undone via the saved shapes.
        #[derive(Debug)]
        struct Sub;
        retro_binary!(RetroSub, B::float_sub);
        impl<B: Backend> Backward<B, 2> for Sub {
            // Original operand shapes, for broadcast reduction.
            type State = (Shape, Shape);
            fn backward(
                self,
                ops: Ops<Self::State, 2>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (shape_lhs, shape_rhs) = ops.state;
                binary::<B, _, _>(
                    ops.parents,
                    ops.node,
                    grads,
                    |grad| broadcast_shape::<B>(grad, &shape_lhs),
                    |grad| broadcast_shape::<B>(B::float_neg(grad), &shape_rhs),
                );
            }
        }
        match Sub
            .prepare::<C>([lhs.node.clone(), rhs.node.clone()])
            .memory_bound()
            .retro_forward(RetroSub::<B>::new(lhs.node.id, rhs.node.id))
            .parents([&lhs, &rhs])
            .stateful()
        {
            OpsKind::Tracked(preps) => preps.finish(
                (lhs.primitive.shape(), rhs.primitive.shape()),
                B::float_sub(lhs.primitive, rhs.primitive),
            ),
            OpsKind::UnTracked(preps) => preps.finish(B::float_sub(lhs.primitive, rhs.primitive)),
        }
    }
    fn float_sub_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {
        // Subtracts a scalar. d(lhs - c)/dlhs = 1: identity gradient,
        // registered stateless.
        #[derive(Debug)]
        struct SubScalar;
        retro_unary_scalar!(RetroSubScalar, B::float_sub_scalar);
        impl<B: Backend> Backward<B, 1> for SubScalar {
            type State = ();
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                // Pass the gradient through unchanged.
                unary::<B, _>(ops.parents, ops.node, grads, |grad| grad);
            }
        }
        SubScalar
            .prepare::<C>([lhs.node.clone()])
            .memory_bound()
            .retro_forward(RetroSubScalar::<B>::new(lhs.node.id, rhs))
            .parents([&lhs])
            .stateless(B::float_sub_scalar(lhs.primitive, rhs))
    }
    fn float_mul(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {
        // Element-wise multiplication. d/dlhs = rhs and d/drhs = lhs, so
        // each operand is checkpointed only when the *other* one needs a
        // gradient. Broadcasting is undone via `BinaryOpsBroadcast`.
        #[derive(Debug)]
        struct Mul;
        retro_binary!(RetroMul, B::float_mul);
        impl<B: Backend> Backward<B, 2> for Mul {
            // (checkpoint of lhs, checkpoint of rhs, broadcast info);
            // a `None` means the matching gradient closure is never called.
            type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);
            fn backward(
                self,
                ops: Ops<Self::State, 2>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                let (lhs, rhs, broadcast) = ops.state;
                let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));
                let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));
                binary::<B, _, _>(
                    ops.parents,
                    ops.node,
                    grads,
                    |grad| {
                        // d out / d lhs = rhs
                        let grad = B::float_mul(grad, rhs.unwrap());
                        broadcast.backward_lhs::<B>(grad)
                    },
                    |grad| {
                        // d out / d rhs = lhs
                        let grad = B::float_mul(grad, lhs.unwrap());
                        broadcast.backward_rhs::<B>(grad)
                    },
                );
            }
        }
        let lhs_tracked = lhs.is_tracked();
        let rhs_tracked = rhs.is_tracked();
        let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);
        match Mul
            .prepare::<C>([lhs.node.clone(), rhs.node.clone()])
            .memory_bound()
            .retro_forward(RetroMul::<B>::new(lhs.node.id, rhs.node.id))
            .parents([&lhs, &rhs])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                // Checkpoint lhs only if rhs's gradient (which uses lhs) is
                // needed, and vice versa.
                let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));
                let rhs_state = lhs_tracked.then(|| prep.checkpoint(&rhs));
                prep.finish(
                    (lhs_state, rhs_state, broadcast),
                    B::float_mul(lhs.primitive, rhs.primitive),
                )
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_mul(lhs.primitive, rhs.primitive)),
        }
    }
    fn float_mul_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {
        // Multiplies by a scalar. d(lhs * c)/dlhs = c, so only the scalar
        // itself is stored as state.
        #[derive(Debug)]
        struct MulScalar;
        retro_unary_scalar!(RetroMulScalar, B::float_mul_scalar);
        impl<B: Backend> Backward<B, 1> for MulScalar {
            // The scalar factor.
            type State = FloatElem<B>;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    B::float_mul_scalar(grad, ops.state)
                });
            }
        }
        match MulScalar
            .prepare::<C>([lhs.node.clone()])
            .memory_bound()
            .retro_forward(RetroMulScalar::<B>::new(lhs.node.id, rhs))
            .parents([&lhs])
            .stateful()
        {
            OpsKind::Tracked(prep) => prep.finish(rhs, B::float_mul_scalar(lhs.primitive, rhs)),
            OpsKind::UnTracked(prep) => prep.finish(B::float_mul_scalar(lhs.primitive, rhs)),
        }
    }
    fn float_div(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {
        // Element-wise division. d/dlhs = 1/rhs and d/drhs = -lhs/rhs^2.
        // rhs appears in both partials, so it is checkpointed whenever
        // *either* operand needs a gradient; lhs only when rhs does.
        #[derive(Debug)]
        struct Div;
        retro_binary!(RetroDiv, B::float_div);
        impl<B: Backend> Backward<B, 2> for Div {
            // (checkpoint of lhs, checkpoint of rhs, broadcast info).
            type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);
            fn backward(
                self,
                ops: Ops<Self::State, 2>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                let (lhs, rhs, broadcast) = ops.state;
                let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));
                let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));
                // rhs is consumed by both closures, so duplicate it
                // according to which parents actually require gradients.
                let [rhs_4lhs, rhs_4rhs] = duplicate(&ops.parents, rhs);
                binary::<B, _, _>(
                    ops.parents,
                    ops.node,
                    grads,
                    |grad| {
                        // d out / d lhs = rhs^-1
                        let rhs = rhs_4lhs.unwrap();
                        let value = B::float_powf_scalar(rhs, -1.0);
                        let grad = B::float_mul(grad, value);
                        broadcast.backward_lhs::<B>(grad)
                    },
                    |grad| {
                        // d out / d rhs = -lhs / rhs^2
                        let rhs = rhs_4rhs.unwrap();
                        let lhs = lhs.unwrap();
                        let value = B::float_div(B::float_neg(lhs), B::float_powf_scalar(rhs, 2.0));
                        let grad = B::float_mul(grad, value);
                        broadcast.backward_rhs::<B>(grad)
                    },
                );
            }
        }
        let lhs_tracked = lhs.is_tracked();
        let rhs_tracked = rhs.is_tracked();
        let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);
        match Div
            .prepare::<C>([lhs.node.clone(), rhs.node.clone()])
            .memory_bound()
            .retro_forward(RetroDiv::<B>::new(lhs.node.id, rhs.node.id))
            .parents([&lhs, &rhs])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));
                let rhs_state = (lhs_tracked || rhs_tracked).then(|| prep.checkpoint(&rhs));
                prep.finish(
                    (lhs_state, rhs_state, broadcast),
                    B::float_div(lhs.primitive, rhs.primitive),
                )
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_div(lhs.primitive, rhs.primitive)),
        }
    }
    fn float_div_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {
        // Divides by a scalar. d(lhs / c)/dlhs = 1/c, computed in backward
        // from the stored scalar.
        #[derive(Debug)]
        struct DivScalar;
        retro_unary_scalar!(RetroDivScalar, B::float_div_scalar);
        impl<B: Backend> Backward<B, 1> for DivScalar {
            // The scalar divisor.
            type State = FloatElem<B>;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    // Reciprocal computed in f32 then converted back to the
                    // backend's element type.
                    let tmp = 1.0 / ops.state.elem::<f32>();
                    B::float_mul_scalar(grad, tmp.elem())
                });
            }
        }
        match DivScalar
            .prepare::<C>([lhs.node.clone()])
            .memory_bound()
            .retro_forward(RetroDivScalar::<B>::new(lhs.node.id, rhs))
            .parents([&lhs])
            .stateful()
        {
            OpsKind::Tracked(prep) => prep.finish(rhs, B::float_div_scalar(lhs.primitive, rhs)),
            OpsKind::UnTracked(prep) => prep.finish(B::float_div_scalar(lhs.primitive, rhs)),
        }
    }
    fn float_remainder(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {
        // Element-wise remainder: remainder(x, y) = x - floor(x / y) * y.
        // d/dx = 1 and d/dy = -floor(x / y); both operands are needed for
        // the rhs gradient, hence the checkpointing below.
        #[derive(Debug)]
        struct Rem;
        retro_binary!(RetroRem, B::float_remainder);
        impl<B: Backend> Backward<B, 2> for Rem {
            // (checkpoint of lhs, checkpoint of rhs, broadcast info).
            type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);
            fn backward(
                self,
                ops: Ops<Self::State, 2>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                let (lhs, rhs, broadcast) = ops.state;
                let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));
                let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));
                binary::<B, _, _>(
                    ops.parents,
                    ops.node,
                    grads,
                    |grad| {
                        // remainder(x, y) = x - floor(x / y) * y
                        // partial(x - floor(x / y) * y, x) = 1
                        broadcast.backward_lhs::<B>(grad)
                    },
                    |grad| {
                        // partial(x - floor(x / y) * y, y) = - floor(x / y)
                        let rhs = rhs.unwrap();
                        let lhs = lhs.unwrap();
                        let value = B::float_neg(B::float_floor(B::float_div(lhs, rhs)));
                        let grad = B::float_mul(grad, value);
                        broadcast.backward_rhs::<B>(grad)
                    },
                );
            }
        }
        let lhs_tracked = lhs.is_tracked();
        let rhs_tracked = rhs.is_tracked();
        let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);
        match Rem
            .prepare::<C>([lhs.node.clone(), rhs.node.clone()])
            .memory_bound()
            .retro_forward(RetroRem::<B>::new(lhs.node.id, rhs.node.id))
            .parents([&lhs, &rhs])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                // Both operands feed the rhs partial, so rhs is saved when
                // either parent is tracked; lhs only when rhs is tracked.
                let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));
                let rhs_state = (lhs_tracked || rhs_tracked).then(|| prep.checkpoint(&rhs));
                prep.finish(
                    (lhs_state, rhs_state, broadcast),
                    B::float_remainder(lhs.primitive, rhs.primitive),
                )
            }
            OpsKind::UnTracked(prep) => {
                prep.finish(B::float_remainder(lhs.primitive, rhs.primitive))
            }
        }
    }
    fn float_remainder_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {
        // Remainder by a scalar. d(remainder(x, c))/dx = 1 (where defined),
        // so the gradient is the identity and the op is stateless.
        #[derive(Debug)]
        struct RemainderScalar;
        retro_unary_scalar!(RetroRemainderScalar, B::float_remainder_scalar);
        impl<B: Backend> Backward<B, 1> for RemainderScalar {
            type State = ();
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                // Pass the gradient through unchanged.
                unary::<B, _>(ops.parents, ops.node, grads, |grad| grad);
            }
        }
        RemainderScalar
            .prepare::<C>([lhs.node.clone()])
            .memory_bound()
            .retro_forward(RetroRemainderScalar::<B>::new(lhs.node.id, rhs))
            .parents([&lhs])
            .stateless(B::float_remainder_scalar(lhs.primitive, rhs))
    }
    fn float_matmul(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {
        // Matrix multiplication. Gradients follow the standard identities:
        // d/dlhs = grad @ rhs^T and d/drhs = lhs^T @ grad. Marked
        // compute-bound, so the output is stored rather than recomputed.
        #[derive(Debug)]
        struct Matmul;
        impl<B: Backend> Backward<B, 2> for Matmul {
            // (checkpoint of lhs, checkpoint of rhs, broadcast info); each
            // side is only saved when the other's gradient needs it.
            type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);
            fn backward(
                self,
                ops: Ops<Self::State, 2>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                let (lhs, rhs, broadcast) = ops.state;
                let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));
                let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));
                binary::<B, _, _>(
                    ops.parents,
                    ops.node,
                    grads,
                    |grad| {
                        // d out / d lhs = grad @ rhs^T
                        let rhs = B::float_transpose(rhs.unwrap());
                        let grad = B::float_matmul(grad, rhs);
                        broadcast.backward_lhs::<B>(grad)
                    },
                    |grad| {
                        // d out / d rhs = lhs^T @ grad
                        let lhs = B::float_transpose(lhs.unwrap());
                        let grad = B::float_matmul(lhs, grad);
                        broadcast.backward_rhs::<B>(grad)
                    },
                );
            }
        }
        let lhs_tracked = lhs.is_tracked();
        let rhs_tracked = rhs.is_tracked();
        let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);
        match Matmul
            .prepare::<C>([lhs.node.clone(), rhs.node.clone()])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));
                let rhs_state = lhs_tracked.then(|| prep.checkpoint(&rhs));
                prep.finish(
                    (lhs_state, rhs_state, broadcast),
                    B::float_matmul(lhs.primitive, rhs.primitive),
                )
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_matmul(lhs.primitive, rhs.primitive)),
        }
    }
    fn float_neg(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        // Negation. d(-x)/dx = -1: backward just negates the gradient, so
        // the op is registered stateless.
        #[derive(Debug)]
        struct Neg;
        retro_unary!(RetroNeg, B::float_neg);
        impl<B: Backend> Backward<B, 1> for Neg {
            type State = ();
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                unary::<B, _>(ops.parents, ops.node, grads, |grad| B::float_neg(grad));
            }
        }
        Neg.prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroNeg::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateless(B::float_neg(tensor.primitive))
    }
    fn float_recip(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        // Reciprocal. d(1/x)/dx = -x^-2, computed from the checkpointed
        // input during the backward pass.
        #[derive(Debug)]
        struct Recip;
        retro_unary!(RetroRecip, B::float_recip);
        impl<B: Backend> Backward<B, 1> for Recip {
            // Checkpointed node id of the input tensor.
            type State = NodeID;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                let tensor = checkpointer.retrieve_node_output(ops.state);
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    // grad * (-x^-2)
                    let tmp = B::float_powf_scalar(tensor, -2.0);
                    let value = B::float_neg(tmp);
                    B::float_mul(grad, value)
                });
            }
        }
        match Recip
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroRecip::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                let state = prep.checkpoint(&tensor);
                prep.finish(state, B::float_recip(tensor.primitive))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_recip(tensor.primitive)),
        }
    }
    fn float_swap_dims(tensor: FloatTensor<Self>, dim1: usize, dim2: usize) -> FloatTensor<Self> {
        // Swaps two dimensions. The swap is its own inverse, so backward
        // applies the same swap (with the dims reversed) to the gradient.
        #[derive(Debug)]
        struct SwapDim;
        // Retro-forward for recomputation under memory-bound checkpointing.
        #[derive(new, Debug)]
        struct RetroSwapDims<B: Backend> {
            input_id: NodeID,
            dim1: usize,
            dim2: usize,
            _backend: PhantomData<B>,
        }
        impl<B: Backend> RetroForward for RetroSwapDims<B> {
            fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
                let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);
                let out = B::float_swap_dims(input, self.dim1, self.dim2);
                states.save(out_node, out)
            }
        }
        impl<B: Backend> Backward<B, 1> for SwapDim {
            // The swapped dimension pair.
            type State = (usize, usize);
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (dim1, dim2) = ops.state;
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    // Undo the swap on the incoming gradient.
                    B::float_swap_dims(grad, dim2, dim1)
                });
            }
        }
        match SwapDim
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroSwapDims::<B>::new(tensor.node.id, dim1, dim2))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(prep) => prep.finish(
                (dim1, dim2),
                B::float_swap_dims(tensor.primitive, dim1, dim2),
            ),
            OpsKind::UnTracked(prep) => {
                prep.finish(B::float_swap_dims(tensor.primitive, dim1, dim2))
            }
        }
    }
    fn float_permute(tensor: FloatTensor<Self>, axes: &[usize]) -> FloatTensor<Self> {
        // Permutes dimensions by `axes`. Backward applies the *inverse*
        // permutation to the gradient.
        #[derive(Debug)]
        struct PermuteDim;
        // Retro-forward for recomputation under memory-bound checkpointing.
        #[derive(new, Debug)]
        struct RetroPermuteDims<B: Backend> {
            input_id: NodeID,
            axes: Vec<usize>,
            _backend: PhantomData<B>,
        }
        impl<B: Backend> RetroForward for RetroPermuteDims<B> {
            fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
                let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);
                let out = B::float_permute(input, &self.axes);
                states.save(out_node, out)
            }
        }
        impl<B: Backend> Backward<B, 1> for PermuteDim {
            // The forward permutation.
            type State = Vec<usize>;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let axes = ops.state;
                // Invert the permutation: if axes[i] == a then inverse[a] == i.
                let mut inverse = vec![0usize; axes.len()];
                axes.iter()
                    .enumerate()
                    .for_each(|(i, &axis)| inverse[axis] = i);
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    B::float_permute(grad, &inverse)
                });
            }
        }
        match PermuteDim
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroPermuteDims::<B>::new(tensor.node.id, axes.to_vec()))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(prep) => {
                prep.finish(axes.to_vec(), B::float_permute(tensor.primitive, axes))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_permute(tensor.primitive, axes)),
        }
    }
    fn float_flip(tensor: FloatTensor<Self>, axes: &[usize]) -> FloatTensor<Self> {
        // Flips the given axes. A flip is its own inverse, so backward
        // flips the gradient along the same axes.
        #[derive(Debug)]
        struct FlipDim;
        // Retro-forward for recomputation under memory-bound checkpointing.
        #[derive(new, Debug)]
        struct RetroFlipDims<B: Backend> {
            input_id: NodeID,
            axes: Vec<usize>,
            _backend: PhantomData<B>,
        }
        impl<B: Backend> RetroForward for RetroFlipDims<B> {
            fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
                let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);
                let out = B::float_flip(input, &self.axes);
                states.save(out_node, out)
            }
        }
        impl<B: Backend> Backward<B, 1> for FlipDim {
            // The flipped axes.
            type State = Vec<usize>;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let axes = ops.state;
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    B::float_flip(grad, &axes)
                });
            }
        }
        match FlipDim
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroFlipDims::<B>::new(tensor.node.id, axes.to_vec()))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(prep) => {
                prep.finish(axes.to_vec(), B::float_flip(tensor.primitive, axes))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_flip(tensor.primitive, axes)),
        }
    }
    fn float_reshape(tensor: FloatTensor<Self>, shape: Shape) -> FloatTensor<Self> {
        // Reshape. Backward reshapes the gradient back to the original
        // shape, after summing over any dim that was size 1 in the target
        // shape but got expanded in the gradient (broadcast by a later op).
        #[derive(Debug)]
        struct ReshapeDim;
        // Retro-forward for recomputation under memory-bound checkpointing.
        #[derive(new, Debug)]
        struct RetroReshape<B: Backend> {
            input_id: NodeID,
            shape: Shape,
            _backend: PhantomData<B>,
        }
        impl<B: Backend> RetroForward for RetroReshape<B> {
            fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
                let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);
                let out = B::float_reshape(input, self.shape.clone());
                states.save(out_node, out)
            }
        }
        impl<B: Backend> Backward<B, 1> for ReshapeDim {
            // (shape before the reshape, shape after the reshape).
            type State = (Shape, Shape);
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (shape_original, shape) = ops.state;
                let ndims_out = shape.num_dims();
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    let shape_grad = grad.shape();
                    let mut grad = grad;
                    // Reduce dims where the gradient was broadcast relative
                    // to the reshape output.
                    for i in 0..ndims_out {
                        if shape.dims[i] == 1 && shape_grad.dims[i] != 1 {
                            grad = B::float_sum_dim(grad, i);
                        }
                    }
                    B::float_reshape(grad, shape_original)
                });
            }
        }
        match ReshapeDim
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroReshape::<B>::new(tensor.node.id, shape.clone()))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(prep) => prep.finish(
                (tensor.primitive.shape(), shape.clone()),
                B::float_reshape(tensor.primitive, shape),
            ),
            OpsKind::UnTracked(prep) => prep.finish(B::float_reshape(tensor.primitive, shape)),
        }
    }
    fn float_gather(
        dim: usize,
        tensor: FloatTensor<Self>,
        indices: IntTensor<B>,
    ) -> FloatTensor<Self> {
        // Gather along `dim`. Backward scatters the gradient into a
        // zero tensor of the input's shape at the same indices.
        #[derive(Debug)]
        struct Gather;
        impl<B: Backend> Backward<B, 1> for Gather {
            // (dim, gather indices, input shape, input device) — everything
            // needed to build the scattered gradient.
            type State = (usize, IntTensor<B>, Shape, B::Device);
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (dim, indices, shape, device) = ops.state;
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    let zeros = B::float_zeros(shape, &device);
                    B::float_scatter(dim, zeros, indices, grad)
                });
            }
        }
        match Gather
            .prepare::<C>([tensor.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => prep.finish(
                (
                    dim,
                    indices.clone(),
                    tensor.primitive.shape(),
                    B::float_device(&tensor.primitive),
                ),
                B::float_gather(dim, tensor.primitive, indices),
            ),
            OpsKind::UnTracked(prep) => {
                prep.finish(B::float_gather(dim, tensor.primitive, indices))
            }
        }
    }
    fn float_scatter(
        dim: usize,
        tensor: FloatTensor<Self>,
        indices: IntTensor<B>,
        value: FloatTensor<Self>,
    ) -> FloatTensor<Self> {
        // Scatter `value` into `tensor` along `dim` at `indices`.
        // Backward: the tensor gradient is the output gradient with zeros
        // scattered at `indices`; the value gradient is the output gradient
        // scattered into a zero tensor of the value's shape.
        #[derive(Debug)]
        struct Scatter;
        impl<B: Backend> Backward<B, 2> for Scatter {
            // (dim, indices, tensor shape, value shape, device).
            type State = (usize, IntTensor<B>, Shape, Shape, B::Device);
            fn backward(
                self,
                ops: Ops<Self::State, 2>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (dim, indices, shape_lhs, shape_rhs, device) = ops.state;
                // Indices are consumed by both closures; duplicate per
                // tracked parent.
                let [indices_4lhs, indices_4rhs] = duplicate(&ops.parents, Some(indices));
                binary::<B, _, _>(
                    ops.parents,
                    ops.node,
                    grads,
                    |grad| {
                        let zeros = B::float_zeros(shape_lhs, &device);
                        B::float_scatter(dim, grad, indices_4lhs.unwrap(), zeros)
                    },
                    |grad| {
                        let zeros = B::float_zeros(shape_rhs, &device);
                        B::float_scatter(dim, zeros, indices_4rhs.unwrap(), grad)
                    },
                );
            }
        }
        match Scatter
            .prepare::<C>([tensor.node, value.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => prep.finish(
                (
                    dim,
                    indices.clone(),
                    tensor.primitive.shape(),
                    value.primitive.shape(),
                    B::float_device(&value.primitive),
                ),
                B::float_scatter(dim, tensor.primitive, indices, value.primitive),
            ),
            OpsKind::UnTracked(prep) => prep.finish(B::float_scatter(
                dim,
                tensor.primitive,
                indices,
                value.primitive,
            )),
        }
    }
    /// Select rows of `tensor` along `dim` given by `indices`, with autodiff support.
    fn float_select(
        tensor: FloatTensor<Self>,
        dim: usize,
        indices: IntTensor<B>,
    ) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Select;
        // Re-runs the forward select from checkpointed state (memory-bound op).
        #[derive(new, Debug)]
        struct RetroSelect<B: Backend> {
            input_id: NodeID,
            dim: usize,
            indices: IntTensor<B>,
        }
        impl<B: Backend> RetroForward for RetroSelect<B> {
            fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
                let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);
                let out = B::float_select(input, self.dim, self.indices.clone());
                states.save(out_node, out)
            }
        }
        impl<B: Backend> Backward<B, 1> for Select {
            // (dim, indices, input shape, device)
            type State = (usize, IntTensor<B>, Shape, B::Device);
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (dim, indices, shape, device) = ops.state;
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    // Route the output grad back to the selected positions of a
                    // zero tensor shaped like the input.
                    let zeros = B::float_zeros(shape, &device);
                    B::float_select_assign(zeros, dim, indices, grad)
                });
            }
        }
        match Select
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroSelect::<B>::new(tensor.node.id, dim, indices.clone()))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(prep) => prep.finish(
                (
                    dim,
                    indices.clone(),
                    tensor.primitive.shape(),
                    B::float_device(&tensor.primitive),
                ),
                B::float_select(tensor.primitive, dim, indices),
            ),
            OpsKind::UnTracked(prep) => {
                prep.finish(B::float_select(tensor.primitive, dim, indices))
            }
        }
    }
    /// Assign `value` into `tensor` at `indices` along `dim`, with autodiff support.
    fn float_select_assign(
        tensor: FloatTensor<Self>,
        dim: usize,
        indices: IntTensor<B>,
        value: FloatTensor<Self>,
    ) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct IndexSelectDimAssign;
        // Re-runs the forward select-assign from checkpointed state.
        #[derive(new, Debug)]
        struct RetroSelectAssign<B: Backend> {
            tensor_id: NodeID,
            dim: usize,
            indices: IntTensor<B>,
            value_id: NodeID,
        }
        impl<B: Backend> RetroForward for RetroSelectAssign<B> {
            fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
                let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);
                let value = states.get_state::<B::FloatTensorPrimitive>(&self.value_id);
                let out = B::float_select_assign(tensor, self.dim, self.indices.clone(), value);
                states.save(out_node, out)
            }
        }
        impl<B: Backend> Backward<B, 2> for IndexSelectDimAssign {
            // (dim, indices)
            type State = (usize, IntTensor<B>);
            fn backward(
                self,
                ops: Ops<Self::State, 2>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (dim, indices) = ops.state;
                binary::<B, _, _>(
                    ops.parents,
                    ops.node,
                    grads,
                    // lhs gradient: the output grad passes straight through.
                    |grad| grad,
                    // rhs gradient: pick the grad at the assigned positions.
                    |grad| B::float_select(grad, dim, indices),
                );
            }
        }
        match IndexSelectDimAssign
            .prepare::<C>([tensor.node.clone(), value.node.clone()])
            .memory_bound()
            .retro_forward(RetroSelectAssign::<B>::new(
                tensor.node.id,
                dim,
                indices.clone(),
                value.node.id,
            ))
            .parents([&tensor, &value])
            .stateful()
        {
            OpsKind::Tracked(prep) => prep.finish(
                (dim, indices.clone()),
                B::float_select_assign(tensor.primitive, dim, indices, value.primitive),
            ),
            OpsKind::UnTracked(prep) => prep.finish(B::float_select_assign(
                tensor.primitive,
                dim,
                indices,
                value.primitive,
            )),
        }
    }
    /// Slice `tensor` by `ranges`, with autodiff support.
    fn float_slice(
        tensor: FloatTensor<Self>,
        ranges: &[core::ops::Range<usize>],
    ) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Index;
        // Re-runs the forward slice from checkpointed state (memory-bound op).
        #[derive(new, Debug)]
        struct RetroSlice<B: Backend> {
            tensor_id: NodeID,
            ranges: Vec<core::ops::Range<usize>>,
            _backend: PhantomData<B>,
        }
        impl<B: Backend> RetroForward for RetroSlice<B> {
            fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
                let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);
                let out = B::float_slice(tensor, &self.ranges);
                states.save(out_node, out)
            }
        }
        impl<B: Backend> Backward<B, 1> for Index {
            // (ranges, input shape, device)
            type State = (Vec<core::ops::Range<usize>>, Shape, B::Device);
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (ranges, shape, device) = ops.state;
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    // Write the output grad back into the sliced region of a
                    // zero tensor shaped like the input.
                    let zeros = B::float_zeros(shape, &device);
                    B::float_slice_assign(zeros, &ranges, grad)
                });
            }
        }
        match Index
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroSlice::<B>::new(tensor.node.id, ranges.to_vec()))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(prep) => prep.finish(
                (
                    ranges.to_vec(),
                    tensor.primitive.shape(),
                    B::float_device(&tensor.primitive),
                ),
                B::float_slice(tensor.primitive, ranges),
            ),
            OpsKind::UnTracked(prep) => prep.finish(B::float_slice(tensor.primitive, ranges)),
        }
    }
    /// Assign `value` into the `ranges` region of `tensor`, with autodiff support.
    fn float_slice_assign(
        tensor: FloatTensor<Self>,
        ranges: &[core::ops::Range<usize>],
        value: FloatTensor<Self>,
    ) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct SliceAssign;
        // Re-runs the forward slice-assign from checkpointed state.
        #[derive(new, Debug)]
        struct RetroSliceAssign<B: Backend> {
            tensor_id: NodeID,
            ranges: Vec<core::ops::Range<usize>>,
            value_id: NodeID,
            _backend: PhantomData<B>,
        }
        impl<B: Backend> RetroForward for RetroSliceAssign<B> {
            fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
                let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);
                let value = states.get_state::<B::FloatTensorPrimitive>(&self.value_id);
                let out = B::float_slice_assign(tensor, &self.ranges, value);
                states.save(out_node, out)
            }
        }
        impl<B: Backend> Backward<B, 2> for SliceAssign {
            // (ranges, rhs shape, device)
            type State = (Vec<core::ops::Range<usize>>, Shape, B::Device);
            fn backward(
                self,
                ops: Ops<Self::State, 2>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (ranges, shape_rhs, device) = ops.state;
                // Ranges are needed by both parent closures; duplicate clones
                // them only for the parents that are actually tracked.
                let [ranges_4lhs, ranges_4rhs] = duplicate(&ops.parents, Some(ranges));
                binary::<B, _, _>(
                    ops.parents,
                    ops.node,
                    grads,
                    |grad| {
                        // lhs gradient: zero out the overwritten region.
                        let zeros = B::float_zeros(shape_rhs, &device);
                        B::float_slice_assign(grad, &ranges_4lhs.unwrap(), zeros)
                    },
                    // rhs gradient: the grad of the assigned region itself.
                    |grad| B::float_slice(grad, &ranges_4rhs.unwrap()),
                );
            }
        }
        match SliceAssign
            .prepare::<C>([tensor.node.clone(), value.node.clone()])
            .memory_bound()
            .retro_forward(RetroSliceAssign::<B>::new(
                tensor.node.id,
                ranges.to_vec(),
                value.node.id,
            ))
            .parents([&tensor, &value])
            .stateful()
        {
            OpsKind::Tracked(prep) => prep.finish(
                (
                    ranges.to_vec(),
                    value.primitive.shape(),
                    B::float_device(&value.primitive),
                ),
                B::float_slice_assign(tensor.primitive, ranges, value.primitive),
            ),
            OpsKind::UnTracked(prep) => prep.finish(B::float_slice_assign(
                tensor.primitive,
                ranges,
                value.primitive,
            )),
        }
    }
    /// Element-wise select: where `mask` is true take `source`, else `tensor`,
    /// with autodiff support for both float inputs.
    fn float_mask_where(
        tensor: FloatTensor<Self>,
        mask: BoolTensor<Self>,
        source: FloatTensor<Self>,
    ) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct MaskWhere;
        impl<B: Backend> Backward<B, 2> for MaskWhere {
            // (mask, lhs shape, rhs shape, device)
            type State = (BoolTensor<B>, Shape, Shape, B::Device);
            fn backward(
                self,
                ops: Ops<Self::State, 2>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (mask, shape_lhs, shape_rhs, device) = ops.state;
                // The mask is needed by both parent closures; duplicate clones
                // it only for the parents that are actually tracked.
                let [mask_4lhs, mask_4rhs] = duplicate(&ops.parents, Some(mask));
                binary::<B, _, _>(
                    ops.parents,
                    ops.node,
                    grads,
                    |grad| {
                        // lhs gradient: zero where the mask selected `source`,
                        // then reduce any broadcast dims back to the lhs shape.
                        let zeros = B::float_zeros(shape_lhs.clone(), &device);
                        let grad = B::float_mask_where(grad, mask_4lhs.unwrap(), zeros);
                        broadcast_shape::<B>(grad, &shape_lhs)
                    },
                    |grad| {
                        // rhs gradient: keep grad only where the mask selected
                        // `source`, then reduce back to the rhs shape.
                        let zeros = B::float_zeros(shape_rhs.clone(), &device);
                        let grad = B::float_mask_where(zeros, mask_4rhs.unwrap(), grad);
                        broadcast_shape::<B>(grad, &shape_rhs)
                    },
                );
            }
        }
        match MaskWhere
            .prepare::<C>([tensor.node, source.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => prep.finish(
                (
                    mask.clone(),
                    tensor.primitive.shape(),
                    source.primitive.shape(),
                    B::float_device(&source.primitive),
                ),
                B::float_mask_where(tensor.primitive, mask, source.primitive),
            ),
            OpsKind::UnTracked(prep) => prep.finish(B::float_mask_where(
                tensor.primitive,
                mask,
                source.primitive,
            )),
        }
    }
    /// Fill `tensor` with the scalar `value` where `mask` is true, with
    /// autodiff support (the filled positions receive zero gradient).
    fn float_mask_fill(
        tensor: FloatTensor<Self>,
        mask: BoolTensor<B>,
        value: FloatElem<B>,
    ) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct MaskFill;
        impl<B: Backend> Backward<B, 1> for MaskFill {
            // The mask captured at forward time.
            type State = BoolTensor<B>;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    // Filled positions did not depend on the input: zero them.
                    B::float_mask_fill(grad, ops.state, 0.elem())
                });
            }
        }
        match MaskFill
            .prepare::<C>([tensor.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => prep.finish(
                mask.clone(),
                B::float_mask_fill(tensor.primitive, mask, value),
            ),
            OpsKind::UnTracked(prep) => {
                prep.finish(B::float_mask_fill(tensor.primitive, mask, value))
            }
        }
    }
    // Comparison ops return boolean tensors and are not differentiable, so
    // they forward directly to the inner backend without graph bookkeeping.
    fn float_equal(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {
        B::float_equal(lhs.primitive, rhs.primitive)
    }
    fn float_equal_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {
        B::float_equal_elem(lhs.primitive, rhs)
    }
    fn float_greater(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {
        B::float_greater(lhs.primitive, rhs.primitive)
    }
    fn float_greater_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {
        B::float_greater_elem(lhs.primitive, rhs)
    }
    fn float_greater_equal(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {
        B::float_greater_equal(lhs.primitive, rhs.primitive)
    }
    fn float_greater_equal_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {
        B::float_greater_equal_elem(lhs.primitive, rhs)
    }
    fn float_lower(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {
        B::float_lower(lhs.primitive, rhs.primitive)
    }
    fn float_lower_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {
        B::float_lower_elem(lhs.primitive, rhs)
    }
    fn float_lower_equal(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {
        B::float_lower_equal(lhs.primitive, rhs.primitive)
    }
    fn float_lower_equal_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {
        B::float_lower_equal_elem(lhs.primitive, rhs)
    }
fn float_detach(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
// When we detach a tensor, we remove it from the graph, but we still want to keep the
// `require_grad` setting.
let is_require_grad = Self::float_is_require_grad(&tensor);
let tensor = AutodiffTensor::new(tensor.primitive);
match is_require_grad {
true => tensor.require_grad(),
false => tensor,
}
}
fn float_set_require_grad(tensor: FloatTensor<Self>, require_grad: bool) -> FloatTensor<Self> {
if require_grad {
return tensor.require_grad();
}
AutodiffTensor::new(tensor.primitive)
}
    /// Whether `tensor`'s graph node is marked as requiring gradients.
    fn float_is_require_grad(tensor: &FloatTensor<Self>) -> bool {
        matches!(tensor.node.requirement, Requirement::Grad)
    }
    /// Mean over all elements, with autodiff support.
    fn float_mean(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Mean;
        impl<B: Backend> Backward<B, 1> for Mean {
            // Input shape, needed to spread the scalar grad back out.
            type State = Shape;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    let shape = ops.state;
                    // d(mean)/dx_i = 1/N for every element.
                    let val = 1_f64 / shape.num_elements() as f64;
                    let ones = B::float_ones(shape, &B::float_device(&grad));
                    let val = B::float_mul_scalar(ones, val.elem());
                    // Unsqueeze the scalar grad so it broadcasts over the input shape.
                    let grad = unsqueeze_like::<B>(grad, val.shape());
                    B::float_mul(val, grad)
                });
            }
        }
        match Mean.prepare::<C>([tensor.node]).compute_bound().stateful() {
            OpsKind::Tracked(prep) => {
                prep.finish(tensor.primitive.shape(), B::float_mean(tensor.primitive))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_mean(tensor.primitive)),
        }
    }
    /// Sum over all elements, with autodiff support.
    fn float_sum(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Sum;
        impl<B: Backend> Backward<B, 1> for Sum {
            // Input shape, needed to spread the scalar grad back out.
            type State = Shape;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    // d(sum)/dx_i = 1: broadcast the scalar grad over the input shape.
                    let val = B::float_ones(ops.state, &B::float_device(&grad));
                    let grad = unsqueeze_like::<B>(grad, val.shape());
                    B::float_mul(val, grad)
                });
            }
        }
        match Sum.prepare::<C>([tensor.node]).compute_bound().stateful() {
            OpsKind::Tracked(prep) => {
                prep.finish(tensor.primitive.shape(), B::float_sum(tensor.primitive))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_sum(tensor.primitive)),
        }
    }
    /// Mean along dimension `dim`, with autodiff support.
    fn float_mean_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct MeanDim;
        // (input shape, reduced dim)
        impl<B: Backend> Backward<B, 1> for MeanDim {
            type State = (Shape, usize);
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (shape, dim) = ops.state;
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    // Each input element contributes 1/size(dim) to the mean.
                    let val = 1_f64 / shape.dims[dim] as f64;
                    let ones = B::float_ones(shape, &B::float_device(&grad));
                    let val = B::float_mul_scalar(ones, B::FloatElem::from_elem(val));
                    // Sum the grad along `dim` so it broadcasts back over the input.
                    let grad = B::float_sum_dim(grad, dim);
                    B::float_mul(val, grad)
                });
            }
        }
        match MeanDim
            .prepare::<C>([tensor.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => prep.finish(
                (tensor.primitive.shape(), dim),
                B::float_mean_dim(tensor.primitive, dim),
            ),
            OpsKind::UnTracked(prep) => prep.finish(B::float_mean_dim(tensor.primitive, dim)),
        }
    }
    /// Sum along dimension `dim`, with autodiff support.
    fn float_sum_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct SumDim;
        impl<B: Backend> Backward<B, 1> for SumDim {
            // (input shape, reduced dim)
            type State = (Shape, usize);
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (shape, dim) = ops.state;
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    // Broadcast the reduced grad back over the input shape.
                    let ones = B::float_ones(shape, &B::float_device(&grad));
                    let grad = B::float_sum_dim(grad, dim);
                    B::float_mul(ones, grad)
                });
            }
        }
        match SumDim
            .prepare::<C>([tensor.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => prep.finish(
                (tensor.primitive.shape(), dim),
                B::float_sum_dim(tensor.primitive, dim),
            ),
            OpsKind::UnTracked(prep) => prep.finish(B::float_sum_dim(tensor.primitive, dim)),
        }
    }
    // Argmax/argmin return integer indices and are not differentiable, so they
    // forward directly to the inner backend.
    fn float_argmax(tensor: FloatTensor<Self>, dim: usize) -> IntTensor<B> {
        B::float_argmax(tensor.primitive, dim)
    }
    fn float_argmin(tensor: FloatTensor<Self>, dim: usize) -> IntTensor<B> {
        B::float_argmin(tensor.primitive, dim)
    }
    /// Element-wise exponential, with autodiff support.
    fn float_exp(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Exp;
        retro_unary!(RetroExp, B::float_exp);
        impl<B: Backend> Backward<B, 1> for Exp {
            // NodeID of the checkpointed input.
            type State = NodeID;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                let input = checkpointer.retrieve_node_output(ops.state);
                // d(exp(x))/dx = exp(x); recompute the output from the input.
                let output = B::float_exp(input);
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    B::float_mul(grad, output)
                });
            }
        }
        match Exp
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroExp::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                let state = prep.checkpoint(&tensor);
                prep.finish(state, B::float_exp(tensor.primitive))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_exp(tensor.primitive)),
        }
    }
    /// Element-wise natural logarithm, with autodiff support.
    fn float_log(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Log;
        retro_unary!(RetroLog, B::float_log);
        impl<B: Backend> Backward<B, 1> for Log {
            // NodeID of the checkpointed input.
            type State = NodeID;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                let input = checkpointer.retrieve_node_output(ops.state);
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    // d(ln(x))/dx = 1/x, expressed as x^(-1).
                    let value = B::float_powf_scalar(input, -1.0);
                    B::float_mul(grad, value)
                });
            }
        }
        match Log
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroLog::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                let state = prep.checkpoint(&tensor);
                prep.finish(state, B::float_log(tensor.primitive))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_log(tensor.primitive)),
        }
    }
    /// Element-wise `ln(1 + x)`, with autodiff support.
    fn float_log1p(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Log1P;
        retro_unary!(RetroLog1P, B::float_log1p);
        impl<B: Backend> Backward<B, 1> for Log1P {
            // NodeID of the checkpointed input.
            type State = NodeID;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                let input = checkpointer.retrieve_node_output(ops.state);
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    // d(ln(1+x))/dx = 1/(1+x).
                    let value = B::float_add_scalar(input, 1.elem());
                    let value = B::float_powf_scalar(value, -1.0);
                    B::float_mul(grad, value)
                });
            }
        }
        match Log1P
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroLog1P::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                let state = prep.checkpoint(&tensor);
                prep.finish(state, B::float_log1p(tensor.primitive))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_log1p(tensor.primitive)),
        }
    }
    /// Element-wise power with a scalar exponent (`x^value`), with autodiff support.
    fn float_powf_scalar(tensor: FloatTensor<Self>, value: f32) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct PowfScalar;
        // Re-runs the forward pow from checkpointed state (memory-bound op).
        #[derive(new, Debug)]
        struct RetroPowfScalar<B: Backend> {
            lhs_id: NodeID,
            rhs: f32,
            _backend: PhantomData<B>,
        }
        impl<B: Backend> RetroForward for RetroPowfScalar<B> {
            fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
                let lhs = states.get_state::<B::FloatTensorPrimitive>(&self.lhs_id);
                let out = B::float_powf_scalar(lhs, self.rhs);
                states.save(out_node, out)
            }
        }
        impl<B: Backend> Backward<B, 1> for PowfScalar {
            // (checkpointed input node, exponent)
            type State = (NodeID, f32);
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                let (tensor_id, value) = ops.state;
                let tensor = checkpointer.retrieve_node_output(tensor_id);
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    // d(x^n)/dx = n * x^(n-1).
                    let tmp = B::float_powf_scalar(tensor, value - 1.0);
                    let value = B::float_mul_scalar(tmp, value.elem());
                    B::float_mul(grad, value)
                });
            }
        }
        match PowfScalar
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroPowfScalar::<B>::new(tensor.node.id, value))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                let state = (prep.checkpoint(&tensor), value);
                prep.finish(state, B::float_powf_scalar(tensor.primitive, value))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_powf_scalar(tensor.primitive, value)),
        }
    }
    /// Element-wise square root, with autodiff support.
    fn float_sqrt(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Sqrt;
        retro_unary!(RetroSqrt, B::float_sqrt);
        impl<B: Backend> Backward<B, 1> for Sqrt {
            // NodeID of the checkpointed input.
            type State = NodeID;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                let input = checkpointer.retrieve_node_output(ops.state);
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    // d(sqrt(x))/dx = 1 / (2 * sqrt(x)) = x^(-1/2) / 2.
                    let value = B::float_div_scalar(B::float_powf_scalar(input, -0.5), 2.elem());
                    B::float_mul(grad, value)
                });
            }
        }
        match Sqrt
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroSqrt::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                let state = prep.checkpoint(&tensor);
                prep.finish(state, B::float_sqrt(tensor.primitive))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_sqrt(tensor.primitive)),
        }
    }
    /// Element-wise absolute value, with autodiff support.
    fn float_abs(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Abs;
        retro_unary!(RetroAbs, B::float_abs);
        impl<B: Backend> Backward<B, 1> for Abs {
            // NodeID of the checkpointed input.
            type State = NodeID;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                let tensor: B::FloatTensorPrimitive = checkpointer.retrieve_node_output(ops.state);
                // d(|x|)/dx = sign(x).
                let state = B::float_sign(tensor);
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    B::float_mul(grad, state)
                });
            }
        }
        match Abs
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroAbs::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                let state = prep.checkpoint(&tensor);
                prep.finish(state, B::float_abs(tensor.primitive))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_abs(tensor.primitive)),
        }
    }
    /// Element-wise cosine, with autodiff support.
    fn float_cos(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Cos;
        retro_unary!(RetroCos, B::float_cos);
        impl<B: Backend> Backward<B, 1> for Cos {
            // NodeID of the checkpointed input.
            type State = NodeID;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                let input = checkpointer.retrieve_node_output(ops.state);
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    // d(cos(x))/dx = -sin(x).
                    let value = B::float_neg(B::float_sin(input));
                    B::float_mul(grad, value)
                });
            }
        }
        match Cos
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroCos::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                let state = prep.checkpoint(&tensor);
                prep.finish(state, B::float_cos(tensor.primitive))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_cos(tensor.primitive)),
        }
    }
    /// Element-wise sine, with autodiff support.
    fn float_sin(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Sin;
        retro_unary!(RetroSin, B::float_sin);
        impl<B: Backend> Backward<B, 1> for Sin {
            // NodeID of the checkpointed input.
            type State = NodeID;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                let state = checkpointer.retrieve_node_output(ops.state);
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    // d(sin(x))/dx = cos(x).
                    let value = B::float_cos(state);
                    B::float_mul(grad, value)
                });
            }
        }
        match Sin
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroSin::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                let state = prep.checkpoint(&tensor);
                prep.finish(state, B::float_sin(tensor.primitive))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_sin(tensor.primitive)),
        }
    }
    /// Element-wise hyperbolic tangent, with autodiff support.
    fn float_tanh(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Tanh;
        retro_unary!(RetroTanh, B::float_tanh);
        impl<B: Backend> Backward<B, 1> for Tanh {
            // NodeID of the checkpointed input.
            type State = NodeID;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                let input = checkpointer.retrieve_node_output(ops.state);
                // Recompute the forward output from the checkpointed input.
                let state = B::float_tanh(input);
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    // d(tanh(x))/dx = 1 - tanh(x)^2.
                    let value = B::float_add_scalar(
                        B::float_neg(B::float_powf_scalar(state, 2.0)),
                        1.elem(),
                    );
                    B::float_mul(grad, value)
                });
            }
        }
        match Tanh
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroTanh::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                let state = prep.checkpoint(&tensor);
                prep.finish(state, B::float_tanh(tensor.primitive))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_tanh(tensor.primitive)),
        }
    }
    /// Element-wise rounding, with autodiff support.
    ///
    /// The derivative of `round` is zero almost everywhere, so the backward
    /// pass yields a zero gradient of the input shape.
    fn float_round(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Round;
        retro_unary!(RetroRound, B::float_round);
        impl<B: Backend> Backward<B, 1> for Round {
            // (input shape, device), used to build the zero gradient.
            type State = (Shape, B::Device);
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (shape, device) = ops.state;
                unary::<B, _>(ops.parents, ops.node, grads, |_grad| {
                    B::float_zeros(shape, &device)
                })
            }
        }
        match Round
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroRound::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(preps) => preps.finish(
                (tensor.primitive.shape(), B::float_device(&tensor.primitive)),
                B::float_round(tensor.primitive),
            ),
            OpsKind::UnTracked(preps) => preps.finish(B::float_round(tensor.primitive)),
        }
    }
    /// Element-wise floor, with autodiff support.
    ///
    /// The derivative of `floor` is zero almost everywhere, so the backward
    /// pass yields a zero gradient of the input shape.
    fn float_floor(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Floor;
        retro_unary!(RetroFloor, B::float_floor);
        impl<B: Backend> Backward<B, 1> for Floor {
            // (input shape, device), used to build the zero gradient.
            type State = (Shape, B::Device);
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (shape, device) = ops.state;
                unary::<B, _>(ops.parents, ops.node, grads, |_grad| {
                    B::float_zeros(shape, &device)
                })
            }
        }
        match Floor
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroFloor::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(preps) => preps.finish(
                (tensor.primitive.shape(), B::float_device(&tensor.primitive)),
                B::float_floor(tensor.primitive),
            ),
            OpsKind::UnTracked(preps) => preps.finish(B::float_floor(tensor.primitive)),
        }
    }
fn float_ceil(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
#[derive(Debug)]
struct Ceil;
retro_unary!(RetroCeil, B::float_ceil);
impl<B: Backend> Backward<B, 1> for Ceil {
type State = (Shape, B::Device);
fn backward(
self,
ops: Ops<Self::State, 1>,
grads: &mut Gradients,
_checkpointer: &mut Checkpointer,
) {
let (shape, device) = ops.state;
unary::<B, _>(ops.parents, ops.node, grads, |_grad| {
B::float_zeros(shape, &device)
})
}
}
match Ceil
.prepare::<C>([tensor.node.clone()])
.memory_bound()
.retro_forward(RetroCeil::<B>::new(tensor.node.id))
.parents([&tensor])
.stateful()
{
OpsKind::Tracked(preps) => preps.finish(
(tensor.primitive.shape(), B::float_device(&tensor.primitive)),
B::float_floor(tensor.primitive),
),
OpsKind::UnTracked(preps) => preps.finish(B::float_floor(tensor.primitive)),
}
}
    /// Element-wise error function, with autodiff support.
    fn float_erf(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Erf;
        retro_unary!(RetroErf, B::float_erf);
        impl<B: Backend> Backward<B, 1> for Erf {
            // NodeID of the checkpointed input.
            type State = NodeID;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    let ops = checkpointer.retrieve_node_output(ops.state);
                    // d(erf(x))/dx = 2/sqrt(pi) * exp(-x^2).
                    let exponent = B::float_neg(B::float_powf_scalar(ops, 2.0));
                    let numerator = B::float_mul_scalar(B::float_exp(exponent), 2.0.elem());
                    let denominator = core::f64::consts::PI.sqrt().elem();
                    let value = B::float_div_scalar(numerator, denominator);
                    B::float_mul(grad, value)
                });
            }
        }
        match Erf
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroErf::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                let state = prep.checkpoint(&tensor);
                prep.finish(state, B::float_erf(tensor.primitive))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_erf(tensor.primitive)),
        }
    }
    /// Concatenate `tensors` along `dim`, with autodiff support.
    fn float_cat(tensors: Vec<FloatTensor<Self>>, dim: usize) -> FloatTensor<Self> {
        // Custom backward step: slices the output gradient back into one piece
        // per concatenated input.
        #[derive(new, Debug)]
        struct CatStep<B: Backend> {
            nodes: Vec<Option<NodeRef>>,
            // Size of each input tensor along `dim`; used to split the output
            // gradient back into per-input slices.
            dim_sizes: Vec<usize>,
            output: NodeRef,
            phantom: PhantomData<B>,
            dim: usize,
        }
        impl<B: Backend> Step for CatStep<B> {
            fn step(self: Box<Self>, grads: &mut Gradients, _checkpointer: &mut Checkpointer) {
                let grad = grads.consume::<B>(&self.output);
                // Full ranges over every dimension; only `dim` is narrowed below.
                let ranges: Vec<_> = grad.shape().dims.iter().map(|v| 0..*v).collect();
                let mut current_index = 0;
                self.nodes
                    .into_iter()
                    .zip(self.dim_sizes)
                    // Skip untracked inputs (their node entry is None).
                    .filter_map(|(node, dim_size)| node.map(|node| (node, dim_size)))
                    .for_each(|(node, dim_size)| {
                        let mut ranges = ranges.clone();
                        ranges[self.dim] = current_index..dim_size + current_index;
                        current_index += dim_size;
                        grads.register::<B>(node.id, B::float_slice(grad.clone(), &ranges));
                    });
            }
            fn node(&self) -> NodeID {
                self.output.id
            }
            fn parents(&self) -> Vec<NodeID> {
                self.nodes
                    .iter()
                    .filter_map(|node| node.clone())
                    .map(|node| node.id)
                    .collect()
            }
            fn depth(&self) -> usize {
                self.output.order
            }
        }
        let mut nodes = Vec::with_capacity(tensors.len());
        let mut primitives = Vec::with_capacity(tensors.len());
        let mut dim_sizes = Vec::with_capacity(tensors.len());
        tensors.into_iter().for_each(|tensor| {
            dim_sizes.push(tensor.primitive.shape().dims[dim]);
            nodes.push(tensor.node);
            primitives.push(tensor.primitive);
        });
        let requirement = Requirement::from_nodes(&nodes);
        // For simplicity, this operation does not checkpoint anything
        let cat_computing_property = ComputingProperty::Ambiguous;
        let checkpointer_builder = CheckpointerBuilder::default();
        let output = B::float_cat(primitives, dim);
        // No input requires gradients: skip registering a backward step.
        if requirement.is_none() {
            return AutodiffTensor::from_parents(
                output,
                &nodes,
                requirement,
                cat_computing_property,
            );
        }
        let output =
            AutodiffTensor::from_parents(output, &nodes, requirement, cat_computing_property);
        let nodes = nodes
            .into_iter()
            .map(|node| node.clone_if_require_grad())
            .collect::<Vec<_>>();
        let ops = CatStep::<B>::new(nodes, dim_sizes, output.node.clone(), dim);
        output.register_step(ops, checkpointer_builder)
    }
    /// Max along dimension `dim`, with autodiff support.
    ///
    /// When tracked, the max indices are computed anyway so the backward pass
    /// (shared `MaxMinDim` op) can route gradients to the argmax positions.
    fn float_max_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {
        match MaxMinDim
            .prepare::<C>([tensor.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => {
                let shape = tensor.primitive.shape();
                let (tensor, index) = B::float_max_dim_with_indices(tensor.primitive, dim);
                prep.finish((index, shape), tensor)
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_max_dim(tensor.primitive, dim)),
        }
    }
    /// Max along `dim` returning both values and indices, with autodiff support
    /// for the values (indices are not differentiable).
    fn float_max_dim_with_indices(
        tensor: FloatTensor<Self>,
        dim: usize,
    ) -> (FloatTensor<Self>, IntTensor<B>) {
        match MaxMinDim
            .prepare::<C>([tensor.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => {
                let shape = tensor.primitive.shape();
                let (tensor, index) = B::float_max_dim_with_indices(tensor.primitive, dim);
                let tensor = prep.finish((index.clone(), shape), tensor);
                (tensor, index)
            }
            OpsKind::UnTracked(prep) => {
                let (tensor, index) = B::float_max_dim_with_indices(tensor.primitive, dim);
                let tensor = prep.finish(tensor);
                (tensor, index)
            }
        }
    }
    /// Min along dimension `dim`, with autodiff support.
    ///
    /// When tracked, the min indices are computed anyway so the backward pass
    /// (shared `MaxMinDim` op) can route gradients to the argmin positions.
    fn float_min_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {
        match MaxMinDim
            .prepare::<C>([tensor.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => {
                let shape = tensor.primitive.shape();
                let (tensor, index) = B::float_min_dim_with_indices(tensor.primitive, dim);
                prep.finish((index, shape), tensor)
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_min_dim(tensor.primitive, dim)),
        }
    }
    /// Min along `dim` returning both values and indices, with autodiff support
    /// for the values (indices are not differentiable).
    fn float_min_dim_with_indices(
        tensor: FloatTensor<Self>,
        dim: usize,
    ) -> (FloatTensor<Self>, IntTensor<B>) {
        match MaxMinDim
            .prepare::<C>([tensor.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => {
                let shape = tensor.primitive.shape();
                let (tensor, index) = B::float_min_dim_with_indices(tensor.primitive, dim);
                let tensor = prep.finish((index.clone(), shape), tensor);
                (tensor, index)
            }
            OpsKind::UnTracked(prep) => {
                let (tensor, index) = B::float_min_dim_with_indices(tensor.primitive, dim);
                let tensor = prep.finish(tensor);
                (tensor, index)
            }
        }
    }
    /// Convert to an int tensor; the result leaves the autodiff graph
    /// (integer tensors are not differentiable).
    fn float_into_int(tensor: FloatTensor<Self>) -> <Autodiff<B> as Backend>::IntTensorPrimitive {
        B::float_into_int(tensor.primitive)
    }
    /// Element-wise power with a tensor exponent (`lhs^rhs`), with autodiff
    /// support for both operands, including broadcasting.
    fn float_powf(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct PowF;
        retro_binary!(RetroPowf, B::float_powf);
        impl<B: Backend> Backward<B, 2> for PowF {
            // (lhs checkpoint node, rhs checkpoint node, broadcast info)
            type State = (NodeID, NodeID, BinaryOpsBroadcast);
            fn backward(
                self,
                ops: Ops<Self::State, 2>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                let (lhs_id, rhs_id, broadcast) = ops.state;
                let lhs: B::FloatTensorPrimitive = checkpointer.retrieve_node_output(lhs_id);
                let rhs: B::FloatTensorPrimitive = checkpointer.retrieve_node_output(rhs_id);
                // Both lhs and rhs are needed for both lhs and rhs gradients, but we clone them
                // the number of times required by the parents specification.
                let [rhs_4lhs, rhs_4rhs] = duplicate(&ops.parents, Some(rhs));
                let [lhs_4lhs, lhs_4rhs] = duplicate(&ops.parents, Some(lhs));
                binary::<B, _, _>(
                    ops.parents,
                    ops.node,
                    grads,
                    |grad| {
                        // d(lhs^rhs)/d(lhs) = rhs * lhs^(rhs-1) * grad
                        let rhs1 = rhs_4lhs.unwrap();
                        let rhs2 = rhs1.clone();
                        let lhs = lhs_4lhs.unwrap();
                        let tmp = B::float_powf(
                            lhs,
                            B::float_sub_scalar(rhs1, B::FloatElem::from_elem(1.0)),
                        );
                        let value = B::float_mul(tmp, rhs2);
                        let grad = B::float_mul(grad, value);
                        // Reduce broadcast dims back to the lhs shape.
                        broadcast.backward_lhs::<B>(grad)
                    },
                    |grad| {
                        // d(lhs^rhs)/d(rhs) = lhs^rhs * ln(lhs) * grad
                        let rhs = rhs_4rhs.unwrap();
                        let lhs1 = lhs_4rhs.unwrap();
                        let lhs2 = lhs1.clone();
                        let tmp = B::float_powf(lhs1, rhs);
                        let value = B::float_mul(tmp, B::float_log(lhs2));
                        let grad = B::float_mul(grad, value);
                        // Reduce broadcast dims back to the rhs shape.
                        broadcast.backward_rhs::<B>(grad)
                    },
                );
            }
        }
        let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);
        match PowF
            .prepare::<C>([lhs.node.clone(), rhs.node.clone()])
            .memory_bound()
            .retro_forward(RetroPowf::<B>::new(lhs.node.id, rhs.node.id))
            .parents([&lhs, &rhs])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                let lhs_state = prep.checkpoint(&lhs);
                let rhs_state = prep.checkpoint(&rhs);
                prep.finish(
                    (lhs_state, rhs_state, broadcast),
                    B::float_powf(lhs.primitive, rhs.primitive),
                )
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_powf(lhs.primitive, rhs.primitive)),
        }
    }
    /// Element-wise sign, with autodiff support (zero gradient everywhere).
    fn float_sign(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Sign;
        retro_unary!(RetroSign, B::float_sign);
        impl<B: Backend> Backward<B, 1> for Sign {
            type State = ();
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                unary::<B, _>(ops.parents, ops.node, grads, |grad|
                    // Always return 0 because the derivative of the sign function
                    // does not contribute to gradient updates in a meaningful way.
                    B::float_mul_scalar(grad, 0.elem()));
            }
        }
        // Stateless: the backward pass needs no saved forward data.
        Sign.prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroSign::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateless(B::float_sign(tensor.primitive))
    }
fn float_expand(tensor: FloatTensor<Self>, shape: Shape) -> FloatTensor<Self> {
    // D1: tensor, D2: shape
    #[derive(Debug)]
    struct ExpandDim;

    #[derive(new, Debug)]
    struct RetroExpand<B: Backend> {
        input_id: NodeID,
        shape: Shape,
        _backend: PhantomData<B>,
    }

    impl<B: Backend> RetroForward for RetroExpand<B> {
        fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
            let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);
            let out = B::float_expand(input, self.shape.clone());
            states.save(out_node, out)
        }
    }

    impl<B: Backend> Backward<B, 1> for ExpandDim {
        // (input shape, expanded/output shape), captured at forward time.
        type State = (Shape, Shape);

        fn backward(
            self,
            ops: Ops<Self::State, 1>,
            grads: &mut Gradients,
            _checkpointer: &mut Checkpointer,
        ) {
            let (shape_in, shape_out) = ops.state;
            let ndims_in = shape_in.num_dims();
            let ndims_out = shape_out.num_dims();
            debug_assert!(ndims_out >= ndims_in);

            // Right-align the input shape against the output rank, padding
            // the leading dimensions with 1 (broadcasting alignment).
            let offset = ndims_out - ndims_in;
            let mut aligned = vec![1; ndims_out];
            for (dst, &src) in aligned[offset..].iter_mut().zip(shape_in.dims.iter()) {
                *dst = src;
            }

            unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                let grad_shape = grad.shape();
                // Sum the gradient over every axis that was broadcast
                // (size 1 in the input but larger in the gradient).
                let summed = (0..ndims_out).fold(grad, |g, axis| {
                    if aligned[axis] == 1 && grad_shape.dims[axis] != 1 {
                        B::float_sum_dim(g, axis)
                    } else {
                        g
                    }
                });
                B::float_reshape(summed, shape_in)
            });
        }
    }

    match ExpandDim
        .prepare::<C>([tensor.node.clone()])
        .memory_bound()
        .retro_forward(RetroExpand::<B>::new(tensor.node.id, shape.clone()))
        .parents([&tensor])
        .stateful()
    {
        OpsKind::Tracked(prep) => prep.finish(
            (tensor.primitive.shape(), shape.clone()),
            B::float_expand(tensor.primitive, shape),
        ),
        OpsKind::UnTracked(prep) => prep.finish(B::float_expand(tensor.primitive, shape)),
    }
}
fn float_sort(tensor: FloatTensor<Self>, dim: usize, descending: bool) -> FloatTensor<Self> {
    // Sorting is a permutation of the input, so when tracked we keep the
    // permutation indices (plus the input shape) for the backward pass.
    match super::sort::SortDim
        .prepare::<C>([tensor.node])
        .compute_bound()
        .stateful()
    {
        OpsKind::Tracked(prep) => {
            let input_shape = tensor.primitive.shape();
            let (sorted, indices) =
                B::float_sort_with_indices(tensor.primitive, dim, descending);
            prep.finish((indices, input_shape), sorted)
        }
        OpsKind::UnTracked(prep) => {
            // Untracked: no state needed, skip computing the indices.
            prep.finish(B::float_sort(tensor.primitive, dim, descending))
        }
    }
}
fn float_sort_with_indices(
    tensor: FloatTensor<Self>,
    dim: usize,
    descending: bool,
) -> (FloatTensor<Self>, IntTensor<B>) {
    // Same as `float_sort`, but the permutation indices are also returned to
    // the caller (and cloned into the backward state when tracked).
    match super::sort::SortDim
        .prepare::<C>([tensor.node])
        .compute_bound()
        .stateful()
    {
        OpsKind::Tracked(prep) => {
            let input_shape = tensor.primitive.shape();
            let (sorted, indices) =
                B::float_sort_with_indices(tensor.primitive, dim, descending);
            let output = prep.finish((indices.clone(), input_shape), sorted);
            (output, indices)
        }
        OpsKind::UnTracked(prep) => {
            let (sorted, indices) =
                B::float_sort_with_indices(tensor.primitive, dim, descending);
            (prep.finish(sorted), indices)
        }
    }
}
fn float_argsort(tensor: FloatTensor<Self>, dim: usize, descending: bool) -> IntTensor<B> {
    // Argsort produces integer indices, which carry no gradient; delegate
    // straight to the inner backend without registering a backward op.
    B::float_argsort(tensor.primitive, dim, descending)
}
/// Repeats `tensor` `times` times along `dim`, with autodiff support.
///
/// Backward: the gradient for each original element is the sum of the
/// gradients of all of its repeats along `dim`.
fn float_repeat_dim(tensor: FloatTensor<Self>, dim: usize, times: usize) -> FloatTensor<Self> {
    #[derive(Debug)]
    struct Repeat;
    #[derive(new, Debug)]
    struct RetroRepeat<B: Backend> {
        tensor_id: NodeID,
        dim: usize,
        times: usize,
        _backend: PhantomData<B>,
    }
    // Recomputes the forward repeat from the checkpointed input when this op
    // is memory-bound and its output must be rematerialized.
    impl<B: Backend> RetroForward for RetroRepeat<B> {
        fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
            let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);
            let out = B::float_repeat_dim(tensor, self.dim, self.times);
            states.save(out_node, out)
        }
    }
    impl<B: Backend> Backward<B, 1> for Repeat {
        // (dim, times) captured at forward time.
        type State = (usize, usize);
        fn backward(
            self,
            ops: Ops<Self::State, 1>,
            grads: &mut Gradients,
            _checkpointer: &mut Checkpointer,
        ) {
            let (dim, times) = ops.state;
            unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                let mut dims = grad.shape().dims;
                // The gradient's `dim` extent is orig_dim_size * times;
                // recover the pre-repeat size. Assumes `times` divides it
                // exactly (guaranteed by the forward op) and times != 0.
                let orig_dim_size = dims[dim] / times;
                if orig_dim_size > 1 {
                    dims[dim] = orig_dim_size;
                    // Keep the target shape BEFORE inserting the repeat axis.
                    let orig_dims = dims.clone();
                    dims.insert(dim + 1, times); // shape [..., orig_dim_size, times, ...]
                    let grad = B::float_reshape(grad, Shape::from(dims));
                    let grad = B::float_sum_dim(grad, dim + 1); // sum over repeat times
                    B::float_reshape(grad, Shape::from(orig_dims))
                } else {
                    // orig_dim_size == 1: the whole `dim` extent is repeats,
                    // so a single sum over `dim` collapses them (kept as size 1).
                    B::float_sum_dim(grad, dim)
                }
            });
        }
    }
    match Repeat
        .prepare::<C>([tensor.node.clone()])
        .memory_bound()
        .retro_forward(RetroRepeat::<B>::new(tensor.node.id, dim, times))
        .parents([&tensor])
        .stateful()
    {
        OpsKind::Tracked(prep) => prep.finish(
            (dim, times),
            B::float_repeat_dim(tensor.primitive, dim, times),
        ),
        OpsKind::UnTracked(prep) => {
            prep.finish(B::float_repeat_dim(tensor.primitive, dim, times))
        }
    }
}
fn float_cast(tensor: FloatTensor<Self>, dtype: burn_tensor::FloatDType) -> FloatTensor<Self> {
    // NOTE(review): wrapping the casted primitive in a fresh AutodiffTensor
    // creates a new, parentless node — gradients will NOT flow back through
    // the cast to `tensor`. Confirm this detachment is intentional.
    AutodiffTensor::new(B::float_cast(tensor.primitive, dtype))
}
// TODO: Implement float_prod and float_sum
// https://github.com/tracel-ai/burn/issues/1458
}
/// Records whether a binary op was applied to operands of differing shapes,
/// so the backward pass can reduce each gradient back to its operand's
/// original shape via `broadcast_shape`.
#[derive(Debug, Clone)]
enum BinaryOpsBroadcast {
    /// Shapes differed: stores (lhs shape, rhs shape) captured at forward time.
    Broadcasted(Shape, Shape),
    /// Shapes matched exactly; gradients pass through unchanged.
    None,
}
impl BinaryOpsBroadcast {
    /// Captures both operand shapes when any dimension differs; otherwise
    /// records that no broadcasting took place.
    fn new<B: Backend>(lhs: &B::FloatTensorPrimitive, rhs: &B::FloatTensorPrimitive) -> Self {
        let shape_lhs = lhs.shape();
        let shape_rhs = rhs.shape();
        let ndims = shape_lhs.num_dims();

        // Any mismatched dimension means broadcasting happened in the forward pass.
        let mismatch = (0..ndims).any(|i| shape_rhs.dims[i] != shape_lhs.dims[i]);
        if mismatch {
            Self::Broadcasted(shape_lhs, shape_rhs)
        } else {
            Self::None
        }
    }

    /// Reduces `grad` back to the lhs operand's original shape if broadcasting occurred.
    fn backward_lhs<B: Backend>(&self, grad: B::FloatTensorPrimitive) -> B::FloatTensorPrimitive {
        if let Self::Broadcasted(lhs, _rhs) = self {
            broadcast_shape::<B>(grad, lhs)
        } else {
            grad
        }
    }

    /// Reduces `grad` back to the rhs operand's original shape if broadcasting occurred.
    fn backward_rhs<B: Backend>(&self, grad: B::FloatTensorPrimitive) -> B::FloatTensorPrimitive {
        if let Self::Broadcasted(_lhs, rhs) = self {
            broadcast_shape::<B>(grad, rhs)
        } else {
            grad
        }
    }
}
| rust | {
"argument_definitions": [
{
"definitions": [
"impl<B: Backend, C: CheckpointStrategy> FloatTensorOps<Self> for Autodiff<B, C> {\n fn float_from_data(data: TensorData, device: &Device<Self>) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_from_data(data, device))\n }\n\n fn float_random(\n shape: Shape,\n distribution: burn_tensor::Distribution,\n device: &Device<Self>,\n ) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_random(shape, distribution, device))\n }\n\n fn float_zeros(shape: Shape, device: &Device<Self>) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_zeros(shape, device))\n }\n\n fn float_ones(shape: Shape, device: &Device<Self>) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_ones(shape, device))\n }\n\n async fn float_into_data(tensor: FloatTensor<Self>) -> TensorData {\n B::float_into_data(tensor.primitive).await\n }\n\n fn float_device(tensor: &FloatTensor<Self>) -> Device<Self> {\n B::float_device(&tensor.primitive)\n }\n\n fn float_to_device(tensor: FloatTensor<Self>, device: &Device<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct ToDevice;\n\n impl<B: Backend> Backward<B, 1> for ToDevice {\n type State = B::Device;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_to_device(grad, &ops.state)\n });\n }\n }\n\n match ToDevice\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let device_old = B::float_device(&tensor.primitive);\n prep.finish(device_old, B::float_to_device(tensor.primitive, device))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_to_device(tensor.primitive, device)),\n }\n }\n\n fn float_empty(shape: Shape, device: &Device<Self>) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_empty(shape, device))\n }\n\n fn float_add(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Add;\n\n retro_binary!(RetroAdd, 
B::float_add);\n\n impl<B: Backend> Backward<B, 2> for Add {\n type State = (Shape, Shape);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape_lhs, shape_rhs) = ops.state;\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| broadcast_shape::<B>(grad, &shape_lhs),\n |grad| broadcast_shape::<B>(grad, &shape_rhs),\n );\n }\n }\n\n match Add\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroAdd::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (lhs.primitive.shape(), rhs.primitive.shape()),\n B::float_add(lhs.primitive, rhs.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_add(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_add_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct AddScalar;\n\n retro_unary_scalar!(RetroAddScalar, B::float_add_scalar);\n\n impl<B: Backend> Backward<B, 1> for AddScalar {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| grad);\n }\n }\n\n AddScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroAddScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateless(B::float_add_scalar(lhs.primitive, rhs))\n }\n\n fn float_sub(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sub;\n\n retro_binary!(RetroSub, B::float_sub);\n\n impl<B: Backend> Backward<B, 2> for Sub {\n type State = (Shape, Shape);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape_lhs, shape_rhs) = ops.state;\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| 
broadcast_shape::<B>(grad, &shape_lhs),\n |grad| broadcast_shape::<B>(B::float_neg(grad), &shape_rhs),\n );\n }\n }\n\n match Sub\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroSub::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (lhs.primitive.shape(), rhs.primitive.shape()),\n B::float_sub(lhs.primitive, rhs.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_sub(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_sub_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct SubScalar;\n\n retro_unary_scalar!(RetroSubScalar, B::float_sub_scalar);\n\n impl<B: Backend> Backward<B, 1> for SubScalar {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| grad);\n }\n }\n\n SubScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroSubScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateless(B::float_sub_scalar(lhs.primitive, rhs))\n }\n\n fn float_mul(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Mul;\n\n retro_binary!(RetroMul, B::float_mul);\n\n impl<B: Backend> Backward<B, 2> for Mul {\n type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (lhs, rhs, broadcast) = ops.state;\n let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));\n let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let grad = B::float_mul(grad, rhs.unwrap());\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n let grad = B::float_mul(grad, lhs.unwrap());\n 
broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let lhs_tracked = lhs.is_tracked();\n let rhs_tracked = rhs.is_tracked();\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match Mul\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroMul::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));\n let rhs_state = lhs_tracked.then(|| prep.checkpoint(&rhs));\n\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n B::float_mul(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_mul(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_mul_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct MulScalar;\n\n retro_unary_scalar!(RetroMulScalar, B::float_mul_scalar);\n\n impl<B: Backend> Backward<B, 1> for MulScalar {\n type State = FloatElem<B>;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_mul_scalar(grad, ops.state)\n });\n }\n }\n\n match MulScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroMulScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(rhs, B::float_mul_scalar(lhs.primitive, rhs)),\n OpsKind::UnTracked(prep) => prep.finish(B::float_mul_scalar(lhs.primitive, rhs)),\n }\n }\n\n fn float_div(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Div;\n\n retro_binary!(RetroDiv, B::float_div);\n\n impl<B: Backend> Backward<B, 2> for Div {\n type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut 
Checkpointer,\n ) {\n let (lhs, rhs, broadcast) = ops.state;\n let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));\n let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));\n let [rhs_4lhs, rhs_4rhs] = duplicate(&ops.parents, rhs);\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let rhs = rhs_4lhs.unwrap();\n let value = B::float_powf_scalar(rhs, -1.0);\n let grad = B::float_mul(grad, value);\n\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n let rhs = rhs_4rhs.unwrap();\n let lhs = lhs.unwrap();\n let value = B::float_div(B::float_neg(lhs), B::float_powf_scalar(rhs, 2.0));\n let grad = B::float_mul(grad, value);\n\n broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let lhs_tracked = lhs.is_tracked();\n let rhs_tracked = rhs.is_tracked();\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match Div\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroDiv::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));\n let rhs_state = (lhs_tracked || rhs_tracked).then(|| prep.checkpoint(&rhs));\n\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n B::float_div(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_div(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_div_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct DivScalar;\n\n retro_unary_scalar!(RetroDivScalar, B::float_div_scalar);\n\n impl<B: Backend> Backward<B, 1> for DivScalar {\n type State = FloatElem<B>;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let tmp = 1.0 / ops.state.elem::<f32>();\n B::float_mul_scalar(grad, tmp.elem())\n });\n }\n 
}\n\n match DivScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroDivScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(rhs, B::float_div_scalar(lhs.primitive, rhs)),\n OpsKind::UnTracked(prep) => prep.finish(B::float_div_scalar(lhs.primitive, rhs)),\n }\n }\n\n fn float_remainder(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Rem;\n\n retro_binary!(RetroRem, B::float_remainder);\n\n impl<B: Backend> Backward<B, 2> for Rem {\n type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (lhs, rhs, broadcast) = ops.state;\n let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));\n let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n // remainder(x, y) = x - floor(x / y) * y\n // partial(x - floor(x / y) * y, x) = 1\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n // partial(x - floor(x / y) * y, y) = - floor(x / y)\n let rhs = rhs.unwrap();\n let lhs = lhs.unwrap();\n let value = B::float_neg(B::float_floor(B::float_div(lhs, rhs)));\n let grad = B::float_mul(grad, value);\n broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let lhs_tracked = lhs.is_tracked();\n let rhs_tracked = rhs.is_tracked();\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match Rem\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroRem::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));\n let rhs_state = (lhs_tracked || rhs_tracked).then(|| prep.checkpoint(&rhs));\n\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n 
B::float_remainder(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_remainder(lhs.primitive, rhs.primitive))\n }\n }\n }\n\n fn float_remainder_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct RemainderScalar;\n\n retro_unary_scalar!(RetroRemainderScalar, B::float_remainder_scalar);\n\n impl<B: Backend> Backward<B, 1> for RemainderScalar {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| grad);\n }\n }\n\n RemainderScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroRemainderScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateless(B::float_remainder_scalar(lhs.primitive, rhs))\n }\n\n fn float_matmul(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Matmul;\n\n impl<B: Backend> Backward<B, 2> for Matmul {\n type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (lhs, rhs, broadcast) = ops.state;\n let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));\n let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let rhs = B::float_transpose(rhs.unwrap());\n let grad = B::float_matmul(grad, rhs);\n\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n let lhs = B::float_transpose(lhs.unwrap());\n let grad = B::float_matmul(lhs, grad);\n\n broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let lhs_tracked = lhs.is_tracked();\n let rhs_tracked = rhs.is_tracked();\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match Matmul\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n 
.compute_bound()\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));\n let rhs_state = lhs_tracked.then(|| prep.checkpoint(&rhs));\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n B::float_matmul(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_matmul(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_neg(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Neg;\n\n retro_unary!(RetroNeg, B::float_neg);\n\n impl<B: Backend> Backward<B, 1> for Neg {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| B::float_neg(grad));\n }\n }\n\n Neg.prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroNeg::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateless(B::float_neg(tensor.primitive))\n }\n\n fn float_recip(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Recip;\n\n retro_unary!(RetroRecip, B::float_recip);\n\n impl<B: Backend> Backward<B, 1> for Recip {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let tensor = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let tmp = B::float_powf_scalar(tensor, -2.0);\n let value = B::float_neg(tmp);\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Recip\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroRecip::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_recip(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_recip(tensor.primitive)),\n }\n }\n\n fn float_swap_dims(tensor: 
FloatTensor<Self>, dim1: usize, dim2: usize) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct SwapDim;\n\n #[derive(new, Debug)]\n struct RetroSwapDims<B: Backend> {\n input_id: NodeID,\n dim1: usize,\n dim2: usize,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroSwapDims<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_swap_dims(input, self.dim1, self.dim2);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for SwapDim {\n type State = (usize, usize);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim1, dim2) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_swap_dims(grad, dim2, dim1)\n });\n }\n }\n\n match SwapDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSwapDims::<B>::new(tensor.node.id, dim1, dim2))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (dim1, dim2),\n B::float_swap_dims(tensor.primitive, dim1, dim2),\n ),\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_swap_dims(tensor.primitive, dim1, dim2))\n }\n }\n }\n\n fn float_permute(tensor: FloatTensor<Self>, axes: &[usize]) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct PermuteDim;\n\n #[derive(new, Debug)]\n struct RetroPermuteDims<B: Backend> {\n input_id: NodeID,\n axes: Vec<usize>,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroPermuteDims<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_permute(input, &self.axes);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for PermuteDim {\n type State = Vec<usize>;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: 
&mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let axes = ops.state;\n\n let mut inverse = vec![0usize; axes.len()];\n axes.iter()\n .enumerate()\n .for_each(|(i, &axis)| inverse[axis] = i);\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_permute(grad, &inverse)\n });\n }\n }\n\n match PermuteDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroPermuteDims::<B>::new(tensor.node.id, axes.to_vec()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n prep.finish(axes.to_vec(), B::float_permute(tensor.primitive, axes))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_permute(tensor.primitive, axes)),\n }\n }\n\n fn float_flip(tensor: FloatTensor<Self>, axes: &[usize]) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct FlipDim;\n\n #[derive(new, Debug)]\n struct RetroFlipDims<B: Backend> {\n input_id: NodeID,\n axes: Vec<usize>,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroFlipDims<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_flip(input, &self.axes);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for FlipDim {\n type State = Vec<usize>;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let axes = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_flip(grad, &axes)\n });\n }\n }\n\n match FlipDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroFlipDims::<B>::new(tensor.node.id, axes.to_vec()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n prep.finish(axes.to_vec(), B::float_flip(tensor.primitive, axes))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_flip(tensor.primitive, axes)),\n }\n }\n\n fn float_reshape(tensor: FloatTensor<Self>, shape: 
Shape) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct ReshapeDim;\n\n #[derive(new, Debug)]\n struct RetroReshape<B: Backend> {\n input_id: NodeID,\n shape: Shape,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroReshape<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_reshape(input, self.shape.clone());\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for ReshapeDim {\n type State = (Shape, Shape);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape_original, shape) = ops.state;\n let ndims_out = shape.num_dims();\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let shape_grad = grad.shape();\n let mut grad = grad;\n\n for i in 0..ndims_out {\n if shape.dims[i] == 1 && shape_grad.dims[i] != 1 {\n grad = B::float_sum_dim(grad, i);\n }\n }\n\n B::float_reshape(grad, shape_original)\n });\n }\n }\n\n match ReshapeDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroReshape::<B>::new(tensor.node.id, shape.clone()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (tensor.primitive.shape(), shape.clone()),\n B::float_reshape(tensor.primitive, shape),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_reshape(tensor.primitive, shape)),\n }\n }\n\n fn float_gather(\n dim: usize,\n tensor: FloatTensor<Self>,\n indices: IntTensor<B>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Gather;\n\n impl<B: Backend> Backward<B, 1> for Gather {\n type State = (usize, IntTensor<B>, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, indices, shape, device) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let zeros = 
B::float_zeros(shape, &device);\n B::float_scatter(dim, zeros, indices, grad)\n });\n }\n }\n\n match Gather\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n dim,\n indices.clone(),\n tensor.primitive.shape(),\n B::float_device(&tensor.primitive),\n ),\n B::float_gather(dim, tensor.primitive, indices),\n ),\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_gather(dim, tensor.primitive, indices))\n }\n }\n }\n\n fn float_scatter(\n dim: usize,\n tensor: FloatTensor<Self>,\n indices: IntTensor<B>,\n value: FloatTensor<Self>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Scatter;\n\n impl<B: Backend> Backward<B, 2> for Scatter {\n type State = (usize, IntTensor<B>, Shape, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, indices, shape_lhs, shape_rhs, device) = ops.state;\n let [indices_4lhs, indices_4rhs] = duplicate(&ops.parents, Some(indices));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let zeros = B::float_zeros(shape_lhs, &device);\n B::float_scatter(dim, grad, indices_4lhs.unwrap(), zeros)\n },\n |grad| {\n let zeros = B::float_zeros(shape_rhs, &device);\n B::float_scatter(dim, zeros, indices_4rhs.unwrap(), grad)\n },\n );\n }\n }\n\n match Scatter\n .prepare::<C>([tensor.node, value.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n dim,\n indices.clone(),\n tensor.primitive.shape(),\n value.primitive.shape(),\n B::float_device(&value.primitive),\n ),\n B::float_scatter(dim, tensor.primitive, indices, value.primitive),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_scatter(\n dim,\n tensor.primitive,\n indices,\n value.primitive,\n )),\n }\n }\n\n fn float_select(\n tensor: FloatTensor<Self>,\n dim: usize,\n indices: IntTensor<B>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Select;\n\n 
#[derive(new, Debug)]\n struct RetroSelect<B: Backend> {\n input_id: NodeID,\n dim: usize,\n indices: IntTensor<B>,\n }\n\n impl<B: Backend> RetroForward for RetroSelect<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_select(input, self.dim, self.indices.clone());\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for Select {\n type State = (usize, IntTensor<B>, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, indices, shape, device) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let zeros = B::float_zeros(shape, &device);\n B::float_select_assign(zeros, dim, indices, grad)\n });\n }\n }\n\n match Select\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSelect::<B>::new(tensor.node.id, dim, indices.clone()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n dim,\n indices.clone(),\n tensor.primitive.shape(),\n B::float_device(&tensor.primitive),\n ),\n B::float_select(tensor.primitive, dim, indices),\n ),\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_select(tensor.primitive, dim, indices))\n }\n }\n }\n\n fn float_select_assign(\n tensor: FloatTensor<Self>,\n dim: usize,\n indices: IntTensor<B>,\n value: FloatTensor<Self>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct IndexSelectDimAssign;\n\n #[derive(new, Debug)]\n struct RetroSelectAssign<B: Backend> {\n tensor_id: NodeID,\n dim: usize,\n indices: IntTensor<B>,\n value_id: NodeID,\n }\n\n impl<B: Backend> RetroForward for RetroSelectAssign<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);\n let value = states.get_state::<B::FloatTensorPrimitive>(&self.value_id);\n 
let out = B::float_select_assign(tensor, self.dim, self.indices.clone(), value);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 2> for IndexSelectDimAssign {\n type State = (usize, IntTensor<B>);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, indices) = ops.state;\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| grad,\n |grad| B::float_select(grad, dim, indices),\n );\n }\n }\n\n match IndexSelectDimAssign\n .prepare::<C>([tensor.node.clone(), value.node.clone()])\n .memory_bound()\n .retro_forward(RetroSelectAssign::<B>::new(\n tensor.node.id,\n dim,\n indices.clone(),\n value.node.id,\n ))\n .parents([&tensor, &value])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (dim, indices.clone()),\n B::float_select_assign(tensor.primitive, dim, indices, value.primitive),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_select_assign(\n tensor.primitive,\n dim,\n indices,\n value.primitive,\n )),\n }\n }\n\n fn float_slice(\n tensor: FloatTensor<Self>,\n ranges: &[core::ops::Range<usize>],\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Index;\n\n #[derive(new, Debug)]\n struct RetroSlice<B: Backend> {\n tensor_id: NodeID,\n ranges: Vec<core::ops::Range<usize>>,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroSlice<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);\n let out = B::float_slice(tensor, &self.ranges);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for Index {\n type State = (Vec<core::ops::Range<usize>>, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (ranges, shape, device) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let zeros = 
B::float_zeros(shape, &device);\n B::float_slice_assign(zeros, &ranges, grad)\n });\n }\n }\n\n match Index\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSlice::<B>::new(tensor.node.id, ranges.to_vec()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n ranges.to_vec(),\n tensor.primitive.shape(),\n B::float_device(&tensor.primitive),\n ),\n B::float_slice(tensor.primitive, ranges),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_slice(tensor.primitive, ranges)),\n }\n }\n\n fn float_slice_assign(\n tensor: FloatTensor<Self>,\n ranges: &[core::ops::Range<usize>],\n value: FloatTensor<Self>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct SliceAssign;\n\n #[derive(new, Debug)]\n struct RetroSliceAssign<B: Backend> {\n tensor_id: NodeID,\n ranges: Vec<core::ops::Range<usize>>,\n value_id: NodeID,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroSliceAssign<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);\n let value = states.get_state::<B::FloatTensorPrimitive>(&self.value_id);\n let out = B::float_slice_assign(tensor, &self.ranges, value);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 2> for SliceAssign {\n type State = (Vec<core::ops::Range<usize>>, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (ranges, shape_rhs, device) = ops.state;\n let [ranges_4lhs, ranges_4rhs] = duplicate(&ops.parents, Some(ranges));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let zeros = B::float_zeros(shape_rhs, &device);\n B::float_slice_assign(grad, &ranges_4lhs.unwrap(), zeros)\n },\n |grad| B::float_slice(grad, &ranges_4rhs.unwrap()),\n );\n }\n }\n\n match SliceAssign\n .prepare::<C>([tensor.node.clone(), value.node.clone()])\n 
.memory_bound()\n .retro_forward(RetroSliceAssign::<B>::new(\n tensor.node.id,\n ranges.to_vec(),\n value.node.id,\n ))\n .parents([&tensor, &value])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n ranges.to_vec(),\n value.primitive.shape(),\n B::float_device(&value.primitive),\n ),\n B::float_slice_assign(tensor.primitive, ranges, value.primitive),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_slice_assign(\n tensor.primitive,\n ranges,\n value.primitive,\n )),\n }\n }\n\n fn float_mask_where(\n tensor: FloatTensor<Self>,\n mask: BoolTensor<Self>,\n source: FloatTensor<Self>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct MaskWhere;\n\n impl<B: Backend> Backward<B, 2> for MaskWhere {\n type State = (BoolTensor<B>, Shape, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (mask, shape_lhs, shape_rhs, device) = ops.state;\n let [mask_4lhs, mask_4rhs] = duplicate(&ops.parents, Some(mask));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let zeros = B::float_zeros(shape_lhs.clone(), &device);\n let grad = B::float_mask_where(grad, mask_4lhs.unwrap(), zeros);\n\n broadcast_shape::<B>(grad, &shape_lhs)\n },\n |grad| {\n let zeros = B::float_zeros(shape_rhs.clone(), &device);\n let grad = B::float_mask_where(zeros, mask_4rhs.unwrap(), grad);\n\n broadcast_shape::<B>(grad, &shape_rhs)\n },\n );\n }\n }\n\n match MaskWhere\n .prepare::<C>([tensor.node, source.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n mask.clone(),\n tensor.primitive.shape(),\n source.primitive.shape(),\n B::float_device(&source.primitive),\n ),\n B::float_mask_where(tensor.primitive, mask, source.primitive),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_mask_where(\n tensor.primitive,\n mask,\n source.primitive,\n )),\n }\n }\n\n fn float_mask_fill(\n tensor: FloatTensor<Self>,\n mask: 
        BoolTensor<B>,
        value: FloatElem<B>,
    ) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct MaskFill;

        impl<B: Backend> Backward<B, 1> for MaskFill {
            // The boolean mask is saved so the backward pass can zero the
            // gradient wherever the forward output was overwritten by `value`.
            type State = BoolTensor<B>;

            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                // Masked positions received a constant in the forward pass, so
                // their gradient contribution is zeroed out.
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    B::float_mask_fill(grad, ops.state, 0.elem())
                });
            }
        }

        match MaskFill
            .prepare::<C>([tensor.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => prep.finish(
                mask.clone(),
                B::float_mask_fill(tensor.primitive, mask, value),
            ),
            OpsKind::UnTracked(prep) => {
                prep.finish(B::float_mask_fill(tensor.primitive, mask, value))
            }
        }
    }

    // Comparison ops return bool tensors, which carry no gradient; they simply
    // forward to the inner backend on the wrapped primitives.
    fn float_equal(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {
        B::float_equal(lhs.primitive, rhs.primitive)
    }

    fn float_equal_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {
        B::float_equal_elem(lhs.primitive, rhs)
    }

    fn float_greater(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {
        B::float_greater(lhs.primitive, rhs.primitive)
    }

    fn float_greater_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {
        B::float_greater_elem(lhs.primitive, rhs)
    }

    fn float_greater_equal(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {
        B::float_greater_equal(lhs.primitive, rhs.primitive)
    }

    fn float_greater_equal_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {
        B::float_greater_equal_elem(lhs.primitive, rhs)
    }

    fn float_lower(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {
        B::float_lower(lhs.primitive, rhs.primitive)
    }

    fn float_lower_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {
        B::float_lower_elem(lhs.primitive, rhs)
    }

    fn float_lower_equal(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {
        B::float_lower_equal(lhs.primitive, rhs.primitive)
    }

    fn float_lower_equal_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {
        B::float_lower_equal_elem(lhs.primitive, rhs)
    }

    /// Detach the tensor from the autodiff graph while preserving its
    /// `require_grad` setting.
    fn float_detach(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        // When we detach a tensor, we remove it from the graph, but we still want to keep the
        // `require_grad` setting.
        let is_require_grad = Self::float_is_require_grad(&tensor);
        // Rebuilding from the bare primitive drops the node history.
        let tensor = AutodiffTensor::new(tensor.primitive);

        match is_require_grad {
            true => tensor.require_grad(),
            false => tensor,
        }
    }

    fn float_set_require_grad(tensor: FloatTensor<Self>, require_grad: bool) -> FloatTensor<Self> {
        if require_grad {
            return tensor.require_grad();
        }

        // Disabling gradients rebuilds the tensor from its primitive, which
        // also drops any existing graph history.
        AutodiffTensor::new(tensor.primitive)
    }

    fn float_is_require_grad(tensor: &FloatTensor<Self>) -> bool {
        matches!(tensor.node.requirement, Requirement::Grad)
    }

    fn float_mean(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Mean;

        impl<B: Backend> Backward<B, 1> for Mean {
            // Input shape, needed to rebuild a gradient of the original size.
            type State = Shape;

            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    let shape = ops.state;
                    // d(mean)/dx_i = 1/N for every element of the input.
                    let val = 1_f64 / shape.num_elements() as f64;
                    let ones = B::float_ones(shape, &B::float_device(&grad));
                    let val = B::float_mul_scalar(ones, val.elem());

                    // Bring the reduced gradient up to the input rank so the
                    // multiplication below broadcasts it over all elements.
                    let grad = unsqueeze_like::<B>(grad, val.shape());
                    B::float_mul(val, grad)
                });
            }
        }

        match Mean.prepare::<C>([tensor.node]).compute_bound().stateful() {
            OpsKind::Tracked(prep) => {
                prep.finish(tensor.primitive.shape(), B::float_mean(tensor.primitive))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_mean(tensor.primitive)),
        }
    }

    fn float_sum(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Sum;

        impl<B: Backend> Backward<B, 1> for Sum {
            // Input shape, needed to rebuild a gradient of the original size.
            type State = Shape;

            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut
Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let val = B::float_ones(ops.state, &B::float_device(&grad));\n\n let grad = unsqueeze_like::<B>(grad, val.shape());\n B::float_mul(val, grad)\n });\n }\n }\n\n match Sum.prepare::<C>([tensor.node]).compute_bound().stateful() {\n OpsKind::Tracked(prep) => {\n prep.finish(tensor.primitive.shape(), B::float_sum(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_sum(tensor.primitive)),\n }\n }\n\n fn float_mean_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct MeanDim;\n\n impl<B: Backend> Backward<B, 1> for MeanDim {\n type State = (Shape, usize);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape, dim) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let val = 1_f64 / shape.dims[dim] as f64;\n let ones = B::float_ones(shape, &B::float_device(&grad));\n let val = B::float_mul_scalar(ones, B::FloatElem::from_elem(val));\n\n let grad = B::float_sum_dim(grad, dim);\n B::float_mul(val, grad)\n });\n }\n }\n\n match MeanDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (tensor.primitive.shape(), dim),\n B::float_mean_dim(tensor.primitive, dim),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_mean_dim(tensor.primitive, dim)),\n }\n }\n\n fn float_sum_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct SumDim;\n\n impl<B: Backend> Backward<B, 1> for SumDim {\n type State = (Shape, usize);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape, dim) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let ones = B::float_ones(shape, &B::float_device(&grad));\n let grad = 
B::float_sum_dim(grad, dim);\n\n B::float_mul(ones, grad)\n });\n }\n }\n\n match SumDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (tensor.primitive.shape(), dim),\n B::float_sum_dim(tensor.primitive, dim),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_sum_dim(tensor.primitive, dim)),\n }\n }\n\n fn float_argmax(tensor: FloatTensor<Self>, dim: usize) -> IntTensor<B> {\n B::float_argmax(tensor.primitive, dim)\n }\n\n fn float_argmin(tensor: FloatTensor<Self>, dim: usize) -> IntTensor<B> {\n B::float_argmin(tensor.primitive, dim)\n }\n\n fn float_exp(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Exp;\n\n retro_unary!(RetroExp, B::float_exp);\n\n impl<B: Backend> Backward<B, 1> for Exp {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n let output = B::float_exp(input);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_mul(grad, output)\n });\n }\n }\n\n match Exp\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroExp::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_exp(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_exp(tensor.primitive)),\n }\n }\n\n fn float_log(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Log;\n\n retro_unary!(RetroLog, B::float_log);\n\n impl<B: Backend> Backward<B, 1> for Log {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = 
B::float_powf_scalar(input, -1.0);\n B::float_mul(grad, value)\n });\n }\n }\n\n match Log\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroLog::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_log(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_log(tensor.primitive)),\n }\n }\n\n fn float_log1p(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Log1P;\n\n retro_unary!(RetroLog1P, B::float_log1p);\n\n impl<B: Backend> Backward<B, 1> for Log1P {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_add_scalar(input, 1.elem());\n let value = B::float_powf_scalar(value, -1.0);\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Log1P\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroLog1P::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_log1p(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_log1p(tensor.primitive)),\n }\n }\n\n fn float_powf_scalar(tensor: FloatTensor<Self>, value: f32) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct PowfScalar;\n\n #[derive(new, Debug)]\n struct RetroPowfScalar<B: Backend> {\n lhs_id: NodeID,\n rhs: f32,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroPowfScalar<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let lhs = states.get_state::<B::FloatTensorPrimitive>(&self.lhs_id);\n let out = B::float_powf_scalar(lhs, self.rhs);\n states.save(out_node, out)\n }\n }\n\n impl<B: 
Backend> Backward<B, 1> for PowfScalar {\n type State = (NodeID, f32);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (tensor_id, value) = ops.state;\n let tensor = checkpointer.retrieve_node_output(tensor_id);\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let tmp = B::float_powf_scalar(tensor, value - 1.0);\n let value = B::float_mul_scalar(tmp, value.elem());\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match PowfScalar\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroPowfScalar::<B>::new(tensor.node.id, value))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = (prep.checkpoint(&tensor), value);\n prep.finish(state, B::float_powf_scalar(tensor.primitive, value))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_powf_scalar(tensor.primitive, value)),\n }\n }\n\n fn float_sqrt(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sqrt;\n\n retro_unary!(RetroSqrt, B::float_sqrt);\n\n impl<B: Backend> Backward<B, 1> for Sqrt {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_div_scalar(B::float_powf_scalar(input, -0.5), 2.elem());\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Sqrt\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSqrt::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_sqrt(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_sqrt(tensor.primitive)),\n }\n }\n\n fn float_abs(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Abs;\n\n 
        retro_unary!(RetroAbs, B::float_abs);

        impl<B: Backend> Backward<B, 1> for Abs {
            // Checkpointed id of the input node; the input itself is re-fetched
            // during the backward pass to compute its sign.
            type State = NodeID;

            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                let tensor: B::FloatTensorPrimitive = checkpointer.retrieve_node_output(ops.state);
                // d|x|/dx is taken as sign(x) (whatever `float_sign` yields at 0).
                let state = B::float_sign(tensor);
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    B::float_mul(grad, state)
                });
            }
        }

        match Abs
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroAbs::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                let state = prep.checkpoint(&tensor);
                prep.finish(state, B::float_abs(tensor.primitive))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_abs(tensor.primitive)),
        }
    }

    fn float_cos(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Cos;

        retro_unary!(RetroCos, B::float_cos);

        impl<B: Backend> Backward<B, 1> for Cos {
            type State = NodeID;

            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                let input = checkpointer.retrieve_node_output(ops.state);
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    // d(cos x)/dx = -sin(x)
                    let value = B::float_neg(B::float_sin(input));

                    B::float_mul(grad, value)
                });
            }
        }

        match Cos
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroCos::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                let state = prep.checkpoint(&tensor);
                prep.finish(state, B::float_cos(tensor.primitive))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_cos(tensor.primitive)),
        }
    }

    fn float_sin(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Sin;

        retro_unary!(RetroSin, B::float_sin);

        impl<B: Backend> Backward<B, 1> for Sin {
            type State = NodeID;

            fn backward(
                self,
                ops: Ops<Self::State,
1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let state = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_cos(state);\n B::float_mul(grad, value)\n });\n }\n }\n\n match Sin\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSin::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_sin(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_sin(tensor.primitive)),\n }\n }\n\n fn float_tanh(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Tanh;\n\n retro_unary!(RetroTanh, B::float_tanh);\n\n impl<B: Backend> Backward<B, 1> for Tanh {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n let state = B::float_tanh(input);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_add_scalar(\n B::float_neg(B::float_powf_scalar(state, 2.0)),\n 1.elem(),\n );\n B::float_mul(grad, value)\n });\n }\n }\n\n match Tanh\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroTanh::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_tanh(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_tanh(tensor.primitive)),\n }\n }\n\n fn float_round(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Round;\n retro_unary!(RetroRound, B::float_round);\n\n impl<B: Backend> Backward<B, 1> for Round {\n type State = (Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n 
) {\n let (shape, device) = ops.state;\n unary::<B, _>(ops.parents, ops.node, grads, |_grad| {\n B::float_zeros(shape, &device)\n })\n }\n }\n\n match Round\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroRound::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (tensor.primitive.shape(), B::float_device(&tensor.primitive)),\n B::float_round(tensor.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_round(tensor.primitive)),\n }\n }\n\n fn float_floor(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Floor;\n retro_unary!(RetroFloor, B::float_floor);\n\n impl<B: Backend> Backward<B, 1> for Floor {\n type State = (Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape, device) = ops.state;\n unary::<B, _>(ops.parents, ops.node, grads, |_grad| {\n B::float_zeros(shape, &device)\n })\n }\n }\n\n match Floor\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroFloor::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (tensor.primitive.shape(), B::float_device(&tensor.primitive)),\n B::float_floor(tensor.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_floor(tensor.primitive)),\n }\n }\n\n fn float_ceil(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Ceil;\n retro_unary!(RetroCeil, B::float_ceil);\n\n impl<B: Backend> Backward<B, 1> for Ceil {\n type State = (Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape, device) = ops.state;\n unary::<B, _>(ops.parents, ops.node, grads, |_grad| {\n B::float_zeros(shape, &device)\n })\n }\n }\n\n match Ceil\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n 
.retro_forward(RetroCeil::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (tensor.primitive.shape(), B::float_device(&tensor.primitive)),\n B::float_floor(tensor.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_floor(tensor.primitive)),\n }\n }\n\n fn float_erf(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Erf;\n\n retro_unary!(RetroErf, B::float_erf);\n\n impl<B: Backend> Backward<B, 1> for Erf {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let ops = checkpointer.retrieve_node_output(ops.state);\n let exponent = B::float_neg(B::float_powf_scalar(ops, 2.0));\n let numerator = B::float_mul_scalar(B::float_exp(exponent), 2.0.elem());\n let denominator = core::f64::consts::PI.sqrt().elem();\n let value = B::float_div_scalar(numerator, denominator);\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Erf\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroErf::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_erf(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_erf(tensor.primitive)),\n }\n }\n\n fn float_cat(tensors: Vec<FloatTensor<Self>>, dim: usize) -> FloatTensor<Self> {\n #[derive(new, Debug)]\n struct CatStep<B: Backend> {\n nodes: Vec<Option<NodeRef>>,\n // The dimension of each tensor along the dim dimension.\n // This indicates the number of dimension concatenated for each tensor.\n dim_sizes: Vec<usize>,\n output: NodeRef,\n phantom: PhantomData<B>,\n dim: usize,\n }\n\n impl<B: Backend> Step for CatStep<B> {\n fn step(self: Box<Self>, grads: &mut Gradients, _checkpointer: &mut Checkpointer) {\n let grad = 
grads.consume::<B>(&self.output);\n let ranges: Vec<_> = grad.shape().dims.iter().map(|v| 0..*v).collect();\n\n let mut current_index = 0;\n\n self.nodes\n .into_iter()\n .zip(self.dim_sizes)\n .filter_map(|(node, dim_size)| node.map(|node| (node, dim_size)))\n .for_each(|(node, dim_size)| {\n let mut ranges = ranges.clone();\n ranges[self.dim] = current_index..dim_size + current_index;\n current_index += dim_size;\n grads.register::<B>(node.id, B::float_slice(grad.clone(), &ranges));\n });\n }\n\n fn node(&self) -> NodeID {\n self.output.id\n }\n\n fn parents(&self) -> Vec<NodeID> {\n self.nodes\n .iter()\n .filter_map(|node| node.clone())\n .map(|node| node.id)\n .collect()\n }\n fn depth(&self) -> usize {\n self.output.order\n }\n }\n\n let mut nodes = Vec::with_capacity(tensors.len());\n let mut primitives = Vec::with_capacity(tensors.len());\n let mut dim_sizes = Vec::with_capacity(tensors.len());\n\n tensors.into_iter().for_each(|tensor| {\n dim_sizes.push(tensor.primitive.shape().dims[dim]);\n nodes.push(tensor.node);\n primitives.push(tensor.primitive);\n });\n\n let requirement = Requirement::from_nodes(&nodes);\n\n // For simplicity, this operation does not checkpoint anything\n let cat_computing_property = ComputingProperty::Ambiguous;\n let checkpointer_builder = CheckpointerBuilder::default();\n\n let output = B::float_cat(primitives, dim);\n if requirement.is_none() {\n return AutodiffTensor::from_parents(\n output,\n &nodes,\n requirement,\n cat_computing_property,\n );\n }\n\n let output =\n AutodiffTensor::from_parents(output, &nodes, requirement, cat_computing_property);\n let nodes = nodes\n .into_iter()\n .map(|node| node.clone_if_require_grad())\n .collect::<Vec<_>>();\n\n let ops = CatStep::<B>::new(nodes, dim_sizes, output.node.clone(), dim);\n output.register_step(ops, checkpointer_builder)\n }\n\n fn float_max_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {\n match MaxMinDim\n .prepare::<C>([tensor.node])\n 
.compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let shape = tensor.primitive.shape();\n let (tensor, index) = B::float_max_dim_with_indices(tensor.primitive, dim);\n prep.finish((index, shape), tensor)\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_max_dim(tensor.primitive, dim)),\n }\n }\n fn float_max_dim_with_indices(\n tensor: FloatTensor<Self>,\n dim: usize,\n ) -> (FloatTensor<Self>, IntTensor<B>) {\n match MaxMinDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let shape = tensor.primitive.shape();\n let (tensor, index) = B::float_max_dim_with_indices(tensor.primitive, dim);\n let tensor = prep.finish((index.clone(), shape), tensor);\n\n (tensor, index)\n }\n OpsKind::UnTracked(prep) => {\n let (tensor, index) = B::float_max_dim_with_indices(tensor.primitive, dim);\n let tensor = prep.finish(tensor);\n\n (tensor, index)\n }\n }\n }\n fn float_min_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {\n match MaxMinDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let shape = tensor.primitive.shape();\n let (tensor, index) = B::float_min_dim_with_indices(tensor.primitive, dim);\n prep.finish((index, shape), tensor)\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_min_dim(tensor.primitive, dim)),\n }\n }\n fn float_min_dim_with_indices(\n tensor: FloatTensor<Self>,\n dim: usize,\n ) -> (FloatTensor<Self>, IntTensor<B>) {\n match MaxMinDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let shape = tensor.primitive.shape();\n let (tensor, index) = B::float_min_dim_with_indices(tensor.primitive, dim);\n let tensor = prep.finish((index.clone(), shape), tensor);\n\n (tensor, index)\n }\n OpsKind::UnTracked(prep) => {\n let (tensor, index) = B::float_min_dim_with_indices(tensor.primitive, dim);\n let tensor = prep.finish(tensor);\n\n (tensor, index)\n }\n }\n }\n\n 
fn float_into_int(tensor: FloatTensor<Self>) -> <Autodiff<B> as Backend>::IntTensorPrimitive {\n B::float_into_int(tensor.primitive)\n }\n\n fn float_powf(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct PowF;\n\n retro_binary!(RetroPowf, B::float_powf);\n\n impl<B: Backend> Backward<B, 2> for PowF {\n type State = (NodeID, NodeID, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (lhs_id, rhs_id, broadcast) = ops.state;\n let lhs: B::FloatTensorPrimitive = checkpointer.retrieve_node_output(lhs_id);\n let rhs: B::FloatTensorPrimitive = checkpointer.retrieve_node_output(rhs_id);\n\n // Both lhs and rhs are needed for both lhs and rhs gradients, but we clone them\n // the number of times required by the parents specification.\n let [rhs_4lhs, rhs_4rhs] = duplicate(&ops.parents, Some(rhs));\n let [lhs_4lhs, lhs_4rhs] = duplicate(&ops.parents, Some(lhs));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n //rhs*(lhs.val**(rhs-1))*grad\n let rhs1 = rhs_4lhs.unwrap();\n let rhs2 = rhs1.clone();\n let lhs = lhs_4lhs.unwrap();\n\n let tmp = B::float_powf(\n lhs,\n B::float_sub_scalar(rhs1, B::FloatElem::from_elem(1.0)),\n );\n let value = B::float_mul(tmp, rhs2);\n let grad = B::float_mul(grad, value);\n\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n //lhs**rhs * ln(lhs) * grad\n let rhs = rhs_4rhs.unwrap();\n let lhs1 = lhs_4rhs.unwrap();\n let lhs2 = lhs1.clone();\n let tmp = B::float_powf(lhs1, rhs);\n let value = B::float_mul(tmp, B::float_log(lhs2));\n let grad = B::float_mul(grad, value);\n\n broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match PowF\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroPowf::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, 
&rhs])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = prep.checkpoint(&lhs);\n let rhs_state = prep.checkpoint(&rhs);\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n B::float_powf(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_powf(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_sign(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sign;\n\n retro_unary!(RetroSign, B::float_sign);\n\n impl<B: Backend> Backward<B, 1> for Sign {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad|\n // Always return 0 because the derivative of the sign function\n // does not contribute to gradient updates in a meaningful way.\n B::float_mul_scalar(grad, 0.elem()));\n }\n }\n\n Sign.prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSign::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateless(B::float_sign(tensor.primitive))\n }\n\n fn float_expand(tensor: FloatTensor<Self>, shape: Shape) -> FloatTensor<Self> {\n // D1: tensor, D2: shape\n #[derive(Debug)]\n struct ExpandDim;\n\n #[derive(new, Debug)]\n struct RetroExpand<B: Backend> {\n input_id: NodeID,\n shape: Shape,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroExpand<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_expand(input, self.shape.clone());\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for ExpandDim {\n type State = (Shape, Shape);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape_in, shape_out) = ops.state;\n let ndims_in = shape_in.num_dims();\n let ndims_out = shape_out.num_dims();\n\n let 
mut shape_expanded = vec![1; ndims_out];\n\n debug_assert!(ndims_out >= ndims_in);\n\n for i in 0..ndims_in {\n shape_expanded[i + (ndims_out - ndims_in)] = shape_in.dims[i];\n }\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let shape_grad = grad.shape();\n let mut grad = grad;\n\n #[allow(clippy::needless_range_loop)]\n for i in 0..ndims_out {\n if shape_expanded[i] == 1 && shape_grad.dims[i] != 1 {\n grad = B::float_sum_dim(grad, i);\n }\n }\n\n B::float_reshape(grad, shape_in)\n });\n }\n }\n\n match ExpandDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroExpand::<B>::new(tensor.node.id, shape.clone()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (tensor.primitive.shape(), shape.clone()),\n B::float_expand(tensor.primitive, shape),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_expand(tensor.primitive, shape)),\n }\n }\n\n fn float_sort(tensor: FloatTensor<Self>, dim: usize, descending: bool) -> FloatTensor<Self> {\n match super::sort::SortDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let shape = tensor.primitive.shape();\n let (tensor, indices) =\n B::float_sort_with_indices(tensor.primitive, dim, descending);\n prep.finish((indices, shape), tensor)\n }\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_sort(tensor.primitive, dim, descending))\n }\n }\n }\n\n fn float_sort_with_indices(\n tensor: FloatTensor<Self>,\n dim: usize,\n descending: bool,\n ) -> (FloatTensor<Self>, IntTensor<B>) {\n match super::sort::SortDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let shape = tensor.primitive.shape();\n let (tensor, indices) =\n B::float_sort_with_indices(tensor.primitive, dim, descending);\n let tensor = prep.finish((indices.clone(), shape), tensor);\n\n (tensor, indices)\n }\n OpsKind::UnTracked(prep) => {\n let (tensor, indices) =\n 
B::float_sort_with_indices(tensor.primitive, dim, descending);\n let tensor = prep.finish(tensor);\n\n (tensor, indices)\n }\n }\n }\n\n fn float_argsort(tensor: FloatTensor<Self>, dim: usize, descending: bool) -> IntTensor<B> {\n B::float_argsort(tensor.primitive, dim, descending)\n }\n\n fn float_repeat_dim(tensor: FloatTensor<Self>, dim: usize, times: usize) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Repeat;\n\n #[derive(new, Debug)]\n struct RetroRepeat<B: Backend> {\n tensor_id: NodeID,\n dim: usize,\n times: usize,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroRepeat<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);\n let out = B::float_repeat_dim(tensor, self.dim, self.times);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for Repeat {\n type State = (usize, usize);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, times) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let mut dims = grad.shape().dims;\n let orig_dim_size = dims[dim] / times;\n if orig_dim_size > 1 {\n dims[dim] = orig_dim_size;\n let orig_dims = dims.clone();\n dims.insert(dim + 1, times); // shape [..., orig_dim_size, times, ...]\n let grad = B::float_reshape(grad, Shape::from(dims));\n let grad = B::float_sum_dim(grad, dim + 1); // sum over repeat times\n B::float_reshape(grad, Shape::from(orig_dims))\n } else {\n B::float_sum_dim(grad, dim)\n }\n });\n }\n }\n\n match Repeat\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroRepeat::<B>::new(tensor.node.id, dim, times))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (dim, times),\n B::float_repeat_dim(tensor.primitive, dim, times),\n ),\n OpsKind::UnTracked(prep) => {\n 
prep.finish(B::float_repeat_dim(tensor.primitive, dim, times))\n }\n }\n }\n\n fn float_cast(tensor: FloatTensor<Self>, dtype: burn_tensor::FloatDType) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_cast(tensor.primitive, dtype))\n }\n\n // TODO: Implement float_prod and float_sum\n // https://github.com/tracel-ai/burn/issues/1458\n}"
],
"name": "tensor",
"type": "FloatTensor<Self>"
},
{
"definitions": [
"impl<B: Backend, C: CheckpointStrategy> FloatTensorOps<Self> for Autodiff<B, C> {\n fn float_from_data(data: TensorData, device: &Device<Self>) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_from_data(data, device))\n }\n\n fn float_random(\n shape: Shape,\n distribution: burn_tensor::Distribution,\n device: &Device<Self>,\n ) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_random(shape, distribution, device))\n }\n\n fn float_zeros(shape: Shape, device: &Device<Self>) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_zeros(shape, device))\n }\n\n fn float_ones(shape: Shape, device: &Device<Self>) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_ones(shape, device))\n }\n\n async fn float_into_data(tensor: FloatTensor<Self>) -> TensorData {\n B::float_into_data(tensor.primitive).await\n }\n\n fn float_device(tensor: &FloatTensor<Self>) -> Device<Self> {\n B::float_device(&tensor.primitive)\n }\n\n fn float_to_device(tensor: FloatTensor<Self>, device: &Device<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct ToDevice;\n\n impl<B: Backend> Backward<B, 1> for ToDevice {\n type State = B::Device;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_to_device(grad, &ops.state)\n });\n }\n }\n\n match ToDevice\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let device_old = B::float_device(&tensor.primitive);\n prep.finish(device_old, B::float_to_device(tensor.primitive, device))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_to_device(tensor.primitive, device)),\n }\n }\n\n fn float_empty(shape: Shape, device: &Device<Self>) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_empty(shape, device))\n }\n\n fn float_add(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Add;\n\n retro_binary!(RetroAdd, 
B::float_add);\n\n impl<B: Backend> Backward<B, 2> for Add {\n type State = (Shape, Shape);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape_lhs, shape_rhs) = ops.state;\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| broadcast_shape::<B>(grad, &shape_lhs),\n |grad| broadcast_shape::<B>(grad, &shape_rhs),\n );\n }\n }\n\n match Add\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroAdd::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (lhs.primitive.shape(), rhs.primitive.shape()),\n B::float_add(lhs.primitive, rhs.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_add(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_add_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct AddScalar;\n\n retro_unary_scalar!(RetroAddScalar, B::float_add_scalar);\n\n impl<B: Backend> Backward<B, 1> for AddScalar {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| grad);\n }\n }\n\n AddScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroAddScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateless(B::float_add_scalar(lhs.primitive, rhs))\n }\n\n fn float_sub(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sub;\n\n retro_binary!(RetroSub, B::float_sub);\n\n impl<B: Backend> Backward<B, 2> for Sub {\n type State = (Shape, Shape);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape_lhs, shape_rhs) = ops.state;\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| 
broadcast_shape::<B>(grad, &shape_lhs),\n |grad| broadcast_shape::<B>(B::float_neg(grad), &shape_rhs),\n );\n }\n }\n\n match Sub\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroSub::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (lhs.primitive.shape(), rhs.primitive.shape()),\n B::float_sub(lhs.primitive, rhs.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_sub(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_sub_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct SubScalar;\n\n retro_unary_scalar!(RetroSubScalar, B::float_sub_scalar);\n\n impl<B: Backend> Backward<B, 1> for SubScalar {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| grad);\n }\n }\n\n SubScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroSubScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateless(B::float_sub_scalar(lhs.primitive, rhs))\n }\n\n fn float_mul(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Mul;\n\n retro_binary!(RetroMul, B::float_mul);\n\n impl<B: Backend> Backward<B, 2> for Mul {\n type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (lhs, rhs, broadcast) = ops.state;\n let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));\n let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let grad = B::float_mul(grad, rhs.unwrap());\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n let grad = B::float_mul(grad, lhs.unwrap());\n 
broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let lhs_tracked = lhs.is_tracked();\n let rhs_tracked = rhs.is_tracked();\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match Mul\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroMul::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));\n let rhs_state = lhs_tracked.then(|| prep.checkpoint(&rhs));\n\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n B::float_mul(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_mul(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_mul_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct MulScalar;\n\n retro_unary_scalar!(RetroMulScalar, B::float_mul_scalar);\n\n impl<B: Backend> Backward<B, 1> for MulScalar {\n type State = FloatElem<B>;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_mul_scalar(grad, ops.state)\n });\n }\n }\n\n match MulScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroMulScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(rhs, B::float_mul_scalar(lhs.primitive, rhs)),\n OpsKind::UnTracked(prep) => prep.finish(B::float_mul_scalar(lhs.primitive, rhs)),\n }\n }\n\n fn float_div(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Div;\n\n retro_binary!(RetroDiv, B::float_div);\n\n impl<B: Backend> Backward<B, 2> for Div {\n type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut 
Checkpointer,\n ) {\n let (lhs, rhs, broadcast) = ops.state;\n let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));\n let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));\n let [rhs_4lhs, rhs_4rhs] = duplicate(&ops.parents, rhs);\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let rhs = rhs_4lhs.unwrap();\n let value = B::float_powf_scalar(rhs, -1.0);\n let grad = B::float_mul(grad, value);\n\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n let rhs = rhs_4rhs.unwrap();\n let lhs = lhs.unwrap();\n let value = B::float_div(B::float_neg(lhs), B::float_powf_scalar(rhs, 2.0));\n let grad = B::float_mul(grad, value);\n\n broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let lhs_tracked = lhs.is_tracked();\n let rhs_tracked = rhs.is_tracked();\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match Div\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroDiv::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));\n let rhs_state = (lhs_tracked || rhs_tracked).then(|| prep.checkpoint(&rhs));\n\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n B::float_div(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_div(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_div_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct DivScalar;\n\n retro_unary_scalar!(RetroDivScalar, B::float_div_scalar);\n\n impl<B: Backend> Backward<B, 1> for DivScalar {\n type State = FloatElem<B>;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let tmp = 1.0 / ops.state.elem::<f32>();\n B::float_mul_scalar(grad, tmp.elem())\n });\n }\n 
}\n\n match DivScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroDivScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(rhs, B::float_div_scalar(lhs.primitive, rhs)),\n OpsKind::UnTracked(prep) => prep.finish(B::float_div_scalar(lhs.primitive, rhs)),\n }\n }\n\n fn float_remainder(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Rem;\n\n retro_binary!(RetroRem, B::float_remainder);\n\n impl<B: Backend> Backward<B, 2> for Rem {\n type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (lhs, rhs, broadcast) = ops.state;\n let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));\n let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n // remainder(x, y) = x - floor(x / y) * y\n // partial(x - floor(x / y) * y, x) = 1\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n // partial(x - floor(x / y) * y, y) = - floor(x / y)\n let rhs = rhs.unwrap();\n let lhs = lhs.unwrap();\n let value = B::float_neg(B::float_floor(B::float_div(lhs, rhs)));\n let grad = B::float_mul(grad, value);\n broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let lhs_tracked = lhs.is_tracked();\n let rhs_tracked = rhs.is_tracked();\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match Rem\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroRem::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));\n let rhs_state = (lhs_tracked || rhs_tracked).then(|| prep.checkpoint(&rhs));\n\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n 
B::float_remainder(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_remainder(lhs.primitive, rhs.primitive))\n }\n }\n }\n\n fn float_remainder_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct RemainderScalar;\n\n retro_unary_scalar!(RetroRemainderScalar, B::float_remainder_scalar);\n\n impl<B: Backend> Backward<B, 1> for RemainderScalar {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| grad);\n }\n }\n\n RemainderScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroRemainderScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateless(B::float_remainder_scalar(lhs.primitive, rhs))\n }\n\n fn float_matmul(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Matmul;\n\n impl<B: Backend> Backward<B, 2> for Matmul {\n type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (lhs, rhs, broadcast) = ops.state;\n let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));\n let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let rhs = B::float_transpose(rhs.unwrap());\n let grad = B::float_matmul(grad, rhs);\n\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n let lhs = B::float_transpose(lhs.unwrap());\n let grad = B::float_matmul(lhs, grad);\n\n broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let lhs_tracked = lhs.is_tracked();\n let rhs_tracked = rhs.is_tracked();\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match Matmul\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n 
.compute_bound()\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));\n let rhs_state = lhs_tracked.then(|| prep.checkpoint(&rhs));\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n B::float_matmul(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_matmul(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_neg(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Neg;\n\n retro_unary!(RetroNeg, B::float_neg);\n\n impl<B: Backend> Backward<B, 1> for Neg {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| B::float_neg(grad));\n }\n }\n\n Neg.prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroNeg::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateless(B::float_neg(tensor.primitive))\n }\n\n fn float_recip(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Recip;\n\n retro_unary!(RetroRecip, B::float_recip);\n\n impl<B: Backend> Backward<B, 1> for Recip {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let tensor = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let tmp = B::float_powf_scalar(tensor, -2.0);\n let value = B::float_neg(tmp);\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Recip\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroRecip::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_recip(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_recip(tensor.primitive)),\n }\n }\n\n fn float_swap_dims(tensor: 
FloatTensor<Self>, dim1: usize, dim2: usize) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct SwapDim;\n\n #[derive(new, Debug)]\n struct RetroSwapDims<B: Backend> {\n input_id: NodeID,\n dim1: usize,\n dim2: usize,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroSwapDims<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_swap_dims(input, self.dim1, self.dim2);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for SwapDim {\n type State = (usize, usize);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim1, dim2) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_swap_dims(grad, dim2, dim1)\n });\n }\n }\n\n match SwapDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSwapDims::<B>::new(tensor.node.id, dim1, dim2))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (dim1, dim2),\n B::float_swap_dims(tensor.primitive, dim1, dim2),\n ),\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_swap_dims(tensor.primitive, dim1, dim2))\n }\n }\n }\n\n fn float_permute(tensor: FloatTensor<Self>, axes: &[usize]) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct PermuteDim;\n\n #[derive(new, Debug)]\n struct RetroPermuteDims<B: Backend> {\n input_id: NodeID,\n axes: Vec<usize>,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroPermuteDims<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_permute(input, &self.axes);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for PermuteDim {\n type State = Vec<usize>;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: 
&mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let axes = ops.state;\n\n let mut inverse = vec![0usize; axes.len()];\n axes.iter()\n .enumerate()\n .for_each(|(i, &axis)| inverse[axis] = i);\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_permute(grad, &inverse)\n });\n }\n }\n\n match PermuteDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroPermuteDims::<B>::new(tensor.node.id, axes.to_vec()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n prep.finish(axes.to_vec(), B::float_permute(tensor.primitive, axes))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_permute(tensor.primitive, axes)),\n }\n }\n\n fn float_flip(tensor: FloatTensor<Self>, axes: &[usize]) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct FlipDim;\n\n #[derive(new, Debug)]\n struct RetroFlipDims<B: Backend> {\n input_id: NodeID,\n axes: Vec<usize>,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroFlipDims<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_flip(input, &self.axes);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for FlipDim {\n type State = Vec<usize>;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let axes = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_flip(grad, &axes)\n });\n }\n }\n\n match FlipDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroFlipDims::<B>::new(tensor.node.id, axes.to_vec()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n prep.finish(axes.to_vec(), B::float_flip(tensor.primitive, axes))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_flip(tensor.primitive, axes)),\n }\n }\n\n fn float_reshape(tensor: FloatTensor<Self>, shape: 
Shape) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct ReshapeDim;\n\n #[derive(new, Debug)]\n struct RetroReshape<B: Backend> {\n input_id: NodeID,\n shape: Shape,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroReshape<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_reshape(input, self.shape.clone());\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for ReshapeDim {\n type State = (Shape, Shape);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape_original, shape) = ops.state;\n let ndims_out = shape.num_dims();\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let shape_grad = grad.shape();\n let mut grad = grad;\n\n for i in 0..ndims_out {\n if shape.dims[i] == 1 && shape_grad.dims[i] != 1 {\n grad = B::float_sum_dim(grad, i);\n }\n }\n\n B::float_reshape(grad, shape_original)\n });\n }\n }\n\n match ReshapeDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroReshape::<B>::new(tensor.node.id, shape.clone()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (tensor.primitive.shape(), shape.clone()),\n B::float_reshape(tensor.primitive, shape),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_reshape(tensor.primitive, shape)),\n }\n }\n\n fn float_gather(\n dim: usize,\n tensor: FloatTensor<Self>,\n indices: IntTensor<B>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Gather;\n\n impl<B: Backend> Backward<B, 1> for Gather {\n type State = (usize, IntTensor<B>, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, indices, shape, device) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let zeros = 
B::float_zeros(shape, &device);\n B::float_scatter(dim, zeros, indices, grad)\n });\n }\n }\n\n match Gather\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n dim,\n indices.clone(),\n tensor.primitive.shape(),\n B::float_device(&tensor.primitive),\n ),\n B::float_gather(dim, tensor.primitive, indices),\n ),\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_gather(dim, tensor.primitive, indices))\n }\n }\n }\n\n fn float_scatter(\n dim: usize,\n tensor: FloatTensor<Self>,\n indices: IntTensor<B>,\n value: FloatTensor<Self>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Scatter;\n\n impl<B: Backend> Backward<B, 2> for Scatter {\n type State = (usize, IntTensor<B>, Shape, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, indices, shape_lhs, shape_rhs, device) = ops.state;\n let [indices_4lhs, indices_4rhs] = duplicate(&ops.parents, Some(indices));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let zeros = B::float_zeros(shape_lhs, &device);\n B::float_scatter(dim, grad, indices_4lhs.unwrap(), zeros)\n },\n |grad| {\n let zeros = B::float_zeros(shape_rhs, &device);\n B::float_scatter(dim, zeros, indices_4rhs.unwrap(), grad)\n },\n );\n }\n }\n\n match Scatter\n .prepare::<C>([tensor.node, value.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n dim,\n indices.clone(),\n tensor.primitive.shape(),\n value.primitive.shape(),\n B::float_device(&value.primitive),\n ),\n B::float_scatter(dim, tensor.primitive, indices, value.primitive),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_scatter(\n dim,\n tensor.primitive,\n indices,\n value.primitive,\n )),\n }\n }\n\n fn float_select(\n tensor: FloatTensor<Self>,\n dim: usize,\n indices: IntTensor<B>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Select;\n\n 
#[derive(new, Debug)]\n struct RetroSelect<B: Backend> {\n input_id: NodeID,\n dim: usize,\n indices: IntTensor<B>,\n }\n\n impl<B: Backend> RetroForward for RetroSelect<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_select(input, self.dim, self.indices.clone());\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for Select {\n type State = (usize, IntTensor<B>, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, indices, shape, device) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let zeros = B::float_zeros(shape, &device);\n B::float_select_assign(zeros, dim, indices, grad)\n });\n }\n }\n\n match Select\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSelect::<B>::new(tensor.node.id, dim, indices.clone()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n dim,\n indices.clone(),\n tensor.primitive.shape(),\n B::float_device(&tensor.primitive),\n ),\n B::float_select(tensor.primitive, dim, indices),\n ),\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_select(tensor.primitive, dim, indices))\n }\n }\n }\n\n fn float_select_assign(\n tensor: FloatTensor<Self>,\n dim: usize,\n indices: IntTensor<B>,\n value: FloatTensor<Self>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct IndexSelectDimAssign;\n\n #[derive(new, Debug)]\n struct RetroSelectAssign<B: Backend> {\n tensor_id: NodeID,\n dim: usize,\n indices: IntTensor<B>,\n value_id: NodeID,\n }\n\n impl<B: Backend> RetroForward for RetroSelectAssign<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);\n let value = states.get_state::<B::FloatTensorPrimitive>(&self.value_id);\n 
let out = B::float_select_assign(tensor, self.dim, self.indices.clone(), value);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 2> for IndexSelectDimAssign {\n type State = (usize, IntTensor<B>);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, indices) = ops.state;\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| grad,\n |grad| B::float_select(grad, dim, indices),\n );\n }\n }\n\n match IndexSelectDimAssign\n .prepare::<C>([tensor.node.clone(), value.node.clone()])\n .memory_bound()\n .retro_forward(RetroSelectAssign::<B>::new(\n tensor.node.id,\n dim,\n indices.clone(),\n value.node.id,\n ))\n .parents([&tensor, &value])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (dim, indices.clone()),\n B::float_select_assign(tensor.primitive, dim, indices, value.primitive),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_select_assign(\n tensor.primitive,\n dim,\n indices,\n value.primitive,\n )),\n }\n }\n\n fn float_slice(\n tensor: FloatTensor<Self>,\n ranges: &[core::ops::Range<usize>],\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Index;\n\n #[derive(new, Debug)]\n struct RetroSlice<B: Backend> {\n tensor_id: NodeID,\n ranges: Vec<core::ops::Range<usize>>,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroSlice<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);\n let out = B::float_slice(tensor, &self.ranges);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for Index {\n type State = (Vec<core::ops::Range<usize>>, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (ranges, shape, device) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let zeros = 
B::float_zeros(shape, &device);\n B::float_slice_assign(zeros, &ranges, grad)\n });\n }\n }\n\n match Index\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSlice::<B>::new(tensor.node.id, ranges.to_vec()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n ranges.to_vec(),\n tensor.primitive.shape(),\n B::float_device(&tensor.primitive),\n ),\n B::float_slice(tensor.primitive, ranges),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_slice(tensor.primitive, ranges)),\n }\n }\n\n fn float_slice_assign(\n tensor: FloatTensor<Self>,\n ranges: &[core::ops::Range<usize>],\n value: FloatTensor<Self>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct SliceAssign;\n\n #[derive(new, Debug)]\n struct RetroSliceAssign<B: Backend> {\n tensor_id: NodeID,\n ranges: Vec<core::ops::Range<usize>>,\n value_id: NodeID,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroSliceAssign<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);\n let value = states.get_state::<B::FloatTensorPrimitive>(&self.value_id);\n let out = B::float_slice_assign(tensor, &self.ranges, value);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 2> for SliceAssign {\n type State = (Vec<core::ops::Range<usize>>, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (ranges, shape_rhs, device) = ops.state;\n let [ranges_4lhs, ranges_4rhs] = duplicate(&ops.parents, Some(ranges));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let zeros = B::float_zeros(shape_rhs, &device);\n B::float_slice_assign(grad, &ranges_4lhs.unwrap(), zeros)\n },\n |grad| B::float_slice(grad, &ranges_4rhs.unwrap()),\n );\n }\n }\n\n match SliceAssign\n .prepare::<C>([tensor.node.clone(), value.node.clone()])\n 
.memory_bound()\n .retro_forward(RetroSliceAssign::<B>::new(\n tensor.node.id,\n ranges.to_vec(),\n value.node.id,\n ))\n .parents([&tensor, &value])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n ranges.to_vec(),\n value.primitive.shape(),\n B::float_device(&value.primitive),\n ),\n B::float_slice_assign(tensor.primitive, ranges, value.primitive),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_slice_assign(\n tensor.primitive,\n ranges,\n value.primitive,\n )),\n }\n }\n\n fn float_mask_where(\n tensor: FloatTensor<Self>,\n mask: BoolTensor<Self>,\n source: FloatTensor<Self>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct MaskWhere;\n\n impl<B: Backend> Backward<B, 2> for MaskWhere {\n type State = (BoolTensor<B>, Shape, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (mask, shape_lhs, shape_rhs, device) = ops.state;\n let [mask_4lhs, mask_4rhs] = duplicate(&ops.parents, Some(mask));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let zeros = B::float_zeros(shape_lhs.clone(), &device);\n let grad = B::float_mask_where(grad, mask_4lhs.unwrap(), zeros);\n\n broadcast_shape::<B>(grad, &shape_lhs)\n },\n |grad| {\n let zeros = B::float_zeros(shape_rhs.clone(), &device);\n let grad = B::float_mask_where(zeros, mask_4rhs.unwrap(), grad);\n\n broadcast_shape::<B>(grad, &shape_rhs)\n },\n );\n }\n }\n\n match MaskWhere\n .prepare::<C>([tensor.node, source.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n mask.clone(),\n tensor.primitive.shape(),\n source.primitive.shape(),\n B::float_device(&source.primitive),\n ),\n B::float_mask_where(tensor.primitive, mask, source.primitive),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_mask_where(\n tensor.primitive,\n mask,\n source.primitive,\n )),\n }\n }\n\n fn float_mask_fill(\n tensor: FloatTensor<Self>,\n mask: 
BoolTensor<B>,\n value: FloatElem<B>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct MaskFill;\n\n impl<B: Backend> Backward<B, 1> for MaskFill {\n type State = BoolTensor<B>;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_mask_fill(grad, ops.state, 0.elem())\n });\n }\n }\n\n match MaskFill\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n mask.clone(),\n B::float_mask_fill(tensor.primitive, mask, value),\n ),\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_mask_fill(tensor.primitive, mask, value))\n }\n }\n }\n\n fn float_equal(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {\n B::float_equal(lhs.primitive, rhs.primitive)\n }\n\n fn float_equal_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {\n B::float_equal_elem(lhs.primitive, rhs)\n }\n\n fn float_greater(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {\n B::float_greater(lhs.primitive, rhs.primitive)\n }\n\n fn float_greater_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {\n B::float_greater_elem(lhs.primitive, rhs)\n }\n\n fn float_greater_equal(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {\n B::float_greater_equal(lhs.primitive, rhs.primitive)\n }\n\n fn float_greater_equal_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {\n B::float_greater_equal_elem(lhs.primitive, rhs)\n }\n\n fn float_lower(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {\n B::float_lower(lhs.primitive, rhs.primitive)\n }\n\n fn float_lower_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {\n B::float_lower_elem(lhs.primitive, rhs)\n }\n\n fn float_lower_equal(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {\n B::float_lower_equal(lhs.primitive, rhs.primitive)\n 
}\n\n fn float_lower_equal_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {\n B::float_lower_equal_elem(lhs.primitive, rhs)\n }\n\n fn float_detach(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n // When we detach a tensor, we remove it from the graph, but we still want to keep the\n // `require_grad` setting.\n let is_require_grad = Self::float_is_require_grad(&tensor);\n let tensor = AutodiffTensor::new(tensor.primitive);\n\n match is_require_grad {\n true => tensor.require_grad(),\n false => tensor,\n }\n }\n\n fn float_set_require_grad(tensor: FloatTensor<Self>, require_grad: bool) -> FloatTensor<Self> {\n if require_grad {\n return tensor.require_grad();\n }\n\n AutodiffTensor::new(tensor.primitive)\n }\n\n fn float_is_require_grad(tensor: &FloatTensor<Self>) -> bool {\n matches!(tensor.node.requirement, Requirement::Grad)\n }\n\n fn float_mean(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Mean;\n\n impl<B: Backend> Backward<B, 1> for Mean {\n type State = Shape;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let shape = ops.state;\n let val = 1_f64 / shape.num_elements() as f64;\n let ones = B::float_ones(shape, &B::float_device(&grad));\n let val = B::float_mul_scalar(ones, val.elem());\n\n let grad = unsqueeze_like::<B>(grad, val.shape());\n B::float_mul(val, grad)\n });\n }\n }\n\n match Mean.prepare::<C>([tensor.node]).compute_bound().stateful() {\n OpsKind::Tracked(prep) => {\n prep.finish(tensor.primitive.shape(), B::float_mean(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_mean(tensor.primitive)),\n }\n }\n\n fn float_sum(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sum;\n\n impl<B: Backend> Backward<B, 1> for Sum {\n type State = Shape;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut 
Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let val = B::float_ones(ops.state, &B::float_device(&grad));\n\n let grad = unsqueeze_like::<B>(grad, val.shape());\n B::float_mul(val, grad)\n });\n }\n }\n\n match Sum.prepare::<C>([tensor.node]).compute_bound().stateful() {\n OpsKind::Tracked(prep) => {\n prep.finish(tensor.primitive.shape(), B::float_sum(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_sum(tensor.primitive)),\n }\n }\n\n fn float_mean_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct MeanDim;\n\n impl<B: Backend> Backward<B, 1> for MeanDim {\n type State = (Shape, usize);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape, dim) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let val = 1_f64 / shape.dims[dim] as f64;\n let ones = B::float_ones(shape, &B::float_device(&grad));\n let val = B::float_mul_scalar(ones, B::FloatElem::from_elem(val));\n\n let grad = B::float_sum_dim(grad, dim);\n B::float_mul(val, grad)\n });\n }\n }\n\n match MeanDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (tensor.primitive.shape(), dim),\n B::float_mean_dim(tensor.primitive, dim),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_mean_dim(tensor.primitive, dim)),\n }\n }\n\n fn float_sum_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct SumDim;\n\n impl<B: Backend> Backward<B, 1> for SumDim {\n type State = (Shape, usize);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape, dim) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let ones = B::float_ones(shape, &B::float_device(&grad));\n let grad = 
B::float_sum_dim(grad, dim);\n\n B::float_mul(ones, grad)\n });\n }\n }\n\n match SumDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (tensor.primitive.shape(), dim),\n B::float_sum_dim(tensor.primitive, dim),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_sum_dim(tensor.primitive, dim)),\n }\n }\n\n fn float_argmax(tensor: FloatTensor<Self>, dim: usize) -> IntTensor<B> {\n B::float_argmax(tensor.primitive, dim)\n }\n\n fn float_argmin(tensor: FloatTensor<Self>, dim: usize) -> IntTensor<B> {\n B::float_argmin(tensor.primitive, dim)\n }\n\n fn float_exp(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Exp;\n\n retro_unary!(RetroExp, B::float_exp);\n\n impl<B: Backend> Backward<B, 1> for Exp {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n let output = B::float_exp(input);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_mul(grad, output)\n });\n }\n }\n\n match Exp\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroExp::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_exp(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_exp(tensor.primitive)),\n }\n }\n\n fn float_log(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Log;\n\n retro_unary!(RetroLog, B::float_log);\n\n impl<B: Backend> Backward<B, 1> for Log {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = 
B::float_powf_scalar(input, -1.0);\n B::float_mul(grad, value)\n });\n }\n }\n\n match Log\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroLog::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_log(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_log(tensor.primitive)),\n }\n }\n\n fn float_log1p(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Log1P;\n\n retro_unary!(RetroLog1P, B::float_log1p);\n\n impl<B: Backend> Backward<B, 1> for Log1P {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_add_scalar(input, 1.elem());\n let value = B::float_powf_scalar(value, -1.0);\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Log1P\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroLog1P::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_log1p(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_log1p(tensor.primitive)),\n }\n }\n\n fn float_powf_scalar(tensor: FloatTensor<Self>, value: f32) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct PowfScalar;\n\n #[derive(new, Debug)]\n struct RetroPowfScalar<B: Backend> {\n lhs_id: NodeID,\n rhs: f32,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroPowfScalar<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let lhs = states.get_state::<B::FloatTensorPrimitive>(&self.lhs_id);\n let out = B::float_powf_scalar(lhs, self.rhs);\n states.save(out_node, out)\n }\n }\n\n impl<B: 
Backend> Backward<B, 1> for PowfScalar {\n type State = (NodeID, f32);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (tensor_id, value) = ops.state;\n let tensor = checkpointer.retrieve_node_output(tensor_id);\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let tmp = B::float_powf_scalar(tensor, value - 1.0);\n let value = B::float_mul_scalar(tmp, value.elem());\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match PowfScalar\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroPowfScalar::<B>::new(tensor.node.id, value))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = (prep.checkpoint(&tensor), value);\n prep.finish(state, B::float_powf_scalar(tensor.primitive, value))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_powf_scalar(tensor.primitive, value)),\n }\n }\n\n fn float_sqrt(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sqrt;\n\n retro_unary!(RetroSqrt, B::float_sqrt);\n\n impl<B: Backend> Backward<B, 1> for Sqrt {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_div_scalar(B::float_powf_scalar(input, -0.5), 2.elem());\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Sqrt\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSqrt::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_sqrt(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_sqrt(tensor.primitive)),\n }\n }\n\n fn float_abs(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Abs;\n\n 
retro_unary!(RetroAbs, B::float_abs);\n\n impl<B: Backend> Backward<B, 1> for Abs {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let tensor: B::FloatTensorPrimitive = checkpointer.retrieve_node_output(ops.state);\n let state = B::float_sign(tensor);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_mul(grad, state)\n });\n }\n }\n\n match Abs\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroAbs::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_abs(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_abs(tensor.primitive)),\n }\n }\n\n fn float_cos(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Cos;\n\n retro_unary!(RetroCos, B::float_cos);\n\n impl<B: Backend> Backward<B, 1> for Cos {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_neg(B::float_sin(input));\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Cos\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroCos::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_cos(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_cos(tensor.primitive)),\n }\n }\n\n fn float_sin(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sin;\n\n retro_unary!(RetroSin, B::float_sin);\n\n impl<B: Backend> Backward<B, 1> for Sin {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 
1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let state = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_cos(state);\n B::float_mul(grad, value)\n });\n }\n }\n\n match Sin\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSin::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_sin(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_sin(tensor.primitive)),\n }\n }\n\n fn float_tanh(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Tanh;\n\n retro_unary!(RetroTanh, B::float_tanh);\n\n impl<B: Backend> Backward<B, 1> for Tanh {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n let state = B::float_tanh(input);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_add_scalar(\n B::float_neg(B::float_powf_scalar(state, 2.0)),\n 1.elem(),\n );\n B::float_mul(grad, value)\n });\n }\n }\n\n match Tanh\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroTanh::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_tanh(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_tanh(tensor.primitive)),\n }\n }\n\n fn float_round(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Round;\n retro_unary!(RetroRound, B::float_round);\n\n impl<B: Backend> Backward<B, 1> for Round {\n type State = (Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n 
) {\n let (shape, device) = ops.state;\n unary::<B, _>(ops.parents, ops.node, grads, |_grad| {\n B::float_zeros(shape, &device)\n })\n }\n }\n\n match Round\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroRound::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (tensor.primitive.shape(), B::float_device(&tensor.primitive)),\n B::float_round(tensor.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_round(tensor.primitive)),\n }\n }\n\n fn float_floor(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Floor;\n retro_unary!(RetroFloor, B::float_floor);\n\n impl<B: Backend> Backward<B, 1> for Floor {\n type State = (Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape, device) = ops.state;\n unary::<B, _>(ops.parents, ops.node, grads, |_grad| {\n B::float_zeros(shape, &device)\n })\n }\n }\n\n match Floor\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroFloor::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (tensor.primitive.shape(), B::float_device(&tensor.primitive)),\n B::float_floor(tensor.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_floor(tensor.primitive)),\n }\n }\n\n fn float_ceil(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Ceil;\n retro_unary!(RetroCeil, B::float_ceil);\n\n impl<B: Backend> Backward<B, 1> for Ceil {\n type State = (Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape, device) = ops.state;\n unary::<B, _>(ops.parents, ops.node, grads, |_grad| {\n B::float_zeros(shape, &device)\n })\n }\n }\n\n match Ceil\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n 
.retro_forward(RetroCeil::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (tensor.primitive.shape(), B::float_device(&tensor.primitive)),\n B::float_floor(tensor.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_floor(tensor.primitive)),\n }\n }\n\n fn float_erf(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Erf;\n\n retro_unary!(RetroErf, B::float_erf);\n\n impl<B: Backend> Backward<B, 1> for Erf {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let ops = checkpointer.retrieve_node_output(ops.state);\n let exponent = B::float_neg(B::float_powf_scalar(ops, 2.0));\n let numerator = B::float_mul_scalar(B::float_exp(exponent), 2.0.elem());\n let denominator = core::f64::consts::PI.sqrt().elem();\n let value = B::float_div_scalar(numerator, denominator);\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Erf\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroErf::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_erf(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_erf(tensor.primitive)),\n }\n }\n\n fn float_cat(tensors: Vec<FloatTensor<Self>>, dim: usize) -> FloatTensor<Self> {\n #[derive(new, Debug)]\n struct CatStep<B: Backend> {\n nodes: Vec<Option<NodeRef>>,\n // The dimension of each tensor along the dim dimension.\n // This indicates the number of dimension concatenated for each tensor.\n dim_sizes: Vec<usize>,\n output: NodeRef,\n phantom: PhantomData<B>,\n dim: usize,\n }\n\n impl<B: Backend> Step for CatStep<B> {\n fn step(self: Box<Self>, grads: &mut Gradients, _checkpointer: &mut Checkpointer) {\n let grad = 
grads.consume::<B>(&self.output);\n let ranges: Vec<_> = grad.shape().dims.iter().map(|v| 0..*v).collect();\n\n let mut current_index = 0;\n\n self.nodes\n .into_iter()\n .zip(self.dim_sizes)\n .filter_map(|(node, dim_size)| node.map(|node| (node, dim_size)))\n .for_each(|(node, dim_size)| {\n let mut ranges = ranges.clone();\n ranges[self.dim] = current_index..dim_size + current_index;\n current_index += dim_size;\n grads.register::<B>(node.id, B::float_slice(grad.clone(), &ranges));\n });\n }\n\n fn node(&self) -> NodeID {\n self.output.id\n }\n\n fn parents(&self) -> Vec<NodeID> {\n self.nodes\n .iter()\n .filter_map(|node| node.clone())\n .map(|node| node.id)\n .collect()\n }\n fn depth(&self) -> usize {\n self.output.order\n }\n }\n\n let mut nodes = Vec::with_capacity(tensors.len());\n let mut primitives = Vec::with_capacity(tensors.len());\n let mut dim_sizes = Vec::with_capacity(tensors.len());\n\n tensors.into_iter().for_each(|tensor| {\n dim_sizes.push(tensor.primitive.shape().dims[dim]);\n nodes.push(tensor.node);\n primitives.push(tensor.primitive);\n });\n\n let requirement = Requirement::from_nodes(&nodes);\n\n // For simplicity, this operation does not checkpoint anything\n let cat_computing_property = ComputingProperty::Ambiguous;\n let checkpointer_builder = CheckpointerBuilder::default();\n\n let output = B::float_cat(primitives, dim);\n if requirement.is_none() {\n return AutodiffTensor::from_parents(\n output,\n &nodes,\n requirement,\n cat_computing_property,\n );\n }\n\n let output =\n AutodiffTensor::from_parents(output, &nodes, requirement, cat_computing_property);\n let nodes = nodes\n .into_iter()\n .map(|node| node.clone_if_require_grad())\n .collect::<Vec<_>>();\n\n let ops = CatStep::<B>::new(nodes, dim_sizes, output.node.clone(), dim);\n output.register_step(ops, checkpointer_builder)\n }\n\n fn float_max_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {\n match MaxMinDim\n .prepare::<C>([tensor.node])\n 
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => {
                // Save the input shape plus the argmax indices as backward
                // state; the shared `MaxMinDim` backward uses them.
                let shape = tensor.primitive.shape();
                let (tensor, index) = B::float_max_dim_with_indices(tensor.primitive, dim);
                prep.finish((index, shape), tensor)
            }
            // Untracked path: no state is needed, so the cheaper variant
            // without indices is used.
            OpsKind::UnTracked(prep) => prep.finish(B::float_max_dim(tensor.primitive, dim)),
        }
    }
    // Like `float_max_dim`, but also returns the indices of the maxima to the
    // caller. The same `MaxMinDim` backward is registered with `(index, shape)`
    // as its state.
    fn float_max_dim_with_indices(
        tensor: FloatTensor<Self>,
        dim: usize,
    ) -> (FloatTensor<Self>, IntTensor<B>) {
        match MaxMinDim
            .prepare::<C>([tensor.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => {
                let shape = tensor.primitive.shape();
                let (tensor, index) = B::float_max_dim_with_indices(tensor.primitive, dim);
                // Clone: one copy of the indices goes into the backward state,
                // the other is returned to the caller.
                let tensor = prep.finish((index.clone(), shape), tensor);

                (tensor, index)
            }
            OpsKind::UnTracked(prep) => {
                let (tensor, index) = B::float_max_dim_with_indices(tensor.primitive, dim);
                let tensor = prep.finish(tensor);

                (tensor, index)
            }
        }
    }
    // Min reduction along `dim`; mirrors `float_max_dim` but with the min
    // indices stored in the `MaxMinDim` backward state.
    fn float_min_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {
        match MaxMinDim
            .prepare::<C>([tensor.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => {
                let shape = tensor.primitive.shape();
                let (tensor, index) = B::float_min_dim_with_indices(tensor.primitive, dim);
                prep.finish((index, shape), tensor)
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_min_dim(tensor.primitive, dim)),
        }
    }
    // Like `float_min_dim`, but also returns the indices of the minima;
    // mirrors `float_max_dim_with_indices`.
    fn float_min_dim_with_indices(
        tensor: FloatTensor<Self>,
        dim: usize,
    ) -> (FloatTensor<Self>, IntTensor<B>) {
        match MaxMinDim
            .prepare::<C>([tensor.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => {
                let shape = tensor.primitive.shape();
                let (tensor, index) = B::float_min_dim_with_indices(tensor.primitive, dim);
                // Clone: indices feed both the backward state and the return.
                let tensor = prep.finish((index.clone(), shape), tensor);

                (tensor, index)
            }
            OpsKind::UnTracked(prep) => {
                let (tensor, index) = B::float_min_dim_with_indices(tensor.primitive, dim);
                let tensor = prep.finish(tensor);

                (tensor, index)
            }
        }
    }

fn float_into_int(tensor: FloatTensor<Self>) -> <Autodiff<B> as Backend>::IntTensorPrimitive {\n B::float_into_int(tensor.primitive)\n }\n\n fn float_powf(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct PowF;\n\n retro_binary!(RetroPowf, B::float_powf);\n\n impl<B: Backend> Backward<B, 2> for PowF {\n type State = (NodeID, NodeID, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (lhs_id, rhs_id, broadcast) = ops.state;\n let lhs: B::FloatTensorPrimitive = checkpointer.retrieve_node_output(lhs_id);\n let rhs: B::FloatTensorPrimitive = checkpointer.retrieve_node_output(rhs_id);\n\n // Both lhs and rhs are needed for both lhs and rhs gradients, but we clone them\n // the number of times required by the parents specification.\n let [rhs_4lhs, rhs_4rhs] = duplicate(&ops.parents, Some(rhs));\n let [lhs_4lhs, lhs_4rhs] = duplicate(&ops.parents, Some(lhs));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n //rhs*(lhs.val**(rhs-1))*grad\n let rhs1 = rhs_4lhs.unwrap();\n let rhs2 = rhs1.clone();\n let lhs = lhs_4lhs.unwrap();\n\n let tmp = B::float_powf(\n lhs,\n B::float_sub_scalar(rhs1, B::FloatElem::from_elem(1.0)),\n );\n let value = B::float_mul(tmp, rhs2);\n let grad = B::float_mul(grad, value);\n\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n //lhs**rhs * ln(lhs) * grad\n let rhs = rhs_4rhs.unwrap();\n let lhs1 = lhs_4rhs.unwrap();\n let lhs2 = lhs1.clone();\n let tmp = B::float_powf(lhs1, rhs);\n let value = B::float_mul(tmp, B::float_log(lhs2));\n let grad = B::float_mul(grad, value);\n\n broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match PowF\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroPowf::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, 
&rhs])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = prep.checkpoint(&lhs);\n let rhs_state = prep.checkpoint(&rhs);\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n B::float_powf(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_powf(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_sign(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sign;\n\n retro_unary!(RetroSign, B::float_sign);\n\n impl<B: Backend> Backward<B, 1> for Sign {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad|\n // Always return 0 because the derivative of the sign function\n // does not contribute to gradient updates in a meaningful way.\n B::float_mul_scalar(grad, 0.elem()));\n }\n }\n\n Sign.prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSign::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateless(B::float_sign(tensor.primitive))\n }\n\n fn float_expand(tensor: FloatTensor<Self>, shape: Shape) -> FloatTensor<Self> {\n // D1: tensor, D2: shape\n #[derive(Debug)]\n struct ExpandDim;\n\n #[derive(new, Debug)]\n struct RetroExpand<B: Backend> {\n input_id: NodeID,\n shape: Shape,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroExpand<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_expand(input, self.shape.clone());\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for ExpandDim {\n type State = (Shape, Shape);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape_in, shape_out) = ops.state;\n let ndims_in = shape_in.num_dims();\n let ndims_out = shape_out.num_dims();\n\n let 
mut shape_expanded = vec![1; ndims_out];\n\n debug_assert!(ndims_out >= ndims_in);\n\n for i in 0..ndims_in {\n shape_expanded[i + (ndims_out - ndims_in)] = shape_in.dims[i];\n }\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let shape_grad = grad.shape();\n let mut grad = grad;\n\n #[allow(clippy::needless_range_loop)]\n for i in 0..ndims_out {\n if shape_expanded[i] == 1 && shape_grad.dims[i] != 1 {\n grad = B::float_sum_dim(grad, i);\n }\n }\n\n B::float_reshape(grad, shape_in)\n });\n }\n }\n\n match ExpandDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroExpand::<B>::new(tensor.node.id, shape.clone()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (tensor.primitive.shape(), shape.clone()),\n B::float_expand(tensor.primitive, shape),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_expand(tensor.primitive, shape)),\n }\n }\n\n fn float_sort(tensor: FloatTensor<Self>, dim: usize, descending: bool) -> FloatTensor<Self> {\n match super::sort::SortDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let shape = tensor.primitive.shape();\n let (tensor, indices) =\n B::float_sort_with_indices(tensor.primitive, dim, descending);\n prep.finish((indices, shape), tensor)\n }\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_sort(tensor.primitive, dim, descending))\n }\n }\n }\n\n fn float_sort_with_indices(\n tensor: FloatTensor<Self>,\n dim: usize,\n descending: bool,\n ) -> (FloatTensor<Self>, IntTensor<B>) {\n match super::sort::SortDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let shape = tensor.primitive.shape();\n let (tensor, indices) =\n B::float_sort_with_indices(tensor.primitive, dim, descending);\n let tensor = prep.finish((indices.clone(), shape), tensor);\n\n (tensor, indices)\n }\n OpsKind::UnTracked(prep) => {\n let (tensor, indices) =\n 
B::float_sort_with_indices(tensor.primitive, dim, descending);\n let tensor = prep.finish(tensor);\n\n (tensor, indices)\n }\n }\n }\n\n fn float_argsort(tensor: FloatTensor<Self>, dim: usize, descending: bool) -> IntTensor<B> {\n B::float_argsort(tensor.primitive, dim, descending)\n }\n\n fn float_repeat_dim(tensor: FloatTensor<Self>, dim: usize, times: usize) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Repeat;\n\n #[derive(new, Debug)]\n struct RetroRepeat<B: Backend> {\n tensor_id: NodeID,\n dim: usize,\n times: usize,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroRepeat<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);\n let out = B::float_repeat_dim(tensor, self.dim, self.times);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for Repeat {\n type State = (usize, usize);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, times) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let mut dims = grad.shape().dims;\n let orig_dim_size = dims[dim] / times;\n if orig_dim_size > 1 {\n dims[dim] = orig_dim_size;\n let orig_dims = dims.clone();\n dims.insert(dim + 1, times); // shape [..., orig_dim_size, times, ...]\n let grad = B::float_reshape(grad, Shape::from(dims));\n let grad = B::float_sum_dim(grad, dim + 1); // sum over repeat times\n B::float_reshape(grad, Shape::from(orig_dims))\n } else {\n B::float_sum_dim(grad, dim)\n }\n });\n }\n }\n\n match Repeat\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroRepeat::<B>::new(tensor.node.id, dim, times))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (dim, times),\n B::float_repeat_dim(tensor.primitive, dim, times),\n ),\n OpsKind::UnTracked(prep) => {\n 
prep.finish(B::float_repeat_dim(tensor.primitive, dim, times))\n }\n }\n }\n\n fn float_cast(tensor: FloatTensor<Self>, dtype: burn_tensor::FloatDType) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_cast(tensor.primitive, dtype))\n }\n\n // TODO: Implement float_prod and float_sum\n // https://github.com/tracel-ai/burn/issues/1458\n}"
],
"name": "device",
"type": "&Device<Self>"
}
],
"end_line": 116,
"name": "float_to_device",
"signature": "fn float_to_device(tensor: FloatTensor<Self>, device: &Device<Self>) -> FloatTensor<Self>",
"start_line": 86
} | {
"class_name": "impl<B: Backend, C: CheckpointStrategy> FloatTensorOps<Self> for Autodiff<B, C> {\n fn float_from_data(data: TensorData, device: &Device<Self>) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_from_data(data, device))\n }\n\n fn float_random(\n shape: Shape,\n distribution: burn_tensor::Distribution,\n device: &Device<Self>,\n ) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_random(shape, distribution, device))\n }\n\n fn float_zeros(shape: Shape, device: &Device<Self>) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_zeros(shape, device))\n }\n\n fn float_ones(shape: Shape, device: &Device<Self>) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_ones(shape, device))\n }\n\n async fn float_into_data(tensor: FloatTensor<Self>) -> TensorData {\n B::float_into_data(tensor.primitive).await\n }\n\n fn float_device(tensor: &FloatTensor<Self>) -> Device<Self> {\n B::float_device(&tensor.primitive)\n }\n\n fn float_to_device(tensor: FloatTensor<Self>, device: &Device<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct ToDevice;\n\n impl<B: Backend> Backward<B, 1> for ToDevice {\n type State = B::Device;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_to_device(grad, &ops.state)\n });\n }\n }\n\n match ToDevice\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let device_old = B::float_device(&tensor.primitive);\n prep.finish(device_old, B::float_to_device(tensor.primitive, device))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_to_device(tensor.primitive, device)),\n }\n }\n\n fn float_empty(shape: Shape, device: &Device<Self>) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_empty(shape, device))\n }\n\n fn float_add(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Add;\n\n 
retro_binary!(RetroAdd, B::float_add);\n\n impl<B: Backend> Backward<B, 2> for Add {\n type State = (Shape, Shape);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape_lhs, shape_rhs) = ops.state;\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| broadcast_shape::<B>(grad, &shape_lhs),\n |grad| broadcast_shape::<B>(grad, &shape_rhs),\n );\n }\n }\n\n match Add\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroAdd::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (lhs.primitive.shape(), rhs.primitive.shape()),\n B::float_add(lhs.primitive, rhs.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_add(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_add_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct AddScalar;\n\n retro_unary_scalar!(RetroAddScalar, B::float_add_scalar);\n\n impl<B: Backend> Backward<B, 1> for AddScalar {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| grad);\n }\n }\n\n AddScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroAddScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateless(B::float_add_scalar(lhs.primitive, rhs))\n }\n\n fn float_sub(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sub;\n\n retro_binary!(RetroSub, B::float_sub);\n\n impl<B: Backend> Backward<B, 2> for Sub {\n type State = (Shape, Shape);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape_lhs, shape_rhs) = ops.state;\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n 
grads,\n |grad| broadcast_shape::<B>(grad, &shape_lhs),\n |grad| broadcast_shape::<B>(B::float_neg(grad), &shape_rhs),\n );\n }\n }\n\n match Sub\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroSub::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (lhs.primitive.shape(), rhs.primitive.shape()),\n B::float_sub(lhs.primitive, rhs.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_sub(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_sub_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct SubScalar;\n\n retro_unary_scalar!(RetroSubScalar, B::float_sub_scalar);\n\n impl<B: Backend> Backward<B, 1> for SubScalar {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| grad);\n }\n }\n\n SubScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroSubScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateless(B::float_sub_scalar(lhs.primitive, rhs))\n }\n\n fn float_mul(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Mul;\n\n retro_binary!(RetroMul, B::float_mul);\n\n impl<B: Backend> Backward<B, 2> for Mul {\n type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (lhs, rhs, broadcast) = ops.state;\n let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));\n let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let grad = B::float_mul(grad, rhs.unwrap());\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n let grad = B::float_mul(grad, 
lhs.unwrap());\n broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let lhs_tracked = lhs.is_tracked();\n let rhs_tracked = rhs.is_tracked();\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match Mul\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroMul::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));\n let rhs_state = lhs_tracked.then(|| prep.checkpoint(&rhs));\n\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n B::float_mul(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_mul(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_mul_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct MulScalar;\n\n retro_unary_scalar!(RetroMulScalar, B::float_mul_scalar);\n\n impl<B: Backend> Backward<B, 1> for MulScalar {\n type State = FloatElem<B>;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_mul_scalar(grad, ops.state)\n });\n }\n }\n\n match MulScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroMulScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(rhs, B::float_mul_scalar(lhs.primitive, rhs)),\n OpsKind::UnTracked(prep) => prep.finish(B::float_mul_scalar(lhs.primitive, rhs)),\n }\n }\n\n fn float_div(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Div;\n\n retro_binary!(RetroDiv, B::float_div);\n\n impl<B: Backend> Backward<B, 2> for Div {\n type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n 
checkpointer: &mut Checkpointer,\n ) {\n let (lhs, rhs, broadcast) = ops.state;\n let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));\n let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));\n let [rhs_4lhs, rhs_4rhs] = duplicate(&ops.parents, rhs);\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let rhs = rhs_4lhs.unwrap();\n let value = B::float_powf_scalar(rhs, -1.0);\n let grad = B::float_mul(grad, value);\n\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n let rhs = rhs_4rhs.unwrap();\n let lhs = lhs.unwrap();\n let value = B::float_div(B::float_neg(lhs), B::float_powf_scalar(rhs, 2.0));\n let grad = B::float_mul(grad, value);\n\n broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let lhs_tracked = lhs.is_tracked();\n let rhs_tracked = rhs.is_tracked();\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match Div\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroDiv::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));\n let rhs_state = (lhs_tracked || rhs_tracked).then(|| prep.checkpoint(&rhs));\n\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n B::float_div(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_div(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_div_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct DivScalar;\n\n retro_unary_scalar!(RetroDivScalar, B::float_div_scalar);\n\n impl<B: Backend> Backward<B, 1> for DivScalar {\n type State = FloatElem<B>;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let tmp = 1.0 / ops.state.elem::<f32>();\n B::float_mul_scalar(grad, 
tmp.elem())\n });\n }\n }\n\n match DivScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroDivScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(rhs, B::float_div_scalar(lhs.primitive, rhs)),\n OpsKind::UnTracked(prep) => prep.finish(B::float_div_scalar(lhs.primitive, rhs)),\n }\n }\n\n fn float_remainder(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Rem;\n\n retro_binary!(RetroRem, B::float_remainder);\n\n impl<B: Backend> Backward<B, 2> for Rem {\n type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (lhs, rhs, broadcast) = ops.state;\n let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));\n let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n // remainder(x, y) = x - floor(x / y) * y\n // partial(x - floor(x / y) * y, x) = 1\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n // partial(x - floor(x / y) * y, y) = - floor(x / y)\n let rhs = rhs.unwrap();\n let lhs = lhs.unwrap();\n let value = B::float_neg(B::float_floor(B::float_div(lhs, rhs)));\n let grad = B::float_mul(grad, value);\n broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let lhs_tracked = lhs.is_tracked();\n let rhs_tracked = rhs.is_tracked();\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match Rem\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroRem::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));\n let rhs_state = (lhs_tracked || rhs_tracked).then(|| prep.checkpoint(&rhs));\n\n prep.finish(\n (lhs_state, 
rhs_state, broadcast),\n B::float_remainder(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_remainder(lhs.primitive, rhs.primitive))\n }\n }\n }\n\n fn float_remainder_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct RemainderScalar;\n\n retro_unary_scalar!(RetroRemainderScalar, B::float_remainder_scalar);\n\n impl<B: Backend> Backward<B, 1> for RemainderScalar {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| grad);\n }\n }\n\n RemainderScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroRemainderScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateless(B::float_remainder_scalar(lhs.primitive, rhs))\n }\n\n fn float_matmul(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Matmul;\n\n impl<B: Backend> Backward<B, 2> for Matmul {\n type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (lhs, rhs, broadcast) = ops.state;\n let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));\n let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let rhs = B::float_transpose(rhs.unwrap());\n let grad = B::float_matmul(grad, rhs);\n\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n let lhs = B::float_transpose(lhs.unwrap());\n let grad = B::float_matmul(lhs, grad);\n\n broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let lhs_tracked = lhs.is_tracked();\n let rhs_tracked = rhs.is_tracked();\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match Matmul\n .prepare::<C>([lhs.node.clone(), 
rhs.node.clone()])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));\n let rhs_state = lhs_tracked.then(|| prep.checkpoint(&rhs));\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n B::float_matmul(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_matmul(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_neg(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Neg;\n\n retro_unary!(RetroNeg, B::float_neg);\n\n impl<B: Backend> Backward<B, 1> for Neg {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| B::float_neg(grad));\n }\n }\n\n Neg.prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroNeg::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateless(B::float_neg(tensor.primitive))\n }\n\n fn float_recip(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Recip;\n\n retro_unary!(RetroRecip, B::float_recip);\n\n impl<B: Backend> Backward<B, 1> for Recip {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let tensor = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let tmp = B::float_powf_scalar(tensor, -2.0);\n let value = B::float_neg(tmp);\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Recip\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroRecip::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_recip(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_recip(tensor.primitive)),\n }\n }\n\n fn 
float_swap_dims(tensor: FloatTensor<Self>, dim1: usize, dim2: usize) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct SwapDim;\n\n #[derive(new, Debug)]\n struct RetroSwapDims<B: Backend> {\n input_id: NodeID,\n dim1: usize,\n dim2: usize,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroSwapDims<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_swap_dims(input, self.dim1, self.dim2);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for SwapDim {\n type State = (usize, usize);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim1, dim2) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_swap_dims(grad, dim2, dim1)\n });\n }\n }\n\n match SwapDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSwapDims::<B>::new(tensor.node.id, dim1, dim2))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (dim1, dim2),\n B::float_swap_dims(tensor.primitive, dim1, dim2),\n ),\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_swap_dims(tensor.primitive, dim1, dim2))\n }\n }\n }\n\n fn float_permute(tensor: FloatTensor<Self>, axes: &[usize]) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct PermuteDim;\n\n #[derive(new, Debug)]\n struct RetroPermuteDims<B: Backend> {\n input_id: NodeID,\n axes: Vec<usize>,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroPermuteDims<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_permute(input, &self.axes);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for PermuteDim {\n type State = Vec<usize>;\n\n fn backward(\n self,\n ops: 
Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let axes = ops.state;\n\n let mut inverse = vec![0usize; axes.len()];\n axes.iter()\n .enumerate()\n .for_each(|(i, &axis)| inverse[axis] = i);\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_permute(grad, &inverse)\n });\n }\n }\n\n match PermuteDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroPermuteDims::<B>::new(tensor.node.id, axes.to_vec()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n prep.finish(axes.to_vec(), B::float_permute(tensor.primitive, axes))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_permute(tensor.primitive, axes)),\n }\n }\n\n fn float_flip(tensor: FloatTensor<Self>, axes: &[usize]) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct FlipDim;\n\n #[derive(new, Debug)]\n struct RetroFlipDims<B: Backend> {\n input_id: NodeID,\n axes: Vec<usize>,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroFlipDims<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_flip(input, &self.axes);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for FlipDim {\n type State = Vec<usize>;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let axes = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_flip(grad, &axes)\n });\n }\n }\n\n match FlipDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroFlipDims::<B>::new(tensor.node.id, axes.to_vec()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n prep.finish(axes.to_vec(), B::float_flip(tensor.primitive, axes))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_flip(tensor.primitive, axes)),\n }\n }\n\n fn 
float_reshape(tensor: FloatTensor<Self>, shape: Shape) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct ReshapeDim;\n\n #[derive(new, Debug)]\n struct RetroReshape<B: Backend> {\n input_id: NodeID,\n shape: Shape,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroReshape<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_reshape(input, self.shape.clone());\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for ReshapeDim {\n type State = (Shape, Shape);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape_original, shape) = ops.state;\n let ndims_out = shape.num_dims();\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let shape_grad = grad.shape();\n let mut grad = grad;\n\n for i in 0..ndims_out {\n if shape.dims[i] == 1 && shape_grad.dims[i] != 1 {\n grad = B::float_sum_dim(grad, i);\n }\n }\n\n B::float_reshape(grad, shape_original)\n });\n }\n }\n\n match ReshapeDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroReshape::<B>::new(tensor.node.id, shape.clone()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (tensor.primitive.shape(), shape.clone()),\n B::float_reshape(tensor.primitive, shape),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_reshape(tensor.primitive, shape)),\n }\n }\n\n fn float_gather(\n dim: usize,\n tensor: FloatTensor<Self>,\n indices: IntTensor<B>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Gather;\n\n impl<B: Backend> Backward<B, 1> for Gather {\n type State = (usize, IntTensor<B>, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, indices, shape, device) = ops.state;\n\n unary::<B, _>(ops.parents, 
ops.node, grads, |grad| {\n let zeros = B::float_zeros(shape, &device);\n B::float_scatter(dim, zeros, indices, grad)\n });\n }\n }\n\n match Gather\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n dim,\n indices.clone(),\n tensor.primitive.shape(),\n B::float_device(&tensor.primitive),\n ),\n B::float_gather(dim, tensor.primitive, indices),\n ),\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_gather(dim, tensor.primitive, indices))\n }\n }\n }\n\n fn float_scatter(\n dim: usize,\n tensor: FloatTensor<Self>,\n indices: IntTensor<B>,\n value: FloatTensor<Self>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Scatter;\n\n impl<B: Backend> Backward<B, 2> for Scatter {\n type State = (usize, IntTensor<B>, Shape, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, indices, shape_lhs, shape_rhs, device) = ops.state;\n let [indices_4lhs, indices_4rhs] = duplicate(&ops.parents, Some(indices));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let zeros = B::float_zeros(shape_lhs, &device);\n B::float_scatter(dim, grad, indices_4lhs.unwrap(), zeros)\n },\n |grad| {\n let zeros = B::float_zeros(shape_rhs, &device);\n B::float_scatter(dim, zeros, indices_4rhs.unwrap(), grad)\n },\n );\n }\n }\n\n match Scatter\n .prepare::<C>([tensor.node, value.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n dim,\n indices.clone(),\n tensor.primitive.shape(),\n value.primitive.shape(),\n B::float_device(&value.primitive),\n ),\n B::float_scatter(dim, tensor.primitive, indices, value.primitive),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_scatter(\n dim,\n tensor.primitive,\n indices,\n value.primitive,\n )),\n }\n }\n\n fn float_select(\n tensor: FloatTensor<Self>,\n dim: usize,\n indices: IntTensor<B>,\n ) -> FloatTensor<Self> {\n 
#[derive(Debug)]\n struct Select;\n\n #[derive(new, Debug)]\n struct RetroSelect<B: Backend> {\n input_id: NodeID,\n dim: usize,\n indices: IntTensor<B>,\n }\n\n impl<B: Backend> RetroForward for RetroSelect<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_select(input, self.dim, self.indices.clone());\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for Select {\n type State = (usize, IntTensor<B>, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, indices, shape, device) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let zeros = B::float_zeros(shape, &device);\n B::float_select_assign(zeros, dim, indices, grad)\n });\n }\n }\n\n match Select\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSelect::<B>::new(tensor.node.id, dim, indices.clone()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n dim,\n indices.clone(),\n tensor.primitive.shape(),\n B::float_device(&tensor.primitive),\n ),\n B::float_select(tensor.primitive, dim, indices),\n ),\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_select(tensor.primitive, dim, indices))\n }\n }\n }\n\n fn float_select_assign(\n tensor: FloatTensor<Self>,\n dim: usize,\n indices: IntTensor<B>,\n value: FloatTensor<Self>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct IndexSelectDimAssign;\n\n #[derive(new, Debug)]\n struct RetroSelectAssign<B: Backend> {\n tensor_id: NodeID,\n dim: usize,\n indices: IntTensor<B>,\n value_id: NodeID,\n }\n\n impl<B: Backend> RetroForward for RetroSelectAssign<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);\n let value = 
states.get_state::<B::FloatTensorPrimitive>(&self.value_id);\n let out = B::float_select_assign(tensor, self.dim, self.indices.clone(), value);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 2> for IndexSelectDimAssign {\n type State = (usize, IntTensor<B>);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, indices) = ops.state;\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| grad,\n |grad| B::float_select(grad, dim, indices),\n );\n }\n }\n\n match IndexSelectDimAssign\n .prepare::<C>([tensor.node.clone(), value.node.clone()])\n .memory_bound()\n .retro_forward(RetroSelectAssign::<B>::new(\n tensor.node.id,\n dim,\n indices.clone(),\n value.node.id,\n ))\n .parents([&tensor, &value])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (dim, indices.clone()),\n B::float_select_assign(tensor.primitive, dim, indices, value.primitive),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_select_assign(\n tensor.primitive,\n dim,\n indices,\n value.primitive,\n )),\n }\n }\n\n fn float_slice(\n tensor: FloatTensor<Self>,\n ranges: &[core::ops::Range<usize>],\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Index;\n\n #[derive(new, Debug)]\n struct RetroSlice<B: Backend> {\n tensor_id: NodeID,\n ranges: Vec<core::ops::Range<usize>>,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroSlice<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);\n let out = B::float_slice(tensor, &self.ranges);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for Index {\n type State = (Vec<core::ops::Range<usize>>, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (ranges, shape, device) = ops.state;\n\n 
unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let zeros = B::float_zeros(shape, &device);\n B::float_slice_assign(zeros, &ranges, grad)\n });\n }\n }\n\n match Index\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSlice::<B>::new(tensor.node.id, ranges.to_vec()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n ranges.to_vec(),\n tensor.primitive.shape(),\n B::float_device(&tensor.primitive),\n ),\n B::float_slice(tensor.primitive, ranges),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_slice(tensor.primitive, ranges)),\n }\n }\n\n fn float_slice_assign(\n tensor: FloatTensor<Self>,\n ranges: &[core::ops::Range<usize>],\n value: FloatTensor<Self>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct SliceAssign;\n\n #[derive(new, Debug)]\n struct RetroSliceAssign<B: Backend> {\n tensor_id: NodeID,\n ranges: Vec<core::ops::Range<usize>>,\n value_id: NodeID,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroSliceAssign<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);\n let value = states.get_state::<B::FloatTensorPrimitive>(&self.value_id);\n let out = B::float_slice_assign(tensor, &self.ranges, value);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 2> for SliceAssign {\n type State = (Vec<core::ops::Range<usize>>, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (ranges, shape_rhs, device) = ops.state;\n let [ranges_4lhs, ranges_4rhs] = duplicate(&ops.parents, Some(ranges));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let zeros = B::float_zeros(shape_rhs, &device);\n B::float_slice_assign(grad, &ranges_4lhs.unwrap(), zeros)\n },\n |grad| B::float_slice(grad, &ranges_4rhs.unwrap()),\n );\n }\n }\n\n match 
SliceAssign\n .prepare::<C>([tensor.node.clone(), value.node.clone()])\n .memory_bound()\n .retro_forward(RetroSliceAssign::<B>::new(\n tensor.node.id,\n ranges.to_vec(),\n value.node.id,\n ))\n .parents([&tensor, &value])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n ranges.to_vec(),\n value.primitive.shape(),\n B::float_device(&value.primitive),\n ),\n B::float_slice_assign(tensor.primitive, ranges, value.primitive),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_slice_assign(\n tensor.primitive,\n ranges,\n value.primitive,\n )),\n }\n }\n\n fn float_mask_where(\n tensor: FloatTensor<Self>,\n mask: BoolTensor<Self>,\n source: FloatTensor<Self>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct MaskWhere;\n\n impl<B: Backend> Backward<B, 2> for MaskWhere {\n type State = (BoolTensor<B>, Shape, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (mask, shape_lhs, shape_rhs, device) = ops.state;\n let [mask_4lhs, mask_4rhs] = duplicate(&ops.parents, Some(mask));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let zeros = B::float_zeros(shape_lhs.clone(), &device);\n let grad = B::float_mask_where(grad, mask_4lhs.unwrap(), zeros);\n\n broadcast_shape::<B>(grad, &shape_lhs)\n },\n |grad| {\n let zeros = B::float_zeros(shape_rhs.clone(), &device);\n let grad = B::float_mask_where(zeros, mask_4rhs.unwrap(), grad);\n\n broadcast_shape::<B>(grad, &shape_rhs)\n },\n );\n }\n }\n\n match MaskWhere\n .prepare::<C>([tensor.node, source.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n mask.clone(),\n tensor.primitive.shape(),\n source.primitive.shape(),\n B::float_device(&source.primitive),\n ),\n B::float_mask_where(tensor.primitive, mask, source.primitive),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_mask_where(\n tensor.primitive,\n mask,\n source.primitive,\n )),\n }\n 
    }

    /// Fills the positions selected by `mask` with the scalar `value`, with autodiff support.
    ///
    /// Backward: positions that were overwritten by the constant in the forward pass
    /// receive no gradient, which is implemented by mask-filling the incoming gradient
    /// with `0` at those same positions. The mask itself is the saved state.
    fn float_mask_fill(
        tensor: FloatTensor<Self>,
        mask: BoolTensor<B>,
        value: FloatElem<B>,
    ) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct MaskFill;

        impl<B: Backend> Backward<B, 1> for MaskFill {
            // State is the boolean mask used in the forward pass.
            type State = BoolTensor<B>;

            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    // Zero out the gradient wherever the forward output was replaced
                    // by the constant `value`.
                    B::float_mask_fill(grad, ops.state, 0.elem())
                });
            }
        }

        match MaskFill
            .prepare::<C>([tensor.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => prep.finish(
                // The mask must be cloned: it is both saved as backward state and
                // consumed by the forward computation below.
                mask.clone(),
                B::float_mask_fill(tensor.primitive, mask, value),
            ),
            OpsKind::UnTracked(prep) => {
                prep.finish(B::float_mask_fill(tensor.primitive, mask, value))
            }
        }
    }

    // The comparison operators below return boolean tensors and simply delegate to the
    // inner backend: no autodiff node is registered for them here, since their outputs
    // are not float tensors tracked by this autodiff graph.

    fn float_equal(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {
        B::float_equal(lhs.primitive, rhs.primitive)
    }

    fn float_equal_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {
        B::float_equal_elem(lhs.primitive, rhs)
    }

    fn float_greater(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {
        B::float_greater(lhs.primitive, rhs.primitive)
    }

    fn float_greater_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {
        B::float_greater_elem(lhs.primitive, rhs)
    }

    fn float_greater_equal(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {
        B::float_greater_equal(lhs.primitive, rhs.primitive)
    }

    fn float_greater_equal_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {
        B::float_greater_equal_elem(lhs.primitive, rhs)
    }

    fn float_lower(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {
        B::float_lower(lhs.primitive, rhs.primitive)
    }

    fn float_lower_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {
        B::float_lower_elem(lhs.primitive, rhs)
    }

    fn float_lower_equal(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) ->
BoolTensor<B> {\n B::float_lower_equal(lhs.primitive, rhs.primitive)\n }\n\n fn float_lower_equal_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {\n B::float_lower_equal_elem(lhs.primitive, rhs)\n }\n\n fn float_detach(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n // When we detach a tensor, we remove it from the graph, but we still want to keep the\n // `require_grad` setting.\n let is_require_grad = Self::float_is_require_grad(&tensor);\n let tensor = AutodiffTensor::new(tensor.primitive);\n\n match is_require_grad {\n true => tensor.require_grad(),\n false => tensor,\n }\n }\n\n fn float_set_require_grad(tensor: FloatTensor<Self>, require_grad: bool) -> FloatTensor<Self> {\n if require_grad {\n return tensor.require_grad();\n }\n\n AutodiffTensor::new(tensor.primitive)\n }\n\n fn float_is_require_grad(tensor: &FloatTensor<Self>) -> bool {\n matches!(tensor.node.requirement, Requirement::Grad)\n }\n\n fn float_mean(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Mean;\n\n impl<B: Backend> Backward<B, 1> for Mean {\n type State = Shape;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let shape = ops.state;\n let val = 1_f64 / shape.num_elements() as f64;\n let ones = B::float_ones(shape, &B::float_device(&grad));\n let val = B::float_mul_scalar(ones, val.elem());\n\n let grad = unsqueeze_like::<B>(grad, val.shape());\n B::float_mul(val, grad)\n });\n }\n }\n\n match Mean.prepare::<C>([tensor.node]).compute_bound().stateful() {\n OpsKind::Tracked(prep) => {\n prep.finish(tensor.primitive.shape(), B::float_mean(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_mean(tensor.primitive)),\n }\n }\n\n fn float_sum(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sum;\n\n impl<B: Backend> Backward<B, 1> for Sum {\n type State = Shape;\n\n 
fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let val = B::float_ones(ops.state, &B::float_device(&grad));\n\n let grad = unsqueeze_like::<B>(grad, val.shape());\n B::float_mul(val, grad)\n });\n }\n }\n\n match Sum.prepare::<C>([tensor.node]).compute_bound().stateful() {\n OpsKind::Tracked(prep) => {\n prep.finish(tensor.primitive.shape(), B::float_sum(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_sum(tensor.primitive)),\n }\n }\n\n fn float_mean_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct MeanDim;\n\n impl<B: Backend> Backward<B, 1> for MeanDim {\n type State = (Shape, usize);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape, dim) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let val = 1_f64 / shape.dims[dim] as f64;\n let ones = B::float_ones(shape, &B::float_device(&grad));\n let val = B::float_mul_scalar(ones, B::FloatElem::from_elem(val));\n\n let grad = B::float_sum_dim(grad, dim);\n B::float_mul(val, grad)\n });\n }\n }\n\n match MeanDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (tensor.primitive.shape(), dim),\n B::float_mean_dim(tensor.primitive, dim),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_mean_dim(tensor.primitive, dim)),\n }\n }\n\n fn float_sum_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct SumDim;\n\n impl<B: Backend> Backward<B, 1> for SumDim {\n type State = (Shape, usize);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape, dim) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let ones = 
B::float_ones(shape, &B::float_device(&grad));\n let grad = B::float_sum_dim(grad, dim);\n\n B::float_mul(ones, grad)\n });\n }\n }\n\n match SumDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (tensor.primitive.shape(), dim),\n B::float_sum_dim(tensor.primitive, dim),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_sum_dim(tensor.primitive, dim)),\n }\n }\n\n fn float_argmax(tensor: FloatTensor<Self>, dim: usize) -> IntTensor<B> {\n B::float_argmax(tensor.primitive, dim)\n }\n\n fn float_argmin(tensor: FloatTensor<Self>, dim: usize) -> IntTensor<B> {\n B::float_argmin(tensor.primitive, dim)\n }\n\n fn float_exp(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Exp;\n\n retro_unary!(RetroExp, B::float_exp);\n\n impl<B: Backend> Backward<B, 1> for Exp {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n let output = B::float_exp(input);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_mul(grad, output)\n });\n }\n }\n\n match Exp\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroExp::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_exp(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_exp(tensor.primitive)),\n }\n }\n\n fn float_log(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Log;\n\n retro_unary!(RetroLog, B::float_log);\n\n impl<B: Backend> Backward<B, 1> for Log {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, 
ops.node, grads, |grad| {\n let value = B::float_powf_scalar(input, -1.0);\n B::float_mul(grad, value)\n });\n }\n }\n\n match Log\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroLog::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_log(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_log(tensor.primitive)),\n }\n }\n\n fn float_log1p(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Log1P;\n\n retro_unary!(RetroLog1P, B::float_log1p);\n\n impl<B: Backend> Backward<B, 1> for Log1P {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_add_scalar(input, 1.elem());\n let value = B::float_powf_scalar(value, -1.0);\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Log1P\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroLog1P::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_log1p(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_log1p(tensor.primitive)),\n }\n }\n\n fn float_powf_scalar(tensor: FloatTensor<Self>, value: f32) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct PowfScalar;\n\n #[derive(new, Debug)]\n struct RetroPowfScalar<B: Backend> {\n lhs_id: NodeID,\n rhs: f32,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroPowfScalar<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let lhs = states.get_state::<B::FloatTensorPrimitive>(&self.lhs_id);\n let out = B::float_powf_scalar(lhs, self.rhs);\n 
states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for PowfScalar {\n type State = (NodeID, f32);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (tensor_id, value) = ops.state;\n let tensor = checkpointer.retrieve_node_output(tensor_id);\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let tmp = B::float_powf_scalar(tensor, value - 1.0);\n let value = B::float_mul_scalar(tmp, value.elem());\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match PowfScalar\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroPowfScalar::<B>::new(tensor.node.id, value))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = (prep.checkpoint(&tensor), value);\n prep.finish(state, B::float_powf_scalar(tensor.primitive, value))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_powf_scalar(tensor.primitive, value)),\n }\n }\n\n fn float_sqrt(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sqrt;\n\n retro_unary!(RetroSqrt, B::float_sqrt);\n\n impl<B: Backend> Backward<B, 1> for Sqrt {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_div_scalar(B::float_powf_scalar(input, -0.5), 2.elem());\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Sqrt\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSqrt::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_sqrt(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_sqrt(tensor.primitive)),\n }\n }\n\n fn float_abs(tensor: FloatTensor<Self>) -> 
FloatTensor<Self> {\n #[derive(Debug)]\n struct Abs;\n\n retro_unary!(RetroAbs, B::float_abs);\n\n impl<B: Backend> Backward<B, 1> for Abs {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let tensor: B::FloatTensorPrimitive = checkpointer.retrieve_node_output(ops.state);\n let state = B::float_sign(tensor);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_mul(grad, state)\n });\n }\n }\n\n match Abs\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroAbs::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_abs(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_abs(tensor.primitive)),\n }\n }\n\n fn float_cos(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Cos;\n\n retro_unary!(RetroCos, B::float_cos);\n\n impl<B: Backend> Backward<B, 1> for Cos {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_neg(B::float_sin(input));\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Cos\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroCos::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_cos(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_cos(tensor.primitive)),\n }\n }\n\n fn float_sin(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sin;\n\n retro_unary!(RetroSin, B::float_sin);\n\n impl<B: Backend> Backward<B, 1> for Sin {\n type State = 
NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let state = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_cos(state);\n B::float_mul(grad, value)\n });\n }\n }\n\n match Sin\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSin::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_sin(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_sin(tensor.primitive)),\n }\n }\n\n fn float_tanh(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Tanh;\n\n retro_unary!(RetroTanh, B::float_tanh);\n\n impl<B: Backend> Backward<B, 1> for Tanh {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n let state = B::float_tanh(input);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_add_scalar(\n B::float_neg(B::float_powf_scalar(state, 2.0)),\n 1.elem(),\n );\n B::float_mul(grad, value)\n });\n }\n }\n\n match Tanh\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroTanh::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_tanh(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_tanh(tensor.primitive)),\n }\n }\n\n fn float_round(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Round;\n retro_unary!(RetroRound, B::float_round);\n\n impl<B: Backend> Backward<B, 1> for Round {\n type State = (Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n 
grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape, device) = ops.state;\n unary::<B, _>(ops.parents, ops.node, grads, |_grad| {\n B::float_zeros(shape, &device)\n })\n }\n }\n\n match Round\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroRound::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (tensor.primitive.shape(), B::float_device(&tensor.primitive)),\n B::float_round(tensor.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_round(tensor.primitive)),\n }\n }\n\n fn float_floor(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Floor;\n retro_unary!(RetroFloor, B::float_floor);\n\n impl<B: Backend> Backward<B, 1> for Floor {\n type State = (Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape, device) = ops.state;\n unary::<B, _>(ops.parents, ops.node, grads, |_grad| {\n B::float_zeros(shape, &device)\n })\n }\n }\n\n match Floor\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroFloor::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (tensor.primitive.shape(), B::float_device(&tensor.primitive)),\n B::float_floor(tensor.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_floor(tensor.primitive)),\n }\n }\n\n fn float_ceil(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Ceil;\n retro_unary!(RetroCeil, B::float_ceil);\n\n impl<B: Backend> Backward<B, 1> for Ceil {\n type State = (Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape, device) = ops.state;\n unary::<B, _>(ops.parents, ops.node, grads, |_grad| {\n B::float_zeros(shape, &device)\n })\n }\n }\n\n match Ceil\n 
.prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroCeil::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (tensor.primitive.shape(), B::float_device(&tensor.primitive)),\n B::float_floor(tensor.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_floor(tensor.primitive)),\n }\n }\n\n fn float_erf(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Erf;\n\n retro_unary!(RetroErf, B::float_erf);\n\n impl<B: Backend> Backward<B, 1> for Erf {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let ops = checkpointer.retrieve_node_output(ops.state);\n let exponent = B::float_neg(B::float_powf_scalar(ops, 2.0));\n let numerator = B::float_mul_scalar(B::float_exp(exponent), 2.0.elem());\n let denominator = core::f64::consts::PI.sqrt().elem();\n let value = B::float_div_scalar(numerator, denominator);\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Erf\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroErf::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_erf(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_erf(tensor.primitive)),\n }\n }\n\n fn float_cat(tensors: Vec<FloatTensor<Self>>, dim: usize) -> FloatTensor<Self> {\n #[derive(new, Debug)]\n struct CatStep<B: Backend> {\n nodes: Vec<Option<NodeRef>>,\n // The dimension of each tensor along the dim dimension.\n // This indicates the number of dimension concatenated for each tensor.\n dim_sizes: Vec<usize>,\n output: NodeRef,\n phantom: PhantomData<B>,\n dim: usize,\n }\n\n impl<B: Backend> Step for CatStep<B> {\n fn step(self: Box<Self>, grads: &mut Gradients, 
_checkpointer: &mut Checkpointer) {\n let grad = grads.consume::<B>(&self.output);\n let ranges: Vec<_> = grad.shape().dims.iter().map(|v| 0..*v).collect();\n\n let mut current_index = 0;\n\n self.nodes\n .into_iter()\n .zip(self.dim_sizes)\n .filter_map(|(node, dim_size)| node.map(|node| (node, dim_size)))\n .for_each(|(node, dim_size)| {\n let mut ranges = ranges.clone();\n ranges[self.dim] = current_index..dim_size + current_index;\n current_index += dim_size;\n grads.register::<B>(node.id, B::float_slice(grad.clone(), &ranges));\n });\n }\n\n fn node(&self) -> NodeID {\n self.output.id\n }\n\n fn parents(&self) -> Vec<NodeID> {\n self.nodes\n .iter()\n .filter_map(|node| node.clone())\n .map(|node| node.id)\n .collect()\n }\n fn depth(&self) -> usize {\n self.output.order\n }\n }\n\n let mut nodes = Vec::with_capacity(tensors.len());\n let mut primitives = Vec::with_capacity(tensors.len());\n let mut dim_sizes = Vec::with_capacity(tensors.len());\n\n tensors.into_iter().for_each(|tensor| {\n dim_sizes.push(tensor.primitive.shape().dims[dim]);\n nodes.push(tensor.node);\n primitives.push(tensor.primitive);\n });\n\n let requirement = Requirement::from_nodes(&nodes);\n\n // For simplicity, this operation does not checkpoint anything\n let cat_computing_property = ComputingProperty::Ambiguous;\n let checkpointer_builder = CheckpointerBuilder::default();\n\n let output = B::float_cat(primitives, dim);\n if requirement.is_none() {\n return AutodiffTensor::from_parents(\n output,\n &nodes,\n requirement,\n cat_computing_property,\n );\n }\n\n let output =\n AutodiffTensor::from_parents(output, &nodes, requirement, cat_computing_property);\n let nodes = nodes\n .into_iter()\n .map(|node| node.clone_if_require_grad())\n .collect::<Vec<_>>();\n\n let ops = CatStep::<B>::new(nodes, dim_sizes, output.node.clone(), dim);\n output.register_step(ops, checkpointer_builder)\n }\n\n fn float_max_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {\n match 
MaxMinDim
            .prepare::<C>([tensor.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => {
                // When tracked, compute values together with argmax indices: the
                // (indices, shape) pair is the state consumed by the shared
                // `MaxMinDim` backward (defined elsewhere in this module).
                let shape = tensor.primitive.shape();
                let (tensor, index) = B::float_max_dim_with_indices(tensor.primitive, dim);
                prep.finish((index, shape), tensor)
            }
            // Untracked: no backward needed, so skip computing the indices entirely.
            OpsKind::UnTracked(prep) => prep.finish(B::float_max_dim(tensor.primitive, dim)),
        }
    }
    /// Max along `dim`, also returning the argmax indices.
    ///
    /// Gradient tracking applies to the value tensor only; the returned index
    /// tensor is passed through from the inner backend untracked.
    fn float_max_dim_with_indices(
        tensor: FloatTensor<Self>,
        dim: usize,
    ) -> (FloatTensor<Self>, IntTensor<B>) {
        match MaxMinDim
            .prepare::<C>([tensor.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => {
                let shape = tensor.primitive.shape();
                let (tensor, index) = B::float_max_dim_with_indices(tensor.primitive, dim);
                // Indices are both saved as backward state and returned to the caller.
                let tensor = prep.finish((index.clone(), shape), tensor);

                (tensor, index)
            }
            OpsKind::UnTracked(prep) => {
                let (tensor, index) = B::float_max_dim_with_indices(tensor.primitive, dim);
                let tensor = prep.finish(tensor);

                (tensor, index)
            }
        }
    }
    /// Min along `dim`; mirrors `float_max_dim`, reusing the shared `MaxMinDim`
    /// backward with argmin indices as state.
    fn float_min_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {
        match MaxMinDim
            .prepare::<C>([tensor.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => {
                let shape = tensor.primitive.shape();
                let (tensor, index) = B::float_min_dim_with_indices(tensor.primitive, dim);
                prep.finish((index, shape), tensor)
            }
            // Untracked: skip the indices computation.
            OpsKind::UnTracked(prep) => prep.finish(B::float_min_dim(tensor.primitive, dim)),
        }
    }
    /// Min along `dim`, also returning the argmin indices; mirrors
    /// `float_max_dim_with_indices`.
    fn float_min_dim_with_indices(
        tensor: FloatTensor<Self>,
        dim: usize,
    ) -> (FloatTensor<Self>, IntTensor<B>) {
        match MaxMinDim
            .prepare::<C>([tensor.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => {
                let shape = tensor.primitive.shape();
                let (tensor, index) = B::float_min_dim_with_indices(tensor.primitive, dim);
                let tensor = prep.finish((index.clone(), shape), tensor);

                (tensor, index)
            }
            OpsKind::UnTracked(prep) => {
                let (tensor, index) =
prep.finish(tensor);\n\n (tensor, index)\n }\n }\n }\n\n fn float_into_int(tensor: FloatTensor<Self>) -> <Autodiff<B> as Backend>::IntTensorPrimitive {\n B::float_into_int(tensor.primitive)\n }\n\n fn float_powf(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct PowF;\n\n retro_binary!(RetroPowf, B::float_powf);\n\n impl<B: Backend> Backward<B, 2> for PowF {\n type State = (NodeID, NodeID, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (lhs_id, rhs_id, broadcast) = ops.state;\n let lhs: B::FloatTensorPrimitive = checkpointer.retrieve_node_output(lhs_id);\n let rhs: B::FloatTensorPrimitive = checkpointer.retrieve_node_output(rhs_id);\n\n // Both lhs and rhs are needed for both lhs and rhs gradients, but we clone them\n // the number of times required by the parents specification.\n let [rhs_4lhs, rhs_4rhs] = duplicate(&ops.parents, Some(rhs));\n let [lhs_4lhs, lhs_4rhs] = duplicate(&ops.parents, Some(lhs));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n //rhs*(lhs.val**(rhs-1))*grad\n let rhs1 = rhs_4lhs.unwrap();\n let rhs2 = rhs1.clone();\n let lhs = lhs_4lhs.unwrap();\n\n let tmp = B::float_powf(\n lhs,\n B::float_sub_scalar(rhs1, B::FloatElem::from_elem(1.0)),\n );\n let value = B::float_mul(tmp, rhs2);\n let grad = B::float_mul(grad, value);\n\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n //lhs**rhs * ln(lhs) * grad\n let rhs = rhs_4rhs.unwrap();\n let lhs1 = lhs_4rhs.unwrap();\n let lhs2 = lhs1.clone();\n let tmp = B::float_powf(lhs1, rhs);\n let value = B::float_mul(tmp, B::float_log(lhs2));\n let grad = B::float_mul(grad, value);\n\n broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match PowF\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n 
.retro_forward(RetroPowf::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = prep.checkpoint(&lhs);\n let rhs_state = prep.checkpoint(&rhs);\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n B::float_powf(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_powf(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_sign(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sign;\n\n retro_unary!(RetroSign, B::float_sign);\n\n impl<B: Backend> Backward<B, 1> for Sign {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad|\n // Always return 0 because the derivative of the sign function\n // does not contribute to gradient updates in a meaningful way.\n B::float_mul_scalar(grad, 0.elem()));\n }\n }\n\n Sign.prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSign::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateless(B::float_sign(tensor.primitive))\n }\n\n fn float_expand(tensor: FloatTensor<Self>, shape: Shape) -> FloatTensor<Self> {\n // D1: tensor, D2: shape\n #[derive(Debug)]\n struct ExpandDim;\n\n #[derive(new, Debug)]\n struct RetroExpand<B: Backend> {\n input_id: NodeID,\n shape: Shape,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroExpand<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_expand(input, self.shape.clone());\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for ExpandDim {\n type State = (Shape, Shape);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape_in, shape_out) = ops.state;\n let 
ndims_in = shape_in.num_dims();\n let ndims_out = shape_out.num_dims();\n\n let mut shape_expanded = vec![1; ndims_out];\n\n debug_assert!(ndims_out >= ndims_in);\n\n for i in 0..ndims_in {\n shape_expanded[i + (ndims_out - ndims_in)] = shape_in.dims[i];\n }\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let shape_grad = grad.shape();\n let mut grad = grad;\n\n #[allow(clippy::needless_range_loop)]\n for i in 0..ndims_out {\n if shape_expanded[i] == 1 && shape_grad.dims[i] != 1 {\n grad = B::float_sum_dim(grad, i);\n }\n }\n\n B::float_reshape(grad, shape_in)\n });\n }\n }\n\n match ExpandDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroExpand::<B>::new(tensor.node.id, shape.clone()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (tensor.primitive.shape(), shape.clone()),\n B::float_expand(tensor.primitive, shape),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_expand(tensor.primitive, shape)),\n }\n }\n\n fn float_sort(tensor: FloatTensor<Self>, dim: usize, descending: bool) -> FloatTensor<Self> {\n match super::sort::SortDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let shape = tensor.primitive.shape();\n let (tensor, indices) =\n B::float_sort_with_indices(tensor.primitive, dim, descending);\n prep.finish((indices, shape), tensor)\n }\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_sort(tensor.primitive, dim, descending))\n }\n }\n }\n\n fn float_sort_with_indices(\n tensor: FloatTensor<Self>,\n dim: usize,\n descending: bool,\n ) -> (FloatTensor<Self>, IntTensor<B>) {\n match super::sort::SortDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let shape = tensor.primitive.shape();\n let (tensor, indices) =\n B::float_sort_with_indices(tensor.primitive, dim, descending);\n let tensor = prep.finish((indices.clone(), shape), tensor);\n\n (tensor, indices)\n 
}\n OpsKind::UnTracked(prep) => {\n let (tensor, indices) =\n B::float_sort_with_indices(tensor.primitive, dim, descending);\n let tensor = prep.finish(tensor);\n\n (tensor, indices)\n }\n }\n }\n\n fn float_argsort(tensor: FloatTensor<Self>, dim: usize, descending: bool) -> IntTensor<B> {\n B::float_argsort(tensor.primitive, dim, descending)\n }\n\n fn float_repeat_dim(tensor: FloatTensor<Self>, dim: usize, times: usize) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Repeat;\n\n #[derive(new, Debug)]\n struct RetroRepeat<B: Backend> {\n tensor_id: NodeID,\n dim: usize,\n times: usize,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroRepeat<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);\n let out = B::float_repeat_dim(tensor, self.dim, self.times);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for Repeat {\n type State = (usize, usize);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, times) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let mut dims = grad.shape().dims;\n let orig_dim_size = dims[dim] / times;\n if orig_dim_size > 1 {\n dims[dim] = orig_dim_size;\n let orig_dims = dims.clone();\n dims.insert(dim + 1, times); // shape [..., orig_dim_size, times, ...]\n let grad = B::float_reshape(grad, Shape::from(dims));\n let grad = B::float_sum_dim(grad, dim + 1); // sum over repeat times\n B::float_reshape(grad, Shape::from(orig_dims))\n } else {\n B::float_sum_dim(grad, dim)\n }\n });\n }\n }\n\n match Repeat\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroRepeat::<B>::new(tensor.node.id, dim, times))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (dim, times),\n B::float_repeat_dim(tensor.primitive, dim, times),\n ),\n 
OpsKind::UnTracked(prep) => {\n prep.finish(B::float_repeat_dim(tensor.primitive, dim, times))\n }\n }\n }\n\n fn float_cast(tensor: FloatTensor<Self>, dtype: burn_tensor::FloatDType) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_cast(tensor.primitive, dtype))\n }\n\n // TODO: Implement float_prod and float_sum\n // https://github.com/tracel-ai/burn/issues/1458\n}",
"class_signature": "impl<B: Backend, C: CheckpointStrategy> FloatTensorOps<Self> for Autodiff<B, C>"
} |
float_add | burn-main/crates/burn-autodiff/src/ops/tensor.rs | fn float_add(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {
#[derive(Debug)]
struct Add;
retro_binary!(RetroAdd, B::float_add);
impl<B: Backend> Backward<B, 2> for Add {
type State = (Shape, Shape);
fn backward(
self,
ops: Ops<Self::State, 2>,
grads: &mut Gradients,
_checkpointer: &mut Checkpointer,
) {
let (shape_lhs, shape_rhs) = ops.state;
binary::<B, _, _>(
ops.parents,
ops.node,
grads,
|grad| broadcast_shape::<B>(grad, &shape_lhs),
|grad| broadcast_shape::<B>(grad, &shape_rhs),
);
}
}
match Add
.prepare::<C>([lhs.node.clone(), rhs.node.clone()])
.memory_bound()
.retro_forward(RetroAdd::<B>::new(lhs.node.id, rhs.node.id))
.parents([&lhs, &rhs])
.stateful()
{
OpsKind::Tracked(preps) => preps.finish(
(lhs.primitive.shape(), rhs.primitive.shape()),
B::float_add(lhs.primitive, rhs.primitive),
),
OpsKind::UnTracked(preps) => preps.finish(B::float_add(lhs.primitive, rhs.primitive)),
}
} | use alloc::{boxed::Box, vec, vec::Vec};
use core::marker::PhantomData;
#[cfg(not(feature = "std"))]
#[allow(unused_imports, reason = "required on aarch64, unused on x86_64")]
use num_traits::float::Float;
use crate::{
Autodiff,
checkpoint::{
base::Checkpointer, builder::CheckpointerBuilder, retro_forward::RetroForward,
state::BackwardStates, strategy::CheckpointStrategy,
},
grads::Gradients,
graph::{ComputingProperty, NodeID, NodeRef, Requirement, Step},
ops::{Backward, Ops, OpsKind, binary, broadcast_shape, unary},
retro_binary, retro_unary, retro_unary_scalar,
tensor::AutodiffTensor,
utils::duplicate,
};
use burn_tensor::{
Device, ElementConversion, Shape, TensorData, TensorMetadata,
backend::Backend,
ops::{BoolTensor, FloatElem, FloatTensor, FloatTensorOps, IntTensor},
};
use super::maxmin::MaxMinDim;
// Unsqueeze op on primitive.
// Reshape `tensor` so it has as many dimensions as `shape`, left-padding the
// tensor's own shape with size-1 axes (an "unsqueeze" on the raw primitive).
//
// Example: a `[3, 4]` tensor targeted at a 4-d `shape` becomes `[1, 1, 3, 4]`.
// Assumes `shape.num_dims() >= tensor.shape().num_dims()` (the subtraction
// below would underflow otherwise).
fn unsqueeze_like<B: Backend>(
    tensor: B::FloatTensorPrimitive,
    shape: Shape,
) -> B::FloatTensorPrimitive {
    let target_ndims = shape.num_dims();
    let current = tensor.shape();
    let leading_ones = target_ndims - current.num_dims();

    // Prepend `leading_ones` size-1 axes, then keep the original dims in order.
    let padded: Vec<usize> = core::iter::repeat(1)
        .take(leading_ones)
        .chain(current.dims.iter().copied())
        .collect();

    B::float_reshape(tensor, Shape::from(padded))
}
impl<B: Backend, C: CheckpointStrategy> FloatTensorOps<Self> for Autodiff<B, C> {
fn float_from_data(data: TensorData, device: &Device<Self>) -> FloatTensor<Self> {
AutodiffTensor::new(B::float_from_data(data, device))
}
fn float_random(
shape: Shape,
distribution: burn_tensor::Distribution,
device: &Device<Self>,
) -> FloatTensor<Self> {
AutodiffTensor::new(B::float_random(shape, distribution, device))
}
fn float_zeros(shape: Shape, device: &Device<Self>) -> FloatTensor<Self> {
AutodiffTensor::new(B::float_zeros(shape, device))
}
fn float_ones(shape: Shape, device: &Device<Self>) -> FloatTensor<Self> {
AutodiffTensor::new(B::float_ones(shape, device))
}
async fn float_into_data(tensor: FloatTensor<Self>) -> TensorData {
B::float_into_data(tensor.primitive).await
}
fn float_device(tensor: &FloatTensor<Self>) -> Device<Self> {
B::float_device(&tensor.primitive)
}
    fn float_to_device(tensor: FloatTensor<Self>, device: &Device<Self>) -> FloatTensor<Self> {
        // Moves the tensor to `device` while registering a backward step that
        // moves the gradient back to the original device.
        #[derive(Debug)]
        struct ToDevice;

        impl<B: Backend> Backward<B, 1> for ToDevice {
            // The source device, so the gradient can be returned to it.
            type State = B::Device;

            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                // A device move is identity math-wise: the gradient passes
                // through unchanged, only relocated to the original device.
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    B::float_to_device(grad, &ops.state)
                });
            }
        }

        match ToDevice
            .prepare::<C>([tensor.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => {
                // Capture the source device before the primitive is consumed.
                let device_old = B::float_device(&tensor.primitive);
                prep.finish(device_old, B::float_to_device(tensor.primitive, device))
            }
            // Untracked: no backward state is required.
            OpsKind::UnTracked(prep) => prep.finish(B::float_to_device(tensor.primitive, device)),
        }
    }
fn float_empty(shape: Shape, device: &Device<Self>) -> FloatTensor<Self> {
AutodiffTensor::new(B::float_empty(shape, device))
}
    fn float_add(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {
        // Element-wise addition with autodiff support.
        #[derive(Debug)]
        struct Add;

        // Generates the retro-forward used to recompute `lhs + rhs` from
        // checkpointed inputs when memory-bound checkpointing replays the op.
        retro_binary!(RetroAdd, B::float_add);

        impl<B: Backend> Backward<B, 2> for Add {
            // Input shapes, kept so broadcasting can be undone in backward.
            type State = (Shape, Shape);

            fn backward(
                self,
                ops: Ops<Self::State, 2>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (shape_lhs, shape_rhs) = ops.state;
                // d(lhs + rhs)/d(lhs) = d(lhs + rhs)/d(rhs) = 1: the gradient
                // flows through unchanged, except broadcasted dimensions are
                // reduced back to each input's original shape.
                binary::<B, _, _>(
                    ops.parents,
                    ops.node,
                    grads,
                    |grad| broadcast_shape::<B>(grad, &shape_lhs),
                    |grad| broadcast_shape::<B>(grad, &shape_rhs),
                );
            }
        }

        match Add
            .prepare::<C>([lhs.node.clone(), rhs.node.clone()])
            .memory_bound()
            .retro_forward(RetroAdd::<B>::new(lhs.node.id, rhs.node.id))
            .parents([&lhs, &rhs])
            .stateful()
        {
            // Tracked: store both shapes for the backward broadcast reduction.
            OpsKind::Tracked(preps) => preps.finish(
                (lhs.primitive.shape(), rhs.primitive.shape()),
                B::float_add(lhs.primitive, rhs.primitive),
            ),
            OpsKind::UnTracked(preps) => preps.finish(B::float_add(lhs.primitive, rhs.primitive)),
        }
    }
    fn float_add_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {
        // Adds a scalar to every element; gradient is the identity.
        #[derive(Debug)]
        struct AddScalar;

        // Retro-forward to replay the op from the checkpointed input.
        retro_unary_scalar!(RetroAddScalar, B::float_add_scalar);

        impl<B: Backend> Backward<B, 1> for AddScalar {
            type State = ();

            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                // d(x + c)/dx = 1: pass the gradient through unchanged.
                unary::<B, _>(ops.parents, ops.node, grads, |grad| grad);
            }
        }

        // No backward state is needed, so the op is registered stateless.
        AddScalar
            .prepare::<C>([lhs.node.clone()])
            .memory_bound()
            .retro_forward(RetroAddScalar::<B>::new(lhs.node.id, rhs))
            .parents([&lhs])
            .stateless(B::float_add_scalar(lhs.primitive, rhs))
    }
    fn float_sub(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {
        // Element-wise subtraction with autodiff support.
        #[derive(Debug)]
        struct Sub;

        // Retro-forward recomputes `lhs - rhs` from checkpointed inputs.
        retro_binary!(RetroSub, B::float_sub);

        impl<B: Backend> Backward<B, 2> for Sub {
            // Input shapes for undoing broadcasting in backward.
            type State = (Shape, Shape);

            fn backward(
                self,
                ops: Ops<Self::State, 2>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (shape_lhs, shape_rhs) = ops.state;
                // d(lhs - rhs)/d(lhs) = 1, d(lhs - rhs)/d(rhs) = -1, hence
                // the rhs gradient is negated before the broadcast reduction.
                binary::<B, _, _>(
                    ops.parents,
                    ops.node,
                    grads,
                    |grad| broadcast_shape::<B>(grad, &shape_lhs),
                    |grad| broadcast_shape::<B>(B::float_neg(grad), &shape_rhs),
                );
            }
        }

        match Sub
            .prepare::<C>([lhs.node.clone(), rhs.node.clone()])
            .memory_bound()
            .retro_forward(RetroSub::<B>::new(lhs.node.id, rhs.node.id))
            .parents([&lhs, &rhs])
            .stateful()
        {
            OpsKind::Tracked(preps) => preps.finish(
                (lhs.primitive.shape(), rhs.primitive.shape()),
                B::float_sub(lhs.primitive, rhs.primitive),
            ),
            OpsKind::UnTracked(preps) => preps.finish(B::float_sub(lhs.primitive, rhs.primitive)),
        }
    }
    fn float_sub_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {
        // Subtracts a scalar from every element; gradient is the identity.
        #[derive(Debug)]
        struct SubScalar;

        retro_unary_scalar!(RetroSubScalar, B::float_sub_scalar);

        impl<B: Backend> Backward<B, 1> for SubScalar {
            type State = ();

            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                // d(x - c)/dx = 1: pass the gradient through unchanged.
                unary::<B, _>(ops.parents, ops.node, grads, |grad| grad);
            }
        }

        // Stateless: backward needs no saved values.
        SubScalar
            .prepare::<C>([lhs.node.clone()])
            .memory_bound()
            .retro_forward(RetroSubScalar::<B>::new(lhs.node.id, rhs))
            .parents([&lhs])
            .stateless(B::float_sub_scalar(lhs.primitive, rhs))
    }
    fn float_mul(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {
        // Element-wise multiplication with autodiff support.
        #[derive(Debug)]
        struct Mul;

        retro_binary!(RetroMul, B::float_mul);

        impl<B: Backend> Backward<B, 2> for Mul {
            // Checkpointed node ids of the inputs (each present only if the
            // opposite side needs it for its gradient) plus broadcast info.
            type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);

            fn backward(
                self,
                ops: Ops<Self::State, 2>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                let (lhs, rhs, broadcast) = ops.state;
                let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));
                let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));

                // d(lhs * rhs)/d(lhs) = rhs and d(lhs * rhs)/d(rhs) = lhs.
                binary::<B, _, _>(
                    ops.parents,
                    ops.node,
                    grads,
                    |grad| {
                        let grad = B::float_mul(grad, rhs.unwrap());
                        broadcast.backward_lhs::<B>(grad)
                    },
                    |grad| {
                        let grad = B::float_mul(grad, lhs.unwrap());
                        broadcast.backward_rhs::<B>(grad)
                    },
                );
            }
        }

        let lhs_tracked = lhs.is_tracked();
        let rhs_tracked = rhs.is_tracked();
        let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);

        match Mul
            .prepare::<C>([lhs.node.clone(), rhs.node.clone()])
            .memory_bound()
            .retro_forward(RetroMul::<B>::new(lhs.node.id, rhs.node.id))
            .parents([&lhs, &rhs])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                // Note the cross-wiring: `lhs` is only checkpointed when
                // `rhs` is tracked, because the rhs gradient multiplies by
                // lhs (and vice versa).
                let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));
                let rhs_state = lhs_tracked.then(|| prep.checkpoint(&rhs));
                prep.finish(
                    (lhs_state, rhs_state, broadcast),
                    B::float_mul(lhs.primitive, rhs.primitive),
                )
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_mul(lhs.primitive, rhs.primitive)),
        }
    }
    fn float_mul_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {
        // Multiplies every element by a scalar.
        #[derive(Debug)]
        struct MulScalar;

        retro_unary_scalar!(RetroMulScalar, B::float_mul_scalar);

        impl<B: Backend> Backward<B, 1> for MulScalar {
            // The scalar factor, reused to scale the gradient.
            type State = FloatElem<B>;

            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                // d(c * x)/dx = c.
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    B::float_mul_scalar(grad, ops.state)
                });
            }
        }

        match MulScalar
            .prepare::<C>([lhs.node.clone()])
            .memory_bound()
            .retro_forward(RetroMulScalar::<B>::new(lhs.node.id, rhs))
            .parents([&lhs])
            .stateful()
        {
            OpsKind::Tracked(prep) => prep.finish(rhs, B::float_mul_scalar(lhs.primitive, rhs)),
            OpsKind::UnTracked(prep) => prep.finish(B::float_mul_scalar(lhs.primitive, rhs)),
        }
    }
    fn float_div(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {
        // Element-wise division with autodiff support.
        #[derive(Debug)]
        struct Div;

        retro_binary!(RetroDiv, B::float_div);

        impl<B: Backend> Backward<B, 2> for Div {
            // Optional checkpointed input node ids + broadcast info.
            type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);

            fn backward(
                self,
                ops: Ops<Self::State, 2>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                let (lhs, rhs, broadcast) = ops.state;
                let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));
                let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));

                // `rhs` is consumed by both gradient closures, so duplicate it
                // according to which parents actually require a gradient.
                let [rhs_4lhs, rhs_4rhs] = duplicate(&ops.parents, rhs);

                binary::<B, _, _>(
                    ops.parents,
                    ops.node,
                    grads,
                    |grad| {
                        // d(lhs / rhs)/d(lhs) = 1 / rhs.
                        let rhs = rhs_4lhs.unwrap();
                        let value = B::float_powf_scalar(rhs, -1.0);
                        let grad = B::float_mul(grad, value);

                        broadcast.backward_lhs::<B>(grad)
                    },
                    |grad| {
                        // d(lhs / rhs)/d(rhs) = -lhs / rhs^2.
                        let rhs = rhs_4rhs.unwrap();
                        let lhs = lhs.unwrap();
                        let value = B::float_div(B::float_neg(lhs), B::float_powf_scalar(rhs, 2.0));
                        let grad = B::float_mul(grad, value);

                        broadcast.backward_rhs::<B>(grad)
                    },
                );
            }
        }

        let lhs_tracked = lhs.is_tracked();
        let rhs_tracked = rhs.is_tracked();
        let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);

        match Div
            .prepare::<C>([lhs.node.clone(), rhs.node.clone()])
            .memory_bound()
            .retro_forward(RetroDiv::<B>::new(lhs.node.id, rhs.node.id))
            .parents([&lhs, &rhs])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                // `lhs` is needed only for the rhs gradient; `rhs` is needed
                // for both gradients, hence the `lhs_tracked || rhs_tracked`.
                let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));
                let rhs_state = (lhs_tracked || rhs_tracked).then(|| prep.checkpoint(&rhs));
                prep.finish(
                    (lhs_state, rhs_state, broadcast),
                    B::float_div(lhs.primitive, rhs.primitive),
                )
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_div(lhs.primitive, rhs.primitive)),
        }
    }
    fn float_div_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {
        // Divides every element by a scalar.
        #[derive(Debug)]
        struct DivScalar;

        retro_unary_scalar!(RetroDivScalar, B::float_div_scalar);

        impl<B: Backend> Backward<B, 1> for DivScalar {
            // The scalar divisor.
            type State = FloatElem<B>;

            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                // d(x / c)/dx = 1 / c; the reciprocal is computed in f32
                // before converting back to the backend element type.
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    let tmp = 1.0 / ops.state.elem::<f32>();
                    B::float_mul_scalar(grad, tmp.elem())
                });
            }
        }

        match DivScalar
            .prepare::<C>([lhs.node.clone()])
            .memory_bound()
            .retro_forward(RetroDivScalar::<B>::new(lhs.node.id, rhs))
            .parents([&lhs])
            .stateful()
        {
            OpsKind::Tracked(prep) => prep.finish(rhs, B::float_div_scalar(lhs.primitive, rhs)),
            OpsKind::UnTracked(prep) => prep.finish(B::float_div_scalar(lhs.primitive, rhs)),
        }
    }
    fn float_remainder(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {
        // Element-wise remainder with autodiff support.
        #[derive(Debug)]
        struct Rem;

        retro_binary!(RetroRem, B::float_remainder);

        impl<B: Backend> Backward<B, 2> for Rem {
            // Optional checkpointed input node ids + broadcast info.
            type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);

            fn backward(
                self,
                ops: Ops<Self::State, 2>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                let (lhs, rhs, broadcast) = ops.state;
                let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));
                let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));

                binary::<B, _, _>(
                    ops.parents,
                    ops.node,
                    grads,
                    |grad| {
                        // remainder(x, y) = x - floor(x / y) * y
                        // partial(x - floor(x / y) * y, x) = 1
                        broadcast.backward_lhs::<B>(grad)
                    },
                    |grad| {
                        // partial(x - floor(x / y) * y, y) = - floor(x / y)
                        let rhs = rhs.unwrap();
                        let lhs = lhs.unwrap();
                        let value = B::float_neg(B::float_floor(B::float_div(lhs, rhs)));
                        let grad = B::float_mul(grad, value);

                        broadcast.backward_rhs::<B>(grad)
                    },
                );
            }
        }

        let lhs_tracked = lhs.is_tracked();
        let rhs_tracked = rhs.is_tracked();
        let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);

        match Rem
            .prepare::<C>([lhs.node.clone(), rhs.node.clone()])
            .memory_bound()
            .retro_forward(RetroRem::<B>::new(lhs.node.id, rhs.node.id))
            .parents([&lhs, &rhs])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                // Both inputs appear in the rhs gradient, so `rhs` is
                // checkpointed whenever either side is tracked.
                let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));
                let rhs_state = (lhs_tracked || rhs_tracked).then(|| prep.checkpoint(&rhs));
                prep.finish(
                    (lhs_state, rhs_state, broadcast),
                    B::float_remainder(lhs.primitive, rhs.primitive),
                )
            }
            OpsKind::UnTracked(prep) => {
                prep.finish(B::float_remainder(lhs.primitive, rhs.primitive))
            }
        }
    }
    fn float_remainder_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {
        // Remainder by a scalar; the gradient w.r.t. the tensor is identity
        // (the floor term is piecewise constant in x).
        #[derive(Debug)]
        struct RemainderScalar;

        retro_unary_scalar!(RetroRemainderScalar, B::float_remainder_scalar);

        impl<B: Backend> Backward<B, 1> for RemainderScalar {
            type State = ();

            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                // d(remainder(x, c))/dx = 1 almost everywhere.
                unary::<B, _>(ops.parents, ops.node, grads, |grad| grad);
            }
        }

        // Stateless: backward needs no saved values.
        RemainderScalar
            .prepare::<C>([lhs.node.clone()])
            .memory_bound()
            .retro_forward(RetroRemainderScalar::<B>::new(lhs.node.id, rhs))
            .parents([&lhs])
            .stateless(B::float_remainder_scalar(lhs.primitive, rhs))
    }
    fn float_matmul(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {
        // Matrix multiplication with autodiff support. Marked compute-bound:
        // the output is stored rather than recomputed during checkpointing.
        #[derive(Debug)]
        struct Matmul;

        impl<B: Backend> Backward<B, 2> for Matmul {
            // Optional checkpointed input node ids + broadcast info.
            type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);

            fn backward(
                self,
                ops: Ops<Self::State, 2>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                let (lhs, rhs, broadcast) = ops.state;
                let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));
                let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));

                binary::<B, _, _>(
                    ops.parents,
                    ops.node,
                    grads,
                    |grad| {
                        // grad_lhs = grad · rhs^T
                        let rhs = B::float_transpose(rhs.unwrap());
                        let grad = B::float_matmul(grad, rhs);

                        broadcast.backward_lhs::<B>(grad)
                    },
                    |grad| {
                        // grad_rhs = lhs^T · grad
                        let lhs = B::float_transpose(lhs.unwrap());
                        let grad = B::float_matmul(lhs, grad);

                        broadcast.backward_rhs::<B>(grad)
                    },
                );
            }
        }

        let lhs_tracked = lhs.is_tracked();
        let rhs_tracked = rhs.is_tracked();
        let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);

        match Matmul
            .prepare::<C>([lhs.node.clone(), rhs.node.clone()])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                // Each input is checkpointed only if the opposite side's
                // gradient (which uses it) will actually be computed.
                let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));
                let rhs_state = lhs_tracked.then(|| prep.checkpoint(&rhs));
                prep.finish(
                    (lhs_state, rhs_state, broadcast),
                    B::float_matmul(lhs.primitive, rhs.primitive),
                )
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_matmul(lhs.primitive, rhs.primitive)),
        }
    }
    fn float_neg(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        // Element-wise negation.
        #[derive(Debug)]
        struct Neg;

        retro_unary!(RetroNeg, B::float_neg);

        impl<B: Backend> Backward<B, 1> for Neg {
            type State = ();

            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                // d(-x)/dx = -1: negate the incoming gradient.
                unary::<B, _>(ops.parents, ops.node, grads, |grad| B::float_neg(grad));
            }
        }

        // Stateless: backward needs no saved values.
        Neg.prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroNeg::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateless(B::float_neg(tensor.primitive))
    }
    fn float_recip(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        // Element-wise reciprocal (1 / x).
        #[derive(Debug)]
        struct Recip;

        retro_unary!(RetroRecip, B::float_recip);

        impl<B: Backend> Backward<B, 1> for Recip {
            // Checkpointed node id of the input tensor.
            type State = NodeID;

            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                let tensor = checkpointer.retrieve_node_output(ops.state);
                // d(1/x)/dx = -x^(-2).
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    let tmp = B::float_powf_scalar(tensor, -2.0);
                    let value = B::float_neg(tmp);

                    B::float_mul(grad, value)
                });
            }
        }

        match Recip
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroRecip::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                // Checkpoint the input before the primitive is consumed.
                let state = prep.checkpoint(&tensor);
                prep.finish(state, B::float_recip(tensor.primitive))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_recip(tensor.primitive)),
        }
    }
    fn float_swap_dims(tensor: FloatTensor<Self>, dim1: usize, dim2: usize) -> FloatTensor<Self> {
        // Swaps two dimensions of the tensor.
        #[derive(Debug)]
        struct SwapDim;

        // Hand-written retro-forward because the op carries extra arguments
        // (dim1, dim2) that the retro_unary! macro cannot express.
        #[derive(new, Debug)]
        struct RetroSwapDims<B: Backend> {
            input_id: NodeID,
            dim1: usize,
            dim2: usize,
            _backend: PhantomData<B>,
        }

        impl<B: Backend> RetroForward for RetroSwapDims<B> {
            fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
                let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);
                let out = B::float_swap_dims(input, self.dim1, self.dim2);
                states.save(out_node, out)
            }
        }

        impl<B: Backend> Backward<B, 1> for SwapDim {
            // The swapped dimension pair.
            type State = (usize, usize);

            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (dim1, dim2) = ops.state;
                // A swap is its own inverse: swap the gradient back.
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    B::float_swap_dims(grad, dim2, dim1)
                });
            }
        }

        match SwapDim
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroSwapDims::<B>::new(tensor.node.id, dim1, dim2))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(prep) => prep.finish(
                (dim1, dim2),
                B::float_swap_dims(tensor.primitive, dim1, dim2),
            ),
            OpsKind::UnTracked(prep) => {
                prep.finish(B::float_swap_dims(tensor.primitive, dim1, dim2))
            }
        }
    }
    fn float_permute(tensor: FloatTensor<Self>, axes: &[usize]) -> FloatTensor<Self> {
        // Permutes the tensor's dimensions according to `axes`.
        #[derive(Debug)]
        struct PermuteDim;

        #[derive(new, Debug)]
        struct RetroPermuteDims<B: Backend> {
            input_id: NodeID,
            axes: Vec<usize>,
            _backend: PhantomData<B>,
        }

        impl<B: Backend> RetroForward for RetroPermuteDims<B> {
            fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
                let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);
                let out = B::float_permute(input, &self.axes);
                states.save(out_node, out)
            }
        }

        impl<B: Backend> Backward<B, 1> for PermuteDim {
            // The forward permutation, inverted during backward.
            type State = Vec<usize>;

            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let axes = ops.state;

                // Build the inverse permutation: if axis `axis` moved to
                // position `i` in forward, it must move back from `i`.
                let mut inverse = vec![0usize; axes.len()];
                axes.iter()
                    .enumerate()
                    .for_each(|(i, &axis)| inverse[axis] = i);

                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    B::float_permute(grad, &inverse)
                });
            }
        }

        match PermuteDim
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroPermuteDims::<B>::new(tensor.node.id, axes.to_vec()))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(prep) => {
                prep.finish(axes.to_vec(), B::float_permute(tensor.primitive, axes))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_permute(tensor.primitive, axes)),
        }
    }
    fn float_flip(tensor: FloatTensor<Self>, axes: &[usize]) -> FloatTensor<Self> {
        // Reverses the tensor along the given axes.
        #[derive(Debug)]
        struct FlipDim;

        #[derive(new, Debug)]
        struct RetroFlipDims<B: Backend> {
            input_id: NodeID,
            axes: Vec<usize>,
            _backend: PhantomData<B>,
        }

        impl<B: Backend> RetroForward for RetroFlipDims<B> {
            fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
                let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);
                let out = B::float_flip(input, &self.axes);
                states.save(out_node, out)
            }
        }

        impl<B: Backend> Backward<B, 1> for FlipDim {
            // The flipped axes, reused in backward.
            type State = Vec<usize>;

            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let axes = ops.state;
                // A flip is its own inverse: flip the gradient along the
                // same axes.
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    B::float_flip(grad, &axes)
                });
            }
        }

        match FlipDim
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroFlipDims::<B>::new(tensor.node.id, axes.to_vec()))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(prep) => {
                prep.finish(axes.to_vec(), B::float_flip(tensor.primitive, axes))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_flip(tensor.primitive, axes)),
        }
    }
    fn float_reshape(tensor: FloatTensor<Self>, shape: Shape) -> FloatTensor<Self> {
        // Reshapes the tensor to `shape`.
        #[derive(Debug)]
        struct ReshapeDim;

        #[derive(new, Debug)]
        struct RetroReshape<B: Backend> {
            input_id: NodeID,
            shape: Shape,
            _backend: PhantomData<B>,
        }

        impl<B: Backend> RetroForward for RetroReshape<B> {
            fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
                let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);
                let out = B::float_reshape(input, self.shape.clone());
                states.save(out_node, out)
            }
        }

        impl<B: Backend> Backward<B, 1> for ReshapeDim {
            // (original shape, requested output shape).
            type State = (Shape, Shape);

            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (shape_original, shape) = ops.state;
                let ndims_out = shape.num_dims();

                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    let shape_grad = grad.shape();
                    let mut grad = grad;

                    // Where the output shape had size 1 but the gradient
                    // does not, the dimension was broadcast downstream:
                    // sum it back before reshaping to the original shape.
                    for i in 0..ndims_out {
                        if shape.dims[i] == 1 && shape_grad.dims[i] != 1 {
                            grad = B::float_sum_dim(grad, i);
                        }
                    }

                    B::float_reshape(grad, shape_original)
                });
            }
        }

        match ReshapeDim
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroReshape::<B>::new(tensor.node.id, shape.clone()))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(prep) => prep.finish(
                (tensor.primitive.shape(), shape.clone()),
                B::float_reshape(tensor.primitive, shape),
            ),
            OpsKind::UnTracked(prep) => prep.finish(B::float_reshape(tensor.primitive, shape)),
        }
    }
    fn float_gather(
        dim: usize,
        tensor: FloatTensor<Self>,
        indices: IntTensor<B>,
    ) -> FloatTensor<Self> {
        // Gathers values along `dim` at `indices`.
        #[derive(Debug)]
        struct Gather;

        impl<B: Backend> Backward<B, 1> for Gather {
            // (dim, indices, input shape, device) — everything needed to
            // route the gradient back to the gathered positions.
            type State = (usize, IntTensor<B>, Shape, B::Device);

            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (dim, indices, shape, device) = ops.state;
                // Gather's adjoint: scatter the gradient into a zero tensor
                // of the input's shape at the same indices.
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    let zeros = B::float_zeros(shape, &device);
                    B::float_scatter(dim, zeros, indices, grad)
                });
            }
        }

        match Gather
            .prepare::<C>([tensor.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => prep.finish(
                (
                    dim,
                    indices.clone(),
                    tensor.primitive.shape(),
                    B::float_device(&tensor.primitive),
                ),
                B::float_gather(dim, tensor.primitive, indices),
            ),
            OpsKind::UnTracked(prep) => {
                prep.finish(B::float_gather(dim, tensor.primitive, indices))
            }
        }
    }
    fn float_scatter(
        dim: usize,
        tensor: FloatTensor<Self>,
        indices: IntTensor<B>,
        value: FloatTensor<Self>,
    ) -> FloatTensor<Self> {
        // Scatters `value` into `tensor` along `dim` at `indices`.
        #[derive(Debug)]
        struct Scatter;

        impl<B: Backend> Backward<B, 2> for Scatter {
            // (dim, indices, lhs shape, rhs shape, device).
            type State = (usize, IntTensor<B>, Shape, Shape, B::Device);

            fn backward(
                self,
                ops: Ops<Self::State, 2>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (dim, indices, shape_lhs, shape_rhs, device) = ops.state;
                // `indices` is used by both gradient closures; duplicate it
                // according to which parents require a gradient.
                let [indices_4lhs, indices_4rhs] = duplicate(&ops.parents, Some(indices));

                // NOTE(review): both gradients are assembled via
                // B::float_scatter; this relies on the backend's scatter
                // contract (zeroing vs. accumulating at the indexed
                // positions) — confirm against the backend docs.
                binary::<B, _, _>(
                    ops.parents,
                    ops.node,
                    grads,
                    |grad| {
                        // Tensor gradient: the positions overwritten by the
                        // scatter contribute through `value`, so zero them
                        // out in the incoming gradient.
                        let zeros = B::float_zeros(shape_lhs, &device);
                        B::float_scatter(dim, grad, indices_4lhs.unwrap(), zeros)
                    },
                    |grad| {
                        // Value gradient: route the gradient to the scattered
                        // positions of a zero tensor shaped like `value`.
                        let zeros = B::float_zeros(shape_rhs, &device);
                        B::float_scatter(dim, zeros, indices_4rhs.unwrap(), grad)
                    },
                );
            }
        }

        match Scatter
            .prepare::<C>([tensor.node, value.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => prep.finish(
                (
                    dim,
                    indices.clone(),
                    tensor.primitive.shape(),
                    value.primitive.shape(),
                    B::float_device(&value.primitive),
                ),
                B::float_scatter(dim, tensor.primitive, indices, value.primitive),
            ),
            OpsKind::UnTracked(prep) => prep.finish(B::float_scatter(
                dim,
                tensor.primitive,
                indices,
                value.primitive,
            )),
        }
    }
    fn float_select(
        tensor: FloatTensor<Self>,
        dim: usize,
        indices: IntTensor<B>,
    ) -> FloatTensor<Self> {
        // Selects slices along `dim` at `indices`.
        #[derive(Debug)]
        struct Select;

        #[derive(new, Debug)]
        struct RetroSelect<B: Backend> {
            input_id: NodeID,
            dim: usize,
            indices: IntTensor<B>,
        }

        impl<B: Backend> RetroForward for RetroSelect<B> {
            fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
                let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);
                let out = B::float_select(input, self.dim, self.indices.clone());
                states.save(out_node, out)
            }
        }

        impl<B: Backend> Backward<B, 1> for Select {
            // (dim, indices, input shape, device).
            type State = (usize, IntTensor<B>, Shape, B::Device);

            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (dim, indices, shape, device) = ops.state;
                // Select's adjoint: assign the gradient back into a zero
                // tensor of the input's shape at the selected indices.
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    let zeros = B::float_zeros(shape, &device);
                    B::float_select_assign(zeros, dim, indices, grad)
                });
            }
        }

        match Select
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroSelect::<B>::new(tensor.node.id, dim, indices.clone()))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(prep) => prep.finish(
                (
                    dim,
                    indices.clone(),
                    tensor.primitive.shape(),
                    B::float_device(&tensor.primitive),
                ),
                B::float_select(tensor.primitive, dim, indices),
            ),
            OpsKind::UnTracked(prep) => {
                prep.finish(B::float_select(tensor.primitive, dim, indices))
            }
        }
    }
    fn float_select_assign(
        tensor: FloatTensor<Self>,
        dim: usize,
        indices: IntTensor<B>,
        value: FloatTensor<Self>,
    ) -> FloatTensor<Self> {
        // Backward op marker for select-assign (accumulate `value` into `tensor` at `indices`).
        #[derive(Debug)]
        struct IndexSelectDimAssign;
        // Recomputes the forward select-assign from checkpointed inputs.
        #[derive(new, Debug)]
        struct RetroSelectAssign<B: Backend> {
            tensor_id: NodeID,
            dim: usize,
            indices: IntTensor<B>,
            value_id: NodeID,
        }
        impl<B: Backend> RetroForward for RetroSelectAssign<B> {
            fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
                let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);
                let value = states.get_state::<B::FloatTensorPrimitive>(&self.value_id);
                let out = B::float_select_assign(tensor, self.dim, self.indices.clone(), value);
                states.save(out_node, out)
            }
        }
        impl<B: Backend> Backward<B, 2> for IndexSelectDimAssign {
            type State = (usize, IntTensor<B>);
            fn backward(
                self,
                ops: Ops<Self::State, 2>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (dim, indices) = ops.state;
                binary::<B, _, _>(
                    ops.parents,
                    ops.node,
                    grads,
                    // lhs (`tensor`): assignment accumulates, so its grad passes through unchanged.
                    |grad| grad,
                    // rhs (`value`): its grad is the output grad gathered at the assigned indices.
                    |grad| B::float_select(grad, dim, indices),
                );
            }
        }
        match IndexSelectDimAssign
            .prepare::<C>([tensor.node.clone(), value.node.clone()])
            .memory_bound()
            .retro_forward(RetroSelectAssign::<B>::new(
                tensor.node.id,
                dim,
                indices.clone(),
                value.node.id,
            ))
            .parents([&tensor, &value])
            .stateful()
        {
            OpsKind::Tracked(prep) => prep.finish(
                (dim, indices.clone()),
                B::float_select_assign(tensor.primitive, dim, indices, value.primitive),
            ),
            OpsKind::UnTracked(prep) => prep.finish(B::float_select_assign(
                tensor.primitive,
                dim,
                indices,
                value.primitive,
            )),
        }
    }
    fn float_slice(
        tensor: FloatTensor<Self>,
        ranges: &[core::ops::Range<usize>],
    ) -> FloatTensor<Self> {
        // Backward op marker for slicing.
        #[derive(Debug)]
        struct Index;
        // Recomputes the forward slice from a checkpointed input.
        #[derive(new, Debug)]
        struct RetroSlice<B: Backend> {
            tensor_id: NodeID,
            ranges: Vec<core::ops::Range<usize>>,
            _backend: PhantomData<B>,
        }
        impl<B: Backend> RetroForward for RetroSlice<B> {
            fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
                let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);
                let out = B::float_slice(tensor, &self.ranges);
                states.save(out_node, out)
            }
        }
        impl<B: Backend> Backward<B, 1> for Index {
            // (ranges, input shape, input device) captured at forward time.
            type State = (Vec<core::ops::Range<usize>>, Shape, B::Device);
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (ranges, shape, device) = ops.state;
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    // Gradient of slice: paste the output grad into a zero tensor
                    // of the original shape at the sliced location.
                    let zeros = B::float_zeros(shape, &device);
                    B::float_slice_assign(zeros, &ranges, grad)
                });
            }
        }
        match Index
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroSlice::<B>::new(tensor.node.id, ranges.to_vec()))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(prep) => prep.finish(
                (
                    ranges.to_vec(),
                    tensor.primitive.shape(),
                    B::float_device(&tensor.primitive),
                ),
                B::float_slice(tensor.primitive, ranges),
            ),
            OpsKind::UnTracked(prep) => prep.finish(B::float_slice(tensor.primitive, ranges)),
        }
    }
    fn float_slice_assign(
        tensor: FloatTensor<Self>,
        ranges: &[core::ops::Range<usize>],
        value: FloatTensor<Self>,
    ) -> FloatTensor<Self> {
        // Backward op marker for slice-assign (overwrite `tensor[ranges]` with `value`).
        #[derive(Debug)]
        struct SliceAssign;
        // Recomputes the forward slice-assign from checkpointed inputs.
        #[derive(new, Debug)]
        struct RetroSliceAssign<B: Backend> {
            tensor_id: NodeID,
            ranges: Vec<core::ops::Range<usize>>,
            value_id: NodeID,
            _backend: PhantomData<B>,
        }
        impl<B: Backend> RetroForward for RetroSliceAssign<B> {
            fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
                let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);
                let value = states.get_state::<B::FloatTensorPrimitive>(&self.value_id);
                let out = B::float_slice_assign(tensor, &self.ranges, value);
                states.save(out_node, out)
            }
        }
        impl<B: Backend> Backward<B, 2> for SliceAssign {
            // (ranges, value shape, value device) captured at forward time.
            type State = (Vec<core::ops::Range<usize>>, Shape, B::Device);
            fn backward(
                self,
                ops: Ops<Self::State, 2>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (ranges, shape_rhs, device) = ops.state;
                // Clone `ranges` only as many times as required by which parents are tracked.
                let [ranges_4lhs, ranges_4rhs] = duplicate(&ops.parents, Some(ranges));
                binary::<B, _, _>(
                    ops.parents,
                    ops.node,
                    grads,
                    // lhs (`tensor`): the assigned region was overwritten, so zero
                    // out its gradient there and pass the rest through.
                    |grad| {
                        let zeros = B::float_zeros(shape_rhs, &device);
                        B::float_slice_assign(grad, &ranges_4lhs.unwrap(), zeros)
                    },
                    // rhs (`value`): its grad is the output grad restricted to the slice.
                    |grad| B::float_slice(grad, &ranges_4rhs.unwrap()),
                );
            }
        }
        match SliceAssign
            .prepare::<C>([tensor.node.clone(), value.node.clone()])
            .memory_bound()
            .retro_forward(RetroSliceAssign::<B>::new(
                tensor.node.id,
                ranges.to_vec(),
                value.node.id,
            ))
            .parents([&tensor, &value])
            .stateful()
        {
            OpsKind::Tracked(prep) => prep.finish(
                (
                    ranges.to_vec(),
                    value.primitive.shape(),
                    B::float_device(&value.primitive),
                ),
                B::float_slice_assign(tensor.primitive, ranges, value.primitive),
            ),
            OpsKind::UnTracked(prep) => prep.finish(B::float_slice_assign(
                tensor.primitive,
                ranges,
                value.primitive,
            )),
        }
    }
    fn float_mask_where(
        tensor: FloatTensor<Self>,
        mask: BoolTensor<Self>,
        source: FloatTensor<Self>,
    ) -> FloatTensor<Self> {
        // Backward op marker for masked-where: out = mask ? source : tensor.
        #[derive(Debug)]
        struct MaskWhere;
        impl<B: Backend> Backward<B, 2> for MaskWhere {
            // (mask, lhs shape, rhs shape, device) captured at forward time; the
            // shapes are needed because the operands may have been broadcast.
            type State = (BoolTensor<B>, Shape, Shape, B::Device);
            fn backward(
                self,
                ops: Ops<Self::State, 2>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (mask, shape_lhs, shape_rhs, device) = ops.state;
                // Clone the mask only as many times as required by tracked parents.
                let [mask_4lhs, mask_4rhs] = duplicate(&ops.parents, Some(mask));
                binary::<B, _, _>(
                    ops.parents,
                    ops.node,
                    grads,
                    // lhs (`tensor`): grad flows only where mask is false.
                    |grad| {
                        let zeros = B::float_zeros(shape_lhs.clone(), &device);
                        let grad = B::float_mask_where(grad, mask_4lhs.unwrap(), zeros);
                        // Sum broadcast dimensions back to the original shape.
                        broadcast_shape::<B>(grad, &shape_lhs)
                    },
                    // rhs (`source`): grad flows only where mask is true.
                    |grad| {
                        let zeros = B::float_zeros(shape_rhs.clone(), &device);
                        let grad = B::float_mask_where(zeros, mask_4rhs.unwrap(), grad);
                        broadcast_shape::<B>(grad, &shape_rhs)
                    },
                );
            }
        }
        match MaskWhere
            .prepare::<C>([tensor.node, source.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => prep.finish(
                (
                    mask.clone(),
                    tensor.primitive.shape(),
                    source.primitive.shape(),
                    B::float_device(&source.primitive),
                ),
                B::float_mask_where(tensor.primitive, mask, source.primitive),
            ),
            OpsKind::UnTracked(prep) => prep.finish(B::float_mask_where(
                tensor.primitive,
                mask,
                source.primitive,
            )),
        }
    }
    fn float_mask_fill(
        tensor: FloatTensor<Self>,
        mask: BoolTensor<B>,
        value: FloatElem<B>,
    ) -> FloatTensor<Self> {
        // Backward op marker for masked fill: out = mask ? value : tensor.
        #[derive(Debug)]
        struct MaskFill;
        impl<B: Backend> Backward<B, 1> for MaskFill {
            // Only the mask is needed to route gradients.
            type State = BoolTensor<B>;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    // Filled positions came from a constant, so zero their gradient.
                    B::float_mask_fill(grad, ops.state, 0.elem())
                });
            }
        }
        match MaskFill
            .prepare::<C>([tensor.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => prep.finish(
                mask.clone(),
                B::float_mask_fill(tensor.primitive, mask, value),
            ),
            OpsKind::UnTracked(prep) => {
                prep.finish(B::float_mask_fill(tensor.primitive, mask, value))
            }
        }
    }
    // Comparison operators return bool tensors, which carry no gradient, so
    // they all delegate straight to the inner backend without graph tracking.
    fn float_equal(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {
        B::float_equal(lhs.primitive, rhs.primitive)
    }
    fn float_equal_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {
        B::float_equal_elem(lhs.primitive, rhs)
    }
    fn float_greater(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {
        B::float_greater(lhs.primitive, rhs.primitive)
    }
    fn float_greater_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {
        B::float_greater_elem(lhs.primitive, rhs)
    }
    fn float_greater_equal(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {
        B::float_greater_equal(lhs.primitive, rhs.primitive)
    }
    fn float_greater_equal_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {
        B::float_greater_equal_elem(lhs.primitive, rhs)
    }
    fn float_lower(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {
        B::float_lower(lhs.primitive, rhs.primitive)
    }
    fn float_lower_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {
        B::float_lower_elem(lhs.primitive, rhs)
    }
    fn float_lower_equal(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {
        B::float_lower_equal(lhs.primitive, rhs.primitive)
    }
    fn float_lower_equal_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {
        B::float_lower_equal_elem(lhs.primitive, rhs)
    }
    // Detaches the tensor from the autodiff graph while preserving its
    // `require_grad` flag, so it can start a fresh graph if needed.
    fn float_detach(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        // When we detach a tensor, we remove it from the graph, but we still want to keep the
        // `require_grad` setting.
        let is_require_grad = Self::float_is_require_grad(&tensor);
        // A brand-new AutodiffTensor has no parents, i.e. no graph history.
        let tensor = AutodiffTensor::new(tensor.primitive);
        match is_require_grad {
            true => tensor.require_grad(),
            false => tensor,
        }
    }
    // Sets the `require_grad` flag. Turning it off also drops the tensor's
    // graph history by wrapping the primitive in a fresh AutodiffTensor.
    fn float_set_require_grad(tensor: FloatTensor<Self>, require_grad: bool) -> FloatTensor<Self> {
        if require_grad {
            return tensor.require_grad();
        }
        AutodiffTensor::new(tensor.primitive)
    }
    // Returns true when the tensor's node is flagged as requiring gradients.
    fn float_is_require_grad(tensor: &FloatTensor<Self>) -> bool {
        matches!(tensor.node.requirement, Requirement::Grad)
    }
    fn float_mean(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        // Backward op marker for the full-tensor mean reduction.
        #[derive(Debug)]
        struct Mean;
        impl<B: Backend> Backward<B, 1> for Mean {
            // Input shape, needed to spread the scalar gradient back out.
            type State = Shape;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    let shape = ops.state;
                    // d(mean)/dx_i = 1/N for every element.
                    let val = 1_f64 / shape.num_elements() as f64;
                    let ones = B::float_ones(shape, &B::float_device(&grad));
                    let val = B::float_mul_scalar(ones, val.elem());
                    // Make the scalar grad broadcastable against the input shape.
                    let grad = unsqueeze_like::<B>(grad, val.shape());
                    B::float_mul(val, grad)
                });
            }
        }
        match Mean.prepare::<C>([tensor.node]).compute_bound().stateful() {
            OpsKind::Tracked(prep) => {
                prep.finish(tensor.primitive.shape(), B::float_mean(tensor.primitive))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_mean(tensor.primitive)),
        }
    }
    fn float_sum(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        // Backward op marker for the full-tensor sum reduction.
        #[derive(Debug)]
        struct Sum;
        impl<B: Backend> Backward<B, 1> for Sum {
            // Input shape, needed to spread the scalar gradient back out.
            type State = Shape;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    // d(sum)/dx_i = 1 for every element.
                    let val = B::float_ones(ops.state, &B::float_device(&grad));
                    // Make the scalar grad broadcastable against the input shape.
                    let grad = unsqueeze_like::<B>(grad, val.shape());
                    B::float_mul(val, grad)
                });
            }
        }
        match Sum.prepare::<C>([tensor.node]).compute_bound().stateful() {
            OpsKind::Tracked(prep) => {
                prep.finish(tensor.primitive.shape(), B::float_sum(tensor.primitive))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_sum(tensor.primitive)),
        }
    }
    fn float_mean_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {
        // Backward op marker for mean along one dimension.
        #[derive(Debug)]
        struct MeanDim;
        impl<B: Backend> Backward<B, 1> for MeanDim {
            // (input shape, reduced dim) captured at forward time.
            type State = (Shape, usize);
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (shape, dim) = ops.state;
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    // Each input element contributes 1/size(dim) to the mean.
                    let val = 1_f64 / shape.dims[dim] as f64;
                    let ones = B::float_ones(shape, &B::float_device(&grad));
                    let val = B::float_mul_scalar(ones, B::FloatElem::from_elem(val));
                    // Collapse any incoming grad along `dim`, then broadcast-multiply
                    // to spread it over the input shape.
                    let grad = B::float_sum_dim(grad, dim);
                    B::float_mul(val, grad)
                });
            }
        }
        match MeanDim
            .prepare::<C>([tensor.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => prep.finish(
                (tensor.primitive.shape(), dim),
                B::float_mean_dim(tensor.primitive, dim),
            ),
            OpsKind::UnTracked(prep) => prep.finish(B::float_mean_dim(tensor.primitive, dim)),
        }
    }
    fn float_sum_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {
        // Backward op marker for sum along one dimension.
        #[derive(Debug)]
        struct SumDim;
        impl<B: Backend> Backward<B, 1> for SumDim {
            // (input shape, reduced dim) captured at forward time.
            type State = (Shape, usize);
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (shape, dim) = ops.state;
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    // Broadcast the reduced gradient back over the input shape
                    // (each input element has derivative 1 w.r.t. the sum).
                    let ones = B::float_ones(shape, &B::float_device(&grad));
                    let grad = B::float_sum_dim(grad, dim);
                    B::float_mul(ones, grad)
                });
            }
        }
        match SumDim
            .prepare::<C>([tensor.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => prep.finish(
                (tensor.primitive.shape(), dim),
                B::float_sum_dim(tensor.primitive, dim),
            ),
            OpsKind::UnTracked(prep) => prep.finish(B::float_sum_dim(tensor.primitive, dim)),
        }
    }
    // argmax/argmin return integer index tensors, which are not differentiable,
    // so they delegate directly to the inner backend without graph tracking.
    fn float_argmax(tensor: FloatTensor<Self>, dim: usize) -> IntTensor<B> {
        B::float_argmax(tensor.primitive, dim)
    }
    fn float_argmin(tensor: FloatTensor<Self>, dim: usize) -> IntTensor<B> {
        B::float_argmin(tensor.primitive, dim)
    }
    fn float_exp(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        // Backward op marker for exp.
        #[derive(Debug)]
        struct Exp;
        // Retro-forward that recomputes exp from the checkpointed input.
        retro_unary!(RetroExp, B::float_exp);
        impl<B: Backend> Backward<B, 1> for Exp {
            // NodeID of the checkpointed input.
            type State = NodeID;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                let input = checkpointer.retrieve_node_output(ops.state);
                // d(exp(x))/dx = exp(x); recompute the output from the input.
                let output = B::float_exp(input);
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    B::float_mul(grad, output)
                });
            }
        }
        match Exp
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroExp::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                let state = prep.checkpoint(&tensor);
                prep.finish(state, B::float_exp(tensor.primitive))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_exp(tensor.primitive)),
        }
    }
    fn float_log(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        // Backward op marker for natural log.
        #[derive(Debug)]
        struct Log;
        retro_unary!(RetroLog, B::float_log);
        impl<B: Backend> Backward<B, 1> for Log {
            // NodeID of the checkpointed input.
            type State = NodeID;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                let input = checkpointer.retrieve_node_output(ops.state);
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    // d(ln(x))/dx = 1/x, computed as x^(-1).
                    let value = B::float_powf_scalar(input, -1.0);
                    B::float_mul(grad, value)
                });
            }
        }
        match Log
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroLog::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                let state = prep.checkpoint(&tensor);
                prep.finish(state, B::float_log(tensor.primitive))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_log(tensor.primitive)),
        }
    }
    fn float_log1p(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        // Backward op marker for log(1 + x).
        #[derive(Debug)]
        struct Log1P;
        retro_unary!(RetroLog1P, B::float_log1p);
        impl<B: Backend> Backward<B, 1> for Log1P {
            // NodeID of the checkpointed input.
            type State = NodeID;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                let input = checkpointer.retrieve_node_output(ops.state);
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    // d(log(1+x))/dx = 1/(1+x).
                    let value = B::float_add_scalar(input, 1.elem());
                    let value = B::float_powf_scalar(value, -1.0);
                    B::float_mul(grad, value)
                });
            }
        }
        match Log1P
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroLog1P::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                let state = prep.checkpoint(&tensor);
                prep.finish(state, B::float_log1p(tensor.primitive))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_log1p(tensor.primitive)),
        }
    }
    fn float_powf_scalar(tensor: FloatTensor<Self>, value: f32) -> FloatTensor<Self> {
        // Backward op marker for x^c with a scalar exponent.
        #[derive(Debug)]
        struct PowfScalar;
        // Recomputes x^c from the checkpointed base (memory-bound strategy).
        #[derive(new, Debug)]
        struct RetroPowfScalar<B: Backend> {
            lhs_id: NodeID,
            rhs: f32,
            _backend: PhantomData<B>,
        }
        impl<B: Backend> RetroForward for RetroPowfScalar<B> {
            fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
                let lhs = states.get_state::<B::FloatTensorPrimitive>(&self.lhs_id);
                let out = B::float_powf_scalar(lhs, self.rhs);
                states.save(out_node, out)
            }
        }
        impl<B: Backend> Backward<B, 1> for PowfScalar {
            // (checkpointed input NodeID, exponent).
            type State = (NodeID, f32);
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                let (tensor_id, value) = ops.state;
                let tensor = checkpointer.retrieve_node_output(tensor_id);
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    // d(x^c)/dx = c * x^(c-1).
                    let tmp = B::float_powf_scalar(tensor, value - 1.0);
                    let value = B::float_mul_scalar(tmp, value.elem());
                    B::float_mul(grad, value)
                });
            }
        }
        match PowfScalar
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroPowfScalar::<B>::new(tensor.node.id, value))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                let state = (prep.checkpoint(&tensor), value);
                prep.finish(state, B::float_powf_scalar(tensor.primitive, value))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_powf_scalar(tensor.primitive, value)),
        }
    }
    fn float_sqrt(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        // Backward op marker for square root.
        #[derive(Debug)]
        struct Sqrt;
        retro_unary!(RetroSqrt, B::float_sqrt);
        impl<B: Backend> Backward<B, 1> for Sqrt {
            // NodeID of the checkpointed input.
            type State = NodeID;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                let input = checkpointer.retrieve_node_output(ops.state);
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    // d(sqrt(x))/dx = 1 / (2 * sqrt(x)) = x^(-1/2) / 2.
                    let value = B::float_div_scalar(B::float_powf_scalar(input, -0.5), 2.elem());
                    B::float_mul(grad, value)
                });
            }
        }
        match Sqrt
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroSqrt::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                let state = prep.checkpoint(&tensor);
                prep.finish(state, B::float_sqrt(tensor.primitive))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_sqrt(tensor.primitive)),
        }
    }
    fn float_abs(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        // Backward op marker for absolute value.
        #[derive(Debug)]
        struct Abs;
        retro_unary!(RetroAbs, B::float_abs);
        impl<B: Backend> Backward<B, 1> for Abs {
            // NodeID of the checkpointed input.
            type State = NodeID;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                let tensor: B::FloatTensorPrimitive = checkpointer.retrieve_node_output(ops.state);
                // d(|x|)/dx = sign(x) (subgradient at 0 follows the backend's sign).
                let state = B::float_sign(tensor);
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    B::float_mul(grad, state)
                });
            }
        }
        match Abs
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroAbs::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                let state = prep.checkpoint(&tensor);
                prep.finish(state, B::float_abs(tensor.primitive))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_abs(tensor.primitive)),
        }
    }
    fn float_cos(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        // Backward op marker for cosine.
        #[derive(Debug)]
        struct Cos;
        retro_unary!(RetroCos, B::float_cos);
        impl<B: Backend> Backward<B, 1> for Cos {
            // NodeID of the checkpointed input.
            type State = NodeID;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                let input = checkpointer.retrieve_node_output(ops.state);
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    // d(cos(x))/dx = -sin(x).
                    let value = B::float_neg(B::float_sin(input));
                    B::float_mul(grad, value)
                });
            }
        }
        match Cos
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroCos::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                let state = prep.checkpoint(&tensor);
                prep.finish(state, B::float_cos(tensor.primitive))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_cos(tensor.primitive)),
        }
    }
    fn float_sin(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        // Backward op marker for sine.
        #[derive(Debug)]
        struct Sin;
        retro_unary!(RetroSin, B::float_sin);
        impl<B: Backend> Backward<B, 1> for Sin {
            // NodeID of the checkpointed input.
            type State = NodeID;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                let state = checkpointer.retrieve_node_output(ops.state);
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    // d(sin(x))/dx = cos(x).
                    let value = B::float_cos(state);
                    B::float_mul(grad, value)
                });
            }
        }
        match Sin
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroSin::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                let state = prep.checkpoint(&tensor);
                prep.finish(state, B::float_sin(tensor.primitive))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_sin(tensor.primitive)),
        }
    }
    fn float_tanh(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        // Backward op marker for tanh.
        #[derive(Debug)]
        struct Tanh;
        retro_unary!(RetroTanh, B::float_tanh);
        impl<B: Backend> Backward<B, 1> for Tanh {
            // NodeID of the checkpointed input.
            type State = NodeID;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                let input = checkpointer.retrieve_node_output(ops.state);
                // Recompute the forward output; the derivative is expressed in terms of it.
                let state = B::float_tanh(input);
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    // d(tanh(x))/dx = 1 - tanh(x)^2.
                    let value = B::float_add_scalar(
                        B::float_neg(B::float_powf_scalar(state, 2.0)),
                        1.elem(),
                    );
                    B::float_mul(grad, value)
                });
            }
        }
        match Tanh
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroTanh::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                let state = prep.checkpoint(&tensor);
                prep.finish(state, B::float_tanh(tensor.primitive))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_tanh(tensor.primitive)),
        }
    }
    fn float_round(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        // Backward op marker for rounding.
        #[derive(Debug)]
        struct Round;
        retro_unary!(RetroRound, B::float_round);
        impl<B: Backend> Backward<B, 1> for Round {
            // (input shape, input device) to build the zero gradient.
            type State = (Shape, B::Device);
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (shape, device) = ops.state;
                unary::<B, _>(ops.parents, ops.node, grads, |_grad| {
                    // round() is piecewise constant, so its gradient is zero
                    // everywhere it is defined.
                    B::float_zeros(shape, &device)
                })
            }
        }
        match Round
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroRound::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(preps) => preps.finish(
                (tensor.primitive.shape(), B::float_device(&tensor.primitive)),
                B::float_round(tensor.primitive),
            ),
            OpsKind::UnTracked(preps) => preps.finish(B::float_round(tensor.primitive)),
        }
    }
    fn float_floor(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        // Backward op marker for floor.
        #[derive(Debug)]
        struct Floor;
        retro_unary!(RetroFloor, B::float_floor);
        impl<B: Backend> Backward<B, 1> for Floor {
            // (input shape, input device) to build the zero gradient.
            type State = (Shape, B::Device);
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (shape, device) = ops.state;
                unary::<B, _>(ops.parents, ops.node, grads, |_grad| {
                    // floor() is piecewise constant, so its gradient is zero
                    // everywhere it is defined.
                    B::float_zeros(shape, &device)
                })
            }
        }
        match Floor
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroFloor::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(preps) => preps.finish(
                (tensor.primitive.shape(), B::float_device(&tensor.primitive)),
                B::float_floor(tensor.primitive),
            ),
            OpsKind::UnTracked(preps) => preps.finish(B::float_floor(tensor.primitive)),
        }
    }
fn float_ceil(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
#[derive(Debug)]
struct Ceil;
retro_unary!(RetroCeil, B::float_ceil);
impl<B: Backend> Backward<B, 1> for Ceil {
type State = (Shape, B::Device);
fn backward(
self,
ops: Ops<Self::State, 1>,
grads: &mut Gradients,
_checkpointer: &mut Checkpointer,
) {
let (shape, device) = ops.state;
unary::<B, _>(ops.parents, ops.node, grads, |_grad| {
B::float_zeros(shape, &device)
})
}
}
match Ceil
.prepare::<C>([tensor.node.clone()])
.memory_bound()
.retro_forward(RetroCeil::<B>::new(tensor.node.id))
.parents([&tensor])
.stateful()
{
OpsKind::Tracked(preps) => preps.finish(
(tensor.primitive.shape(), B::float_device(&tensor.primitive)),
B::float_floor(tensor.primitive),
),
OpsKind::UnTracked(preps) => preps.finish(B::float_floor(tensor.primitive)),
}
}
    fn float_erf(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        // Backward op marker for the error function.
        #[derive(Debug)]
        struct Erf;
        retro_unary!(RetroErf, B::float_erf);
        impl<B: Backend> Backward<B, 1> for Erf {
            // NodeID of the checkpointed input.
            type State = NodeID;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    // d(erf(x))/dx = 2/sqrt(pi) * exp(-x^2).
                    let ops = checkpointer.retrieve_node_output(ops.state);
                    let exponent = B::float_neg(B::float_powf_scalar(ops, 2.0));
                    let numerator = B::float_mul_scalar(B::float_exp(exponent), 2.0.elem());
                    let denominator = core::f64::consts::PI.sqrt().elem();
                    let value = B::float_div_scalar(numerator, denominator);
                    B::float_mul(grad, value)
                });
            }
        }
        match Erf
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroErf::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                let state = prep.checkpoint(&tensor);
                prep.finish(state, B::float_erf(tensor.primitive))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_erf(tensor.primitive)),
        }
    }
    fn float_cat(tensors: Vec<FloatTensor<Self>>, dim: usize) -> FloatTensor<Self> {
        // Custom backward step for concatenation: the output gradient is split
        // back into one slice per input along `dim`.
        #[derive(new, Debug)]
        struct CatStep<B: Backend> {
            nodes: Vec<Option<NodeRef>>,
            // The dimension of each tensor along the dim dimension.
            // This indicates the number of dimension concatenated for each tensor.
            dim_sizes: Vec<usize>,
            output: NodeRef,
            phantom: PhantomData<B>,
            dim: usize,
        }
        impl<B: Backend> Step for CatStep<B> {
            fn step(self: Box<Self>, grads: &mut Gradients, _checkpointer: &mut Checkpointer) {
                let grad = grads.consume::<B>(&self.output);
                // Full ranges over every dimension; only the `dim` entry is
                // narrowed per input below.
                let ranges: Vec<_> = grad.shape().dims.iter().map(|v| 0..*v).collect();
                let mut current_index = 0;
                self.nodes
                    .into_iter()
                    .zip(self.dim_sizes)
                    // Skip inputs that do not require gradients (node is None),
                    // but still keep their dim_size pairing intact.
                    .filter_map(|(node, dim_size)| node.map(|node| (node, dim_size)))
                    .for_each(|(node, dim_size)| {
                        let mut ranges = ranges.clone();
                        ranges[self.dim] = current_index..dim_size + current_index;
                        current_index += dim_size;
                        grads.register::<B>(node.id, B::float_slice(grad.clone(), &ranges));
                    });
            }
            fn node(&self) -> NodeID {
                self.output.id
            }
            fn parents(&self) -> Vec<NodeID> {
                self.nodes
                    .iter()
                    .filter_map(|node| node.clone())
                    .map(|node| node.id)
                    .collect()
            }
            fn depth(&self) -> usize {
                self.output.order
            }
        }
        let mut nodes = Vec::with_capacity(tensors.len());
        let mut primitives = Vec::with_capacity(tensors.len());
        let mut dim_sizes = Vec::with_capacity(tensors.len());
        tensors.into_iter().for_each(|tensor| {
            dim_sizes.push(tensor.primitive.shape().dims[dim]);
            nodes.push(tensor.node);
            primitives.push(tensor.primitive);
        });
        let requirement = Requirement::from_nodes(&nodes);
        // For simplicity, this operation does not checkpoint anything
        let cat_computing_property = ComputingProperty::Ambiguous;
        let checkpointer_builder = CheckpointerBuilder::default();
        let output = B::float_cat(primitives, dim);
        // No tracked parent: return an untracked output without a backward step.
        if requirement.is_none() {
            return AutodiffTensor::from_parents(
                output,
                &nodes,
                requirement,
                cat_computing_property,
            );
        }
        let output =
            AutodiffTensor::from_parents(output, &nodes, requirement, cat_computing_property);
        let nodes = nodes
            .into_iter()
            .map(|node| node.clone_if_require_grad())
            .collect::<Vec<_>>();
        let ops = CatStep::<B>::new(nodes, dim_sizes, output.node.clone(), dim);
        output.register_step(ops, checkpointer_builder)
    }
    // Max along `dim`; gradient routing (via the shared MaxMinDim backward op)
    // needs the argmax indices, so the tracked path uses the with-indices kernel.
    fn float_max_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {
        match MaxMinDim
            .prepare::<C>([tensor.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => {
                let shape = tensor.primitive.shape();
                let (tensor, index) = B::float_max_dim_with_indices(tensor.primitive, dim);
                prep.finish((index, shape), tensor)
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_max_dim(tensor.primitive, dim)),
        }
    }
    // Same as float_max_dim but also returns the (non-differentiable) indices.
    fn float_max_dim_with_indices(
        tensor: FloatTensor<Self>,
        dim: usize,
    ) -> (FloatTensor<Self>, IntTensor<B>) {
        match MaxMinDim
            .prepare::<C>([tensor.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => {
                let shape = tensor.primitive.shape();
                let (tensor, index) = B::float_max_dim_with_indices(tensor.primitive, dim);
                // Indices are cloned: one copy feeds the backward state, one is returned.
                let tensor = prep.finish((index.clone(), shape), tensor);
                (tensor, index)
            }
            OpsKind::UnTracked(prep) => {
                let (tensor, index) = B::float_max_dim_with_indices(tensor.primitive, dim);
                let tensor = prep.finish(tensor);
                (tensor, index)
            }
        }
    }
    // Min along `dim`; mirrors float_max_dim, sharing the MaxMinDim backward op.
    fn float_min_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {
        match MaxMinDim
            .prepare::<C>([tensor.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => {
                let shape = tensor.primitive.shape();
                let (tensor, index) = B::float_min_dim_with_indices(tensor.primitive, dim);
                prep.finish((index, shape), tensor)
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_min_dim(tensor.primitive, dim)),
        }
    }
    // Same as float_min_dim but also returns the (non-differentiable) indices.
    fn float_min_dim_with_indices(
        tensor: FloatTensor<Self>,
        dim: usize,
    ) -> (FloatTensor<Self>, IntTensor<B>) {
        match MaxMinDim
            .prepare::<C>([tensor.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => {
                let shape = tensor.primitive.shape();
                let (tensor, index) = B::float_min_dim_with_indices(tensor.primitive, dim);
                // Indices are cloned: one copy feeds the backward state, one is returned.
                let tensor = prep.finish((index.clone(), shape), tensor);
                (tensor, index)
            }
            OpsKind::UnTracked(prep) => {
                let (tensor, index) = B::float_min_dim_with_indices(tensor.primitive, dim);
                let tensor = prep.finish(tensor);
                (tensor, index)
            }
        }
    }
    // Float-to-int conversion is not differentiable, so it bypasses the graph.
    fn float_into_int(tensor: FloatTensor<Self>) -> <Autodiff<B> as Backend>::IntTensorPrimitive {
        B::float_into_int(tensor.primitive)
    }
    fn float_powf(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {
        // Backward op marker for elementwise power with a tensor exponent.
        #[derive(Debug)]
        struct PowF;
        retro_binary!(RetroPowf, B::float_powf);
        impl<B: Backend> Backward<B, 2> for PowF {
            // (lhs NodeID, rhs NodeID, broadcast info) captured at forward time.
            type State = (NodeID, NodeID, BinaryOpsBroadcast);
            fn backward(
                self,
                ops: Ops<Self::State, 2>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                let (lhs_id, rhs_id, broadcast) = ops.state;
                let lhs: B::FloatTensorPrimitive = checkpointer.retrieve_node_output(lhs_id);
                let rhs: B::FloatTensorPrimitive = checkpointer.retrieve_node_output(rhs_id);
                // Both lhs and rhs are needed for both lhs and rhs gradients, but we clone them
                // the number of times required by the parents specification.
                let [rhs_4lhs, rhs_4rhs] = duplicate(&ops.parents, Some(rhs));
                let [lhs_4lhs, lhs_4rhs] = duplicate(&ops.parents, Some(lhs));
                binary::<B, _, _>(
                    ops.parents,
                    ops.node,
                    grads,
                    |grad| {
                        //rhs*(lhs.val**(rhs-1))*grad
                        let rhs1 = rhs_4lhs.unwrap();
                        let rhs2 = rhs1.clone();
                        let lhs = lhs_4lhs.unwrap();
                        let tmp = B::float_powf(
                            lhs,
                            B::float_sub_scalar(rhs1, B::FloatElem::from_elem(1.0)),
                        );
                        let value = B::float_mul(tmp, rhs2);
                        let grad = B::float_mul(grad, value);
                        // Reduce any broadcast dimensions back to lhs's shape.
                        broadcast.backward_lhs::<B>(grad)
                    },
                    |grad| {
                        //lhs**rhs * ln(lhs) * grad
                        let rhs = rhs_4rhs.unwrap();
                        let lhs1 = lhs_4rhs.unwrap();
                        let lhs2 = lhs1.clone();
                        let tmp = B::float_powf(lhs1, rhs);
                        let value = B::float_mul(tmp, B::float_log(lhs2));
                        let grad = B::float_mul(grad, value);
                        // Reduce any broadcast dimensions back to rhs's shape.
                        broadcast.backward_rhs::<B>(grad)
                    },
                );
            }
        }
        let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);
        match PowF
            .prepare::<C>([lhs.node.clone(), rhs.node.clone()])
            .memory_bound()
            .retro_forward(RetroPowf::<B>::new(lhs.node.id, rhs.node.id))
            .parents([&lhs, &rhs])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                let lhs_state = prep.checkpoint(&lhs);
                let rhs_state = prep.checkpoint(&rhs);
                prep.finish(
                    (lhs_state, rhs_state, broadcast),
                    B::float_powf(lhs.primitive, rhs.primitive),
                )
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_powf(lhs.primitive, rhs.primitive)),
        }
    }
    fn float_sign(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        // Backward op marker for sign; carries no state.
        #[derive(Debug)]
        struct Sign;
        retro_unary!(RetroSign, B::float_sign);
        impl<B: Backend> Backward<B, 1> for Sign {
            type State = ();
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                unary::<B, _>(ops.parents, ops.node, grads, |grad|
                    // Always return 0 because the derivative of the sign function
                    // does not contribute to gradient updates in a meaningful way.
                    B::float_mul_scalar(grad, 0.elem()));
            }
        }
        // Stateless path: no checkpoint needed since the backward ignores the input.
        Sign.prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroSign::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateless(B::float_sign(tensor.primitive))
    }
    fn float_expand(tensor: FloatTensor<Self>, shape: Shape) -> FloatTensor<Self> {
        // D1: tensor, D2: shape
        // Backward op marker for broadcasting expansion.
        #[derive(Debug)]
        struct ExpandDim;
        // Recomputes the forward expand from a checkpointed input.
        #[derive(new, Debug)]
        struct RetroExpand<B: Backend> {
            input_id: NodeID,
            shape: Shape,
            _backend: PhantomData<B>,
        }
        impl<B: Backend> RetroForward for RetroExpand<B> {
            fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
                let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);
                let out = B::float_expand(input, self.shape.clone());
                states.save(out_node, out)
            }
        }
        impl<B: Backend> Backward<B, 1> for ExpandDim {
            // (input shape, output shape) captured at forward time.
            type State = (Shape, Shape);
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (shape_in, shape_out) = ops.state;
                let ndims_in = shape_in.num_dims();
                let ndims_out = shape_out.num_dims();
                // Right-align the input shape against the output rank, padding
                // leading dims with 1 (standard broadcasting alignment).
                let mut shape_expanded = vec![1; ndims_out];
                debug_assert!(ndims_out >= ndims_in);
                for i in 0..ndims_in {
                    shape_expanded[i + (ndims_out - ndims_in)] = shape_in.dims[i];
                }
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    let shape_grad = grad.shape();
                    let mut grad = grad;
                    // Sum over every dimension that was broadcast from size 1.
                    #[allow(clippy::needless_range_loop)]
                    for i in 0..ndims_out {
                        if shape_expanded[i] == 1 && shape_grad.dims[i] != 1 {
                            grad = B::float_sum_dim(grad, i);
                        }
                    }
                    // Collapse back to the original input rank/shape.
                    B::float_reshape(grad, shape_in)
                });
            }
        }
        match ExpandDim
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroExpand::<B>::new(tensor.node.id, shape.clone()))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(prep) => prep.finish(
                (tensor.primitive.shape(), shape.clone()),
                B::float_expand(tensor.primitive, shape),
            ),
            OpsKind::UnTracked(prep) => prep.finish(B::float_expand(tensor.primitive, shape)),
        }
    }
fn float_sort(tensor: FloatTensor<Self>, dim: usize, descending: bool) -> FloatTensor<Self> {
    // Reuses the shared `SortDim` backward op. When the node is tracked, the
    // sort permutation indices and the input shape are saved as state for the
    // backward pass.
    let prep = super::sort::SortDim
        .prepare::<C>([tensor.node])
        .compute_bound()
        .stateful();

    match prep {
        OpsKind::Tracked(tracked) => {
            let input_shape = tensor.primitive.shape();
            let (sorted, permutation) =
                B::float_sort_with_indices(tensor.primitive, dim, descending);
            tracked.finish((permutation, input_shape), sorted)
        }
        OpsKind::UnTracked(untracked) => {
            // No gradients required: the cheaper index-free sort suffices.
            untracked.finish(B::float_sort(tensor.primitive, dim, descending))
        }
    }
}
fn float_sort_with_indices(
    tensor: FloatTensor<Self>,
    dim: usize,
    descending: bool,
) -> (FloatTensor<Self>, IntTensor<B>) {
    // Same backward op as `float_sort`; the permutation indices are
    // additionally handed back to the caller (integer outputs carry no
    // gradient themselves).
    let prep = super::sort::SortDim
        .prepare::<C>([tensor.node])
        .compute_bound()
        .stateful();

    match prep {
        OpsKind::Tracked(tracked) => {
            let input_shape = tensor.primitive.shape();
            let (sorted, permutation) =
                B::float_sort_with_indices(tensor.primitive, dim, descending);
            let sorted = tracked.finish((permutation.clone(), input_shape), sorted);
            (sorted, permutation)
        }
        OpsKind::UnTracked(untracked) => {
            let (sorted, permutation) =
                B::float_sort_with_indices(tensor.primitive, dim, descending);
            (untracked.finish(sorted), permutation)
        }
    }
}
fn float_argsort(tensor: FloatTensor<Self>, dim: usize, descending: bool) -> IntTensor<B> {
    // Argsort produces integer indices, which are non-differentiable, so the
    // inner-backend result is returned directly without registering any
    // autodiff node.
    let primitive = tensor.primitive;
    B::float_argsort(primitive, dim, descending)
}
fn float_repeat_dim(tensor: FloatTensor<Self>, dim: usize, times: usize) -> FloatTensor<Self> {
    #[derive(Debug)]
    struct Repeat;

    // Replays the repeat during checkpoint recomputation.
    #[derive(new, Debug)]
    struct RetroRepeat<B: Backend> {
        tensor_id: NodeID,
        dim: usize,
        times: usize,
        _backend: PhantomData<B>,
    }

    impl<B: Backend> RetroForward for RetroRepeat<B> {
        fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
            let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);
            let out = B::float_repeat_dim(tensor, self.dim, self.times);
            states.save(out_node, out)
        }
    }

    impl<B: Backend> Backward<B, 1> for Repeat {
        // State = (repeated dim, repeat count).
        type State = (usize, usize);

        fn backward(
            self,
            ops: Ops<Self::State, 1>,
            grads: &mut Gradients,
            _checkpointer: &mut Checkpointer,
        ) {
            let (dim, times) = ops.state;

            unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                let mut dims = grad.shape().dims;
                // The output dim is `times` copies of the original, so divide
                // to recover the pre-repeat size.
                let orig_dim_size = dims[dim] / times;
                if orig_dim_size > 1 {
                    dims[dim] = orig_dim_size;
                    let orig_dims = dims.clone();
                    // NOTE(review): splitting the repeated dim as
                    // [orig_dim_size, times] assumes the `times` copies of each
                    // element are contiguous (interleaved layout). If
                    // B::float_repeat_dim tiles whole blocks (cat-style, copies
                    // outermost), the factor order would need to be
                    // [times, orig_dim_size] with the sum over `dim` — confirm
                    // against the backend's repeat semantics.
                    dims.insert(dim + 1, times); // shape [..., orig_dim_size, times, ...]
                    let grad = B::float_reshape(grad, Shape::from(dims));
                    let grad = B::float_sum_dim(grad, dim + 1); // sum over repeat times
                    B::float_reshape(grad, Shape::from(orig_dims))
                } else {
                    // Original size along `dim` was 1: every gradient slice
                    // maps to the same element, so a plain sum suffices.
                    B::float_sum_dim(grad, dim)
                }
            });
        }
    }

    match Repeat
        .prepare::<C>([tensor.node.clone()])
        .memory_bound()
        .retro_forward(RetroRepeat::<B>::new(tensor.node.id, dim, times))
        .parents([&tensor])
        .stateful()
    {
        OpsKind::Tracked(prep) => prep.finish(
            (dim, times),
            B::float_repeat_dim(tensor.primitive, dim, times),
        ),
        OpsKind::UnTracked(prep) => {
            prep.finish(B::float_repeat_dim(tensor.primitive, dim, times))
        }
    }
}
fn float_cast(tensor: FloatTensor<Self>, dtype: burn_tensor::FloatDType) -> FloatTensor<Self> {
    // NOTE(review): unlike the other ops in this impl, no backward step is
    // registered here — the casted primitive is wrapped in a fresh
    // `AutodiffTensor`, which appears to detach the result from the autodiff
    // graph. Confirm that gradients are intentionally not propagated through
    // dtype casts.
    AutodiffTensor::new(B::float_cast(tensor.primitive, dtype))
}
// TODO: Implement float_prod and float_sum
// https://github.com/tracel-ai/burn/issues/1458
}
/// Records whether a binary op's operands had differing shapes in the forward
/// pass so the backward pass can reduce each gradient back to its operand's
/// original shape.
#[derive(Debug, Clone)]
enum BinaryOpsBroadcast {
    /// Shapes differed: holds the (lhs, rhs) shapes seen in the forward pass.
    Broadcasted(Shape, Shape),
    /// Shapes matched exactly; gradients pass through unchanged.
    None,
}
impl BinaryOpsBroadcast {
    /// Compares both operand shapes and records them when any dimension
    /// differs; otherwise no broadcast bookkeeping is needed.
    fn new<B: Backend>(lhs: &B::FloatTensorPrimitive, rhs: &B::FloatTensorPrimitive) -> Self {
        let shape_lhs = lhs.shape();
        let shape_rhs = rhs.shape();
        let ndims = shape_lhs.num_dims();

        // Indexed access (rather than zip) keeps the original out-of-bounds
        // behavior when the ranks disagree.
        let differs = (0..ndims).any(|i| shape_rhs.dims[i] != shape_lhs.dims[i]);

        if differs {
            Self::Broadcasted(shape_lhs, shape_rhs)
        } else {
            Self::None
        }
    }

    /// Reduces the gradient back to the left-hand operand's forward shape.
    fn backward_lhs<B: Backend>(&self, grad: B::FloatTensorPrimitive) -> B::FloatTensorPrimitive {
        if let Self::Broadcasted(lhs, _rhs) = self {
            broadcast_shape::<B>(grad, lhs)
        } else {
            grad
        }
    }

    /// Reduces the gradient back to the right-hand operand's forward shape.
    fn backward_rhs<B: Backend>(&self, grad: B::FloatTensorPrimitive) -> B::FloatTensorPrimitive {
        if let Self::Broadcasted(_lhs, rhs) = self {
            broadcast_shape::<B>(grad, rhs)
        } else {
            grad
        }
    }
}
| rust | {
"argument_definitions": [
{
"definitions": [
"impl<B: Backend, C: CheckpointStrategy> FloatTensorOps<Self> for Autodiff<B, C> {\n fn float_from_data(data: TensorData, device: &Device<Self>) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_from_data(data, device))\n }\n\n fn float_random(\n shape: Shape,\n distribution: burn_tensor::Distribution,\n device: &Device<Self>,\n ) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_random(shape, distribution, device))\n }\n\n fn float_zeros(shape: Shape, device: &Device<Self>) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_zeros(shape, device))\n }\n\n fn float_ones(shape: Shape, device: &Device<Self>) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_ones(shape, device))\n }\n\n async fn float_into_data(tensor: FloatTensor<Self>) -> TensorData {\n B::float_into_data(tensor.primitive).await\n }\n\n fn float_device(tensor: &FloatTensor<Self>) -> Device<Self> {\n B::float_device(&tensor.primitive)\n }\n\n fn float_to_device(tensor: FloatTensor<Self>, device: &Device<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct ToDevice;\n\n impl<B: Backend> Backward<B, 1> for ToDevice {\n type State = B::Device;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_to_device(grad, &ops.state)\n });\n }\n }\n\n match ToDevice\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let device_old = B::float_device(&tensor.primitive);\n prep.finish(device_old, B::float_to_device(tensor.primitive, device))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_to_device(tensor.primitive, device)),\n }\n }\n\n fn float_empty(shape: Shape, device: &Device<Self>) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_empty(shape, device))\n }\n\n fn float_add(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Add;\n\n retro_binary!(RetroAdd, 
B::float_add);\n\n impl<B: Backend> Backward<B, 2> for Add {\n type State = (Shape, Shape);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape_lhs, shape_rhs) = ops.state;\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| broadcast_shape::<B>(grad, &shape_lhs),\n |grad| broadcast_shape::<B>(grad, &shape_rhs),\n );\n }\n }\n\n match Add\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroAdd::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (lhs.primitive.shape(), rhs.primitive.shape()),\n B::float_add(lhs.primitive, rhs.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_add(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_add_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct AddScalar;\n\n retro_unary_scalar!(RetroAddScalar, B::float_add_scalar);\n\n impl<B: Backend> Backward<B, 1> for AddScalar {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| grad);\n }\n }\n\n AddScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroAddScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateless(B::float_add_scalar(lhs.primitive, rhs))\n }\n\n fn float_sub(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sub;\n\n retro_binary!(RetroSub, B::float_sub);\n\n impl<B: Backend> Backward<B, 2> for Sub {\n type State = (Shape, Shape);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape_lhs, shape_rhs) = ops.state;\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| 
broadcast_shape::<B>(grad, &shape_lhs),\n |grad| broadcast_shape::<B>(B::float_neg(grad), &shape_rhs),\n );\n }\n }\n\n match Sub\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroSub::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (lhs.primitive.shape(), rhs.primitive.shape()),\n B::float_sub(lhs.primitive, rhs.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_sub(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_sub_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct SubScalar;\n\n retro_unary_scalar!(RetroSubScalar, B::float_sub_scalar);\n\n impl<B: Backend> Backward<B, 1> for SubScalar {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| grad);\n }\n }\n\n SubScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroSubScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateless(B::float_sub_scalar(lhs.primitive, rhs))\n }\n\n fn float_mul(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Mul;\n\n retro_binary!(RetroMul, B::float_mul);\n\n impl<B: Backend> Backward<B, 2> for Mul {\n type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (lhs, rhs, broadcast) = ops.state;\n let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));\n let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let grad = B::float_mul(grad, rhs.unwrap());\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n let grad = B::float_mul(grad, lhs.unwrap());\n 
broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let lhs_tracked = lhs.is_tracked();\n let rhs_tracked = rhs.is_tracked();\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match Mul\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroMul::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));\n let rhs_state = lhs_tracked.then(|| prep.checkpoint(&rhs));\n\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n B::float_mul(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_mul(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_mul_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct MulScalar;\n\n retro_unary_scalar!(RetroMulScalar, B::float_mul_scalar);\n\n impl<B: Backend> Backward<B, 1> for MulScalar {\n type State = FloatElem<B>;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_mul_scalar(grad, ops.state)\n });\n }\n }\n\n match MulScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroMulScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(rhs, B::float_mul_scalar(lhs.primitive, rhs)),\n OpsKind::UnTracked(prep) => prep.finish(B::float_mul_scalar(lhs.primitive, rhs)),\n }\n }\n\n fn float_div(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Div;\n\n retro_binary!(RetroDiv, B::float_div);\n\n impl<B: Backend> Backward<B, 2> for Div {\n type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut 
Checkpointer,\n ) {\n let (lhs, rhs, broadcast) = ops.state;\n let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));\n let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));\n let [rhs_4lhs, rhs_4rhs] = duplicate(&ops.parents, rhs);\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let rhs = rhs_4lhs.unwrap();\n let value = B::float_powf_scalar(rhs, -1.0);\n let grad = B::float_mul(grad, value);\n\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n let rhs = rhs_4rhs.unwrap();\n let lhs = lhs.unwrap();\n let value = B::float_div(B::float_neg(lhs), B::float_powf_scalar(rhs, 2.0));\n let grad = B::float_mul(grad, value);\n\n broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let lhs_tracked = lhs.is_tracked();\n let rhs_tracked = rhs.is_tracked();\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match Div\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroDiv::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));\n let rhs_state = (lhs_tracked || rhs_tracked).then(|| prep.checkpoint(&rhs));\n\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n B::float_div(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_div(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_div_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct DivScalar;\n\n retro_unary_scalar!(RetroDivScalar, B::float_div_scalar);\n\n impl<B: Backend> Backward<B, 1> for DivScalar {\n type State = FloatElem<B>;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let tmp = 1.0 / ops.state.elem::<f32>();\n B::float_mul_scalar(grad, tmp.elem())\n });\n }\n 
}\n\n match DivScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroDivScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(rhs, B::float_div_scalar(lhs.primitive, rhs)),\n OpsKind::UnTracked(prep) => prep.finish(B::float_div_scalar(lhs.primitive, rhs)),\n }\n }\n\n fn float_remainder(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Rem;\n\n retro_binary!(RetroRem, B::float_remainder);\n\n impl<B: Backend> Backward<B, 2> for Rem {\n type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (lhs, rhs, broadcast) = ops.state;\n let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));\n let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n // remainder(x, y) = x - floor(x / y) * y\n // partial(x - floor(x / y) * y, x) = 1\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n // partial(x - floor(x / y) * y, y) = - floor(x / y)\n let rhs = rhs.unwrap();\n let lhs = lhs.unwrap();\n let value = B::float_neg(B::float_floor(B::float_div(lhs, rhs)));\n let grad = B::float_mul(grad, value);\n broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let lhs_tracked = lhs.is_tracked();\n let rhs_tracked = rhs.is_tracked();\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match Rem\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroRem::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));\n let rhs_state = (lhs_tracked || rhs_tracked).then(|| prep.checkpoint(&rhs));\n\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n 
B::float_remainder(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_remainder(lhs.primitive, rhs.primitive))\n }\n }\n }\n\n fn float_remainder_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct RemainderScalar;\n\n retro_unary_scalar!(RetroRemainderScalar, B::float_remainder_scalar);\n\n impl<B: Backend> Backward<B, 1> for RemainderScalar {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| grad);\n }\n }\n\n RemainderScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroRemainderScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateless(B::float_remainder_scalar(lhs.primitive, rhs))\n }\n\n fn float_matmul(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Matmul;\n\n impl<B: Backend> Backward<B, 2> for Matmul {\n type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (lhs, rhs, broadcast) = ops.state;\n let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));\n let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let rhs = B::float_transpose(rhs.unwrap());\n let grad = B::float_matmul(grad, rhs);\n\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n let lhs = B::float_transpose(lhs.unwrap());\n let grad = B::float_matmul(lhs, grad);\n\n broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let lhs_tracked = lhs.is_tracked();\n let rhs_tracked = rhs.is_tracked();\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match Matmul\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n 
.compute_bound()\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));\n let rhs_state = lhs_tracked.then(|| prep.checkpoint(&rhs));\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n B::float_matmul(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_matmul(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_neg(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Neg;\n\n retro_unary!(RetroNeg, B::float_neg);\n\n impl<B: Backend> Backward<B, 1> for Neg {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| B::float_neg(grad));\n }\n }\n\n Neg.prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroNeg::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateless(B::float_neg(tensor.primitive))\n }\n\n fn float_recip(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Recip;\n\n retro_unary!(RetroRecip, B::float_recip);\n\n impl<B: Backend> Backward<B, 1> for Recip {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let tensor = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let tmp = B::float_powf_scalar(tensor, -2.0);\n let value = B::float_neg(tmp);\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Recip\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroRecip::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_recip(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_recip(tensor.primitive)),\n }\n }\n\n fn float_swap_dims(tensor: 
FloatTensor<Self>, dim1: usize, dim2: usize) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct SwapDim;\n\n #[derive(new, Debug)]\n struct RetroSwapDims<B: Backend> {\n input_id: NodeID,\n dim1: usize,\n dim2: usize,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroSwapDims<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_swap_dims(input, self.dim1, self.dim2);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for SwapDim {\n type State = (usize, usize);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim1, dim2) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_swap_dims(grad, dim2, dim1)\n });\n }\n }\n\n match SwapDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSwapDims::<B>::new(tensor.node.id, dim1, dim2))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (dim1, dim2),\n B::float_swap_dims(tensor.primitive, dim1, dim2),\n ),\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_swap_dims(tensor.primitive, dim1, dim2))\n }\n }\n }\n\n fn float_permute(tensor: FloatTensor<Self>, axes: &[usize]) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct PermuteDim;\n\n #[derive(new, Debug)]\n struct RetroPermuteDims<B: Backend> {\n input_id: NodeID,\n axes: Vec<usize>,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroPermuteDims<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_permute(input, &self.axes);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for PermuteDim {\n type State = Vec<usize>;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: 
&mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let axes = ops.state;\n\n let mut inverse = vec![0usize; axes.len()];\n axes.iter()\n .enumerate()\n .for_each(|(i, &axis)| inverse[axis] = i);\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_permute(grad, &inverse)\n });\n }\n }\n\n match PermuteDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroPermuteDims::<B>::new(tensor.node.id, axes.to_vec()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n prep.finish(axes.to_vec(), B::float_permute(tensor.primitive, axes))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_permute(tensor.primitive, axes)),\n }\n }\n\n fn float_flip(tensor: FloatTensor<Self>, axes: &[usize]) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct FlipDim;\n\n #[derive(new, Debug)]\n struct RetroFlipDims<B: Backend> {\n input_id: NodeID,\n axes: Vec<usize>,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroFlipDims<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_flip(input, &self.axes);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for FlipDim {\n type State = Vec<usize>;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let axes = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_flip(grad, &axes)\n });\n }\n }\n\n match FlipDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroFlipDims::<B>::new(tensor.node.id, axes.to_vec()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n prep.finish(axes.to_vec(), B::float_flip(tensor.primitive, axes))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_flip(tensor.primitive, axes)),\n }\n }\n\n fn float_reshape(tensor: FloatTensor<Self>, shape: 
Shape) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct ReshapeDim;\n\n #[derive(new, Debug)]\n struct RetroReshape<B: Backend> {\n input_id: NodeID,\n shape: Shape,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroReshape<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_reshape(input, self.shape.clone());\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for ReshapeDim {\n type State = (Shape, Shape);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape_original, shape) = ops.state;\n let ndims_out = shape.num_dims();\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let shape_grad = grad.shape();\n let mut grad = grad;\n\n for i in 0..ndims_out {\n if shape.dims[i] == 1 && shape_grad.dims[i] != 1 {\n grad = B::float_sum_dim(grad, i);\n }\n }\n\n B::float_reshape(grad, shape_original)\n });\n }\n }\n\n match ReshapeDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroReshape::<B>::new(tensor.node.id, shape.clone()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (tensor.primitive.shape(), shape.clone()),\n B::float_reshape(tensor.primitive, shape),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_reshape(tensor.primitive, shape)),\n }\n }\n\n fn float_gather(\n dim: usize,\n tensor: FloatTensor<Self>,\n indices: IntTensor<B>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Gather;\n\n impl<B: Backend> Backward<B, 1> for Gather {\n type State = (usize, IntTensor<B>, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, indices, shape, device) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let zeros = 
B::float_zeros(shape, &device);\n B::float_scatter(dim, zeros, indices, grad)\n });\n }\n }\n\n match Gather\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n dim,\n indices.clone(),\n tensor.primitive.shape(),\n B::float_device(&tensor.primitive),\n ),\n B::float_gather(dim, tensor.primitive, indices),\n ),\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_gather(dim, tensor.primitive, indices))\n }\n }\n }\n\n fn float_scatter(\n dim: usize,\n tensor: FloatTensor<Self>,\n indices: IntTensor<B>,\n value: FloatTensor<Self>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Scatter;\n\n impl<B: Backend> Backward<B, 2> for Scatter {\n type State = (usize, IntTensor<B>, Shape, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, indices, shape_lhs, shape_rhs, device) = ops.state;\n let [indices_4lhs, indices_4rhs] = duplicate(&ops.parents, Some(indices));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let zeros = B::float_zeros(shape_lhs, &device);\n B::float_scatter(dim, grad, indices_4lhs.unwrap(), zeros)\n },\n |grad| {\n let zeros = B::float_zeros(shape_rhs, &device);\n B::float_scatter(dim, zeros, indices_4rhs.unwrap(), grad)\n },\n );\n }\n }\n\n match Scatter\n .prepare::<C>([tensor.node, value.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n dim,\n indices.clone(),\n tensor.primitive.shape(),\n value.primitive.shape(),\n B::float_device(&value.primitive),\n ),\n B::float_scatter(dim, tensor.primitive, indices, value.primitive),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_scatter(\n dim,\n tensor.primitive,\n indices,\n value.primitive,\n )),\n }\n }\n\n fn float_select(\n tensor: FloatTensor<Self>,\n dim: usize,\n indices: IntTensor<B>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Select;\n\n 
#[derive(new, Debug)]\n struct RetroSelect<B: Backend> {\n input_id: NodeID,\n dim: usize,\n indices: IntTensor<B>,\n }\n\n impl<B: Backend> RetroForward for RetroSelect<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_select(input, self.dim, self.indices.clone());\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for Select {\n type State = (usize, IntTensor<B>, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, indices, shape, device) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let zeros = B::float_zeros(shape, &device);\n B::float_select_assign(zeros, dim, indices, grad)\n });\n }\n }\n\n match Select\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSelect::<B>::new(tensor.node.id, dim, indices.clone()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n dim,\n indices.clone(),\n tensor.primitive.shape(),\n B::float_device(&tensor.primitive),\n ),\n B::float_select(tensor.primitive, dim, indices),\n ),\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_select(tensor.primitive, dim, indices))\n }\n }\n }\n\n fn float_select_assign(\n tensor: FloatTensor<Self>,\n dim: usize,\n indices: IntTensor<B>,\n value: FloatTensor<Self>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct IndexSelectDimAssign;\n\n #[derive(new, Debug)]\n struct RetroSelectAssign<B: Backend> {\n tensor_id: NodeID,\n dim: usize,\n indices: IntTensor<B>,\n value_id: NodeID,\n }\n\n impl<B: Backend> RetroForward for RetroSelectAssign<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);\n let value = states.get_state::<B::FloatTensorPrimitive>(&self.value_id);\n 
let out = B::float_select_assign(tensor, self.dim, self.indices.clone(), value);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 2> for IndexSelectDimAssign {\n type State = (usize, IntTensor<B>);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, indices) = ops.state;\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| grad,\n |grad| B::float_select(grad, dim, indices),\n );\n }\n }\n\n match IndexSelectDimAssign\n .prepare::<C>([tensor.node.clone(), value.node.clone()])\n .memory_bound()\n .retro_forward(RetroSelectAssign::<B>::new(\n tensor.node.id,\n dim,\n indices.clone(),\n value.node.id,\n ))\n .parents([&tensor, &value])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (dim, indices.clone()),\n B::float_select_assign(tensor.primitive, dim, indices, value.primitive),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_select_assign(\n tensor.primitive,\n dim,\n indices,\n value.primitive,\n )),\n }\n }\n\n fn float_slice(\n tensor: FloatTensor<Self>,\n ranges: &[core::ops::Range<usize>],\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Index;\n\n #[derive(new, Debug)]\n struct RetroSlice<B: Backend> {\n tensor_id: NodeID,\n ranges: Vec<core::ops::Range<usize>>,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroSlice<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);\n let out = B::float_slice(tensor, &self.ranges);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for Index {\n type State = (Vec<core::ops::Range<usize>>, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (ranges, shape, device) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let zeros = 
B::float_zeros(shape, &device);\n B::float_slice_assign(zeros, &ranges, grad)\n });\n }\n }\n\n match Index\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSlice::<B>::new(tensor.node.id, ranges.to_vec()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n ranges.to_vec(),\n tensor.primitive.shape(),\n B::float_device(&tensor.primitive),\n ),\n B::float_slice(tensor.primitive, ranges),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_slice(tensor.primitive, ranges)),\n }\n }\n\n fn float_slice_assign(\n tensor: FloatTensor<Self>,\n ranges: &[core::ops::Range<usize>],\n value: FloatTensor<Self>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct SliceAssign;\n\n #[derive(new, Debug)]\n struct RetroSliceAssign<B: Backend> {\n tensor_id: NodeID,\n ranges: Vec<core::ops::Range<usize>>,\n value_id: NodeID,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroSliceAssign<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);\n let value = states.get_state::<B::FloatTensorPrimitive>(&self.value_id);\n let out = B::float_slice_assign(tensor, &self.ranges, value);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 2> for SliceAssign {\n type State = (Vec<core::ops::Range<usize>>, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (ranges, shape_rhs, device) = ops.state;\n let [ranges_4lhs, ranges_4rhs] = duplicate(&ops.parents, Some(ranges));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let zeros = B::float_zeros(shape_rhs, &device);\n B::float_slice_assign(grad, &ranges_4lhs.unwrap(), zeros)\n },\n |grad| B::float_slice(grad, &ranges_4rhs.unwrap()),\n );\n }\n }\n\n match SliceAssign\n .prepare::<C>([tensor.node.clone(), value.node.clone()])\n 
.memory_bound()\n .retro_forward(RetroSliceAssign::<B>::new(\n tensor.node.id,\n ranges.to_vec(),\n value.node.id,\n ))\n .parents([&tensor, &value])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n ranges.to_vec(),\n value.primitive.shape(),\n B::float_device(&value.primitive),\n ),\n B::float_slice_assign(tensor.primitive, ranges, value.primitive),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_slice_assign(\n tensor.primitive,\n ranges,\n value.primitive,\n )),\n }\n }\n\n fn float_mask_where(\n tensor: FloatTensor<Self>,\n mask: BoolTensor<Self>,\n source: FloatTensor<Self>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct MaskWhere;\n\n impl<B: Backend> Backward<B, 2> for MaskWhere {\n type State = (BoolTensor<B>, Shape, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (mask, shape_lhs, shape_rhs, device) = ops.state;\n let [mask_4lhs, mask_4rhs] = duplicate(&ops.parents, Some(mask));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let zeros = B::float_zeros(shape_lhs.clone(), &device);\n let grad = B::float_mask_where(grad, mask_4lhs.unwrap(), zeros);\n\n broadcast_shape::<B>(grad, &shape_lhs)\n },\n |grad| {\n let zeros = B::float_zeros(shape_rhs.clone(), &device);\n let grad = B::float_mask_where(zeros, mask_4rhs.unwrap(), grad);\n\n broadcast_shape::<B>(grad, &shape_rhs)\n },\n );\n }\n }\n\n match MaskWhere\n .prepare::<C>([tensor.node, source.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n mask.clone(),\n tensor.primitive.shape(),\n source.primitive.shape(),\n B::float_device(&source.primitive),\n ),\n B::float_mask_where(tensor.primitive, mask, source.primitive),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_mask_where(\n tensor.primitive,\n mask,\n source.primitive,\n )),\n }\n }\n\n fn float_mask_fill(\n tensor: FloatTensor<Self>,\n mask: 
BoolTensor<B>,\n value: FloatElem<B>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct MaskFill;\n\n impl<B: Backend> Backward<B, 1> for MaskFill {\n type State = BoolTensor<B>;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_mask_fill(grad, ops.state, 0.elem())\n });\n }\n }\n\n match MaskFill\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n mask.clone(),\n B::float_mask_fill(tensor.primitive, mask, value),\n ),\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_mask_fill(tensor.primitive, mask, value))\n }\n }\n }\n\n fn float_equal(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {\n B::float_equal(lhs.primitive, rhs.primitive)\n }\n\n fn float_equal_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {\n B::float_equal_elem(lhs.primitive, rhs)\n }\n\n fn float_greater(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {\n B::float_greater(lhs.primitive, rhs.primitive)\n }\n\n fn float_greater_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {\n B::float_greater_elem(lhs.primitive, rhs)\n }\n\n fn float_greater_equal(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {\n B::float_greater_equal(lhs.primitive, rhs.primitive)\n }\n\n fn float_greater_equal_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {\n B::float_greater_equal_elem(lhs.primitive, rhs)\n }\n\n fn float_lower(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {\n B::float_lower(lhs.primitive, rhs.primitive)\n }\n\n fn float_lower_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {\n B::float_lower_elem(lhs.primitive, rhs)\n }\n\n fn float_lower_equal(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {\n B::float_lower_equal(lhs.primitive, rhs.primitive)\n 
}\n\n fn float_lower_equal_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {\n B::float_lower_equal_elem(lhs.primitive, rhs)\n }\n\n fn float_detach(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n // When we detach a tensor, we remove it from the graph, but we still want to keep the\n // `require_grad` setting.\n let is_require_grad = Self::float_is_require_grad(&tensor);\n let tensor = AutodiffTensor::new(tensor.primitive);\n\n match is_require_grad {\n true => tensor.require_grad(),\n false => tensor,\n }\n }\n\n fn float_set_require_grad(tensor: FloatTensor<Self>, require_grad: bool) -> FloatTensor<Self> {\n if require_grad {\n return tensor.require_grad();\n }\n\n AutodiffTensor::new(tensor.primitive)\n }\n\n fn float_is_require_grad(tensor: &FloatTensor<Self>) -> bool {\n matches!(tensor.node.requirement, Requirement::Grad)\n }\n\n fn float_mean(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Mean;\n\n impl<B: Backend> Backward<B, 1> for Mean {\n type State = Shape;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let shape = ops.state;\n let val = 1_f64 / shape.num_elements() as f64;\n let ones = B::float_ones(shape, &B::float_device(&grad));\n let val = B::float_mul_scalar(ones, val.elem());\n\n let grad = unsqueeze_like::<B>(grad, val.shape());\n B::float_mul(val, grad)\n });\n }\n }\n\n match Mean.prepare::<C>([tensor.node]).compute_bound().stateful() {\n OpsKind::Tracked(prep) => {\n prep.finish(tensor.primitive.shape(), B::float_mean(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_mean(tensor.primitive)),\n }\n }\n\n fn float_sum(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sum;\n\n impl<B: Backend> Backward<B, 1> for Sum {\n type State = Shape;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut 
Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let val = B::float_ones(ops.state, &B::float_device(&grad));\n\n let grad = unsqueeze_like::<B>(grad, val.shape());\n B::float_mul(val, grad)\n });\n }\n }\n\n match Sum.prepare::<C>([tensor.node]).compute_bound().stateful() {\n OpsKind::Tracked(prep) => {\n prep.finish(tensor.primitive.shape(), B::float_sum(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_sum(tensor.primitive)),\n }\n }\n\n fn float_mean_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct MeanDim;\n\n impl<B: Backend> Backward<B, 1> for MeanDim {\n type State = (Shape, usize);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape, dim) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let val = 1_f64 / shape.dims[dim] as f64;\n let ones = B::float_ones(shape, &B::float_device(&grad));\n let val = B::float_mul_scalar(ones, B::FloatElem::from_elem(val));\n\n let grad = B::float_sum_dim(grad, dim);\n B::float_mul(val, grad)\n });\n }\n }\n\n match MeanDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (tensor.primitive.shape(), dim),\n B::float_mean_dim(tensor.primitive, dim),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_mean_dim(tensor.primitive, dim)),\n }\n }\n\n fn float_sum_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct SumDim;\n\n impl<B: Backend> Backward<B, 1> for SumDim {\n type State = (Shape, usize);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape, dim) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let ones = B::float_ones(shape, &B::float_device(&grad));\n let grad = 
B::float_sum_dim(grad, dim);\n\n B::float_mul(ones, grad)\n });\n }\n }\n\n match SumDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (tensor.primitive.shape(), dim),\n B::float_sum_dim(tensor.primitive, dim),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_sum_dim(tensor.primitive, dim)),\n }\n }\n\n fn float_argmax(tensor: FloatTensor<Self>, dim: usize) -> IntTensor<B> {\n B::float_argmax(tensor.primitive, dim)\n }\n\n fn float_argmin(tensor: FloatTensor<Self>, dim: usize) -> IntTensor<B> {\n B::float_argmin(tensor.primitive, dim)\n }\n\n fn float_exp(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Exp;\n\n retro_unary!(RetroExp, B::float_exp);\n\n impl<B: Backend> Backward<B, 1> for Exp {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n let output = B::float_exp(input);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_mul(grad, output)\n });\n }\n }\n\n match Exp\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroExp::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_exp(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_exp(tensor.primitive)),\n }\n }\n\n fn float_log(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Log;\n\n retro_unary!(RetroLog, B::float_log);\n\n impl<B: Backend> Backward<B, 1> for Log {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = 
B::float_powf_scalar(input, -1.0);\n B::float_mul(grad, value)\n });\n }\n }\n\n match Log\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroLog::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_log(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_log(tensor.primitive)),\n }\n }\n\n fn float_log1p(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Log1P;\n\n retro_unary!(RetroLog1P, B::float_log1p);\n\n impl<B: Backend> Backward<B, 1> for Log1P {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_add_scalar(input, 1.elem());\n let value = B::float_powf_scalar(value, -1.0);\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Log1P\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroLog1P::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_log1p(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_log1p(tensor.primitive)),\n }\n }\n\n fn float_powf_scalar(tensor: FloatTensor<Self>, value: f32) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct PowfScalar;\n\n #[derive(new, Debug)]\n struct RetroPowfScalar<B: Backend> {\n lhs_id: NodeID,\n rhs: f32,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroPowfScalar<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let lhs = states.get_state::<B::FloatTensorPrimitive>(&self.lhs_id);\n let out = B::float_powf_scalar(lhs, self.rhs);\n states.save(out_node, out)\n }\n }\n\n impl<B: 
Backend> Backward<B, 1> for PowfScalar {\n type State = (NodeID, f32);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (tensor_id, value) = ops.state;\n let tensor = checkpointer.retrieve_node_output(tensor_id);\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let tmp = B::float_powf_scalar(tensor, value - 1.0);\n let value = B::float_mul_scalar(tmp, value.elem());\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match PowfScalar\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroPowfScalar::<B>::new(tensor.node.id, value))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = (prep.checkpoint(&tensor), value);\n prep.finish(state, B::float_powf_scalar(tensor.primitive, value))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_powf_scalar(tensor.primitive, value)),\n }\n }\n\n fn float_sqrt(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sqrt;\n\n retro_unary!(RetroSqrt, B::float_sqrt);\n\n impl<B: Backend> Backward<B, 1> for Sqrt {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_div_scalar(B::float_powf_scalar(input, -0.5), 2.elem());\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Sqrt\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSqrt::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_sqrt(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_sqrt(tensor.primitive)),\n }\n }\n\n fn float_abs(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Abs;\n\n 
retro_unary!(RetroAbs, B::float_abs);\n\n impl<B: Backend> Backward<B, 1> for Abs {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let tensor: B::FloatTensorPrimitive = checkpointer.retrieve_node_output(ops.state);\n let state = B::float_sign(tensor);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_mul(grad, state)\n });\n }\n }\n\n match Abs\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroAbs::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_abs(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_abs(tensor.primitive)),\n }\n }\n\n fn float_cos(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Cos;\n\n retro_unary!(RetroCos, B::float_cos);\n\n impl<B: Backend> Backward<B, 1> for Cos {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_neg(B::float_sin(input));\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Cos\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroCos::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_cos(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_cos(tensor.primitive)),\n }\n }\n\n fn float_sin(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sin;\n\n retro_unary!(RetroSin, B::float_sin);\n\n impl<B: Backend> Backward<B, 1> for Sin {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 
1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let state = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_cos(state);\n B::float_mul(grad, value)\n });\n }\n }\n\n match Sin\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSin::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_sin(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_sin(tensor.primitive)),\n }\n }\n\n fn float_tanh(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Tanh;\n\n retro_unary!(RetroTanh, B::float_tanh);\n\n impl<B: Backend> Backward<B, 1> for Tanh {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n let state = B::float_tanh(input);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_add_scalar(\n B::float_neg(B::float_powf_scalar(state, 2.0)),\n 1.elem(),\n );\n B::float_mul(grad, value)\n });\n }\n }\n\n match Tanh\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroTanh::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_tanh(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_tanh(tensor.primitive)),\n }\n }\n\n fn float_round(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Round;\n retro_unary!(RetroRound, B::float_round);\n\n impl<B: Backend> Backward<B, 1> for Round {\n type State = (Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n 
) {\n let (shape, device) = ops.state;\n unary::<B, _>(ops.parents, ops.node, grads, |_grad| {\n B::float_zeros(shape, &device)\n })\n }\n }\n\n match Round\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroRound::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (tensor.primitive.shape(), B::float_device(&tensor.primitive)),\n B::float_round(tensor.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_round(tensor.primitive)),\n }\n }\n\n fn float_floor(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Floor;\n retro_unary!(RetroFloor, B::float_floor);\n\n impl<B: Backend> Backward<B, 1> for Floor {\n type State = (Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape, device) = ops.state;\n unary::<B, _>(ops.parents, ops.node, grads, |_grad| {\n B::float_zeros(shape, &device)\n })\n }\n }\n\n match Floor\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroFloor::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (tensor.primitive.shape(), B::float_device(&tensor.primitive)),\n B::float_floor(tensor.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_floor(tensor.primitive)),\n }\n }\n\n fn float_ceil(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Ceil;\n retro_unary!(RetroCeil, B::float_ceil);\n\n impl<B: Backend> Backward<B, 1> for Ceil {\n type State = (Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape, device) = ops.state;\n unary::<B, _>(ops.parents, ops.node, grads, |_grad| {\n B::float_zeros(shape, &device)\n })\n }\n }\n\n match Ceil\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n 
.retro_forward(RetroCeil::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (tensor.primitive.shape(), B::float_device(&tensor.primitive)),\n B::float_floor(tensor.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_floor(tensor.primitive)),\n }\n }\n\n fn float_erf(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Erf;\n\n retro_unary!(RetroErf, B::float_erf);\n\n impl<B: Backend> Backward<B, 1> for Erf {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let ops = checkpointer.retrieve_node_output(ops.state);\n let exponent = B::float_neg(B::float_powf_scalar(ops, 2.0));\n let numerator = B::float_mul_scalar(B::float_exp(exponent), 2.0.elem());\n let denominator = core::f64::consts::PI.sqrt().elem();\n let value = B::float_div_scalar(numerator, denominator);\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Erf\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroErf::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_erf(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_erf(tensor.primitive)),\n }\n }\n\n fn float_cat(tensors: Vec<FloatTensor<Self>>, dim: usize) -> FloatTensor<Self> {\n #[derive(new, Debug)]\n struct CatStep<B: Backend> {\n nodes: Vec<Option<NodeRef>>,\n // The dimension of each tensor along the dim dimension.\n // This indicates the number of dimension concatenated for each tensor.\n dim_sizes: Vec<usize>,\n output: NodeRef,\n phantom: PhantomData<B>,\n dim: usize,\n }\n\n impl<B: Backend> Step for CatStep<B> {\n fn step(self: Box<Self>, grads: &mut Gradients, _checkpointer: &mut Checkpointer) {\n let grad = 
grads.consume::<B>(&self.output);
                // One range per output dimension, initially spanning the full
                // gradient; only the entry at `self.dim` is narrowed per input.
                let ranges: Vec<_> = grad.shape().dims.iter().map(|v| 0..*v).collect();

                let mut current_index = 0;

                // Walk the inputs in order: each surviving (still-tracked)
                // parent receives the slice of the output gradient covering
                // its extent along the concatenation dimension.
                self.nodes
                    .into_iter()
                    .zip(self.dim_sizes)
                    .filter_map(|(node, dim_size)| node.map(|node| (node, dim_size)))
                    .for_each(|(node, dim_size)| {
                        let mut ranges = ranges.clone();
                        ranges[self.dim] = current_index..dim_size + current_index;
                        current_index += dim_size;
                        grads.register::<B>(node.id, B::float_slice(grad.clone(), &ranges));
                    });
            }

            fn node(&self) -> NodeID {
                self.output.id
            }

            fn parents(&self) -> Vec<NodeID> {
                // Only inputs kept as `Some` (see `clone_if_require_grad`
                // below) count as parents of this step.
                self.nodes
                    .iter()
                    .filter_map(|node| node.clone())
                    .map(|node| node.id)
                    .collect()
            }
            fn depth(&self) -> usize {
                self.output.order
            }
        }

        let mut nodes = Vec::with_capacity(tensors.len());
        let mut primitives = Vec::with_capacity(tensors.len());
        let mut dim_sizes = Vec::with_capacity(tensors.len());

        // Split each input into its graph node, its backend primitive, and its
        // size along the concatenation dimension (needed by CatStep::step).
        tensors.into_iter().for_each(|tensor| {
            dim_sizes.push(tensor.primitive.shape().dims[dim]);
            nodes.push(tensor.node);
            primitives.push(tensor.primitive);
        });

        let requirement = Requirement::from_nodes(&nodes);

        // For simplicity, this operation does not checkpoint anything
        let cat_computing_property = ComputingProperty::Ambiguous;
        let checkpointer_builder = CheckpointerBuilder::default();

        let output = B::float_cat(primitives, dim);
        if requirement.is_none() {
            // No input requires grad: return an untracked result, no step.
            return AutodiffTensor::from_parents(
                output,
                &nodes,
                requirement,
                cat_computing_property,
            );
        }

        let output =
            AutodiffTensor::from_parents(output, &nodes, requirement, cat_computing_property);
        let nodes = nodes
            .into_iter()
            .map(|node| node.clone_if_require_grad())
            .collect::<Vec<_>>();

        let ops = CatStep::<B>::new(nodes, dim_sizes, output.node.clone(), dim);
        output.register_step(ops, checkpointer_builder)
    }

    // The four max/min reductions below share the `MaxMinDim` backward op
    // (defined elsewhere in this file); the state saved when tracked is the
    // (indices, input shape) pair — presumably used to scatter the gradient
    // back to the selected positions. TODO(review): confirm against the
    // MaxMinDim implementation.
    fn float_max_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {
        match MaxMinDim
            .prepare::<C>([tensor.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => {
                // Shape must be read before `tensor.primitive` is moved.
                let shape = tensor.primitive.shape();
                let (tensor, index) = B::float_max_dim_with_indices(tensor.primitive, dim);
                prep.finish((index, shape), tensor)
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_max_dim(tensor.primitive, dim)),
        }
    }
    fn float_max_dim_with_indices(
        tensor: FloatTensor<Self>,
        dim: usize,
    ) -> (FloatTensor<Self>, IntTensor<B>) {
        match MaxMinDim
            .prepare::<C>([tensor.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => {
                let shape = tensor.primitive.shape();
                let (tensor, index) = B::float_max_dim_with_indices(tensor.primitive, dim);
                // Indices are both returned to the caller and kept as state.
                let tensor = prep.finish((index.clone(), shape), tensor);

                (tensor, index)
            }
            OpsKind::UnTracked(prep) => {
                let (tensor, index) = B::float_max_dim_with_indices(tensor.primitive, dim);
                let tensor = prep.finish(tensor);

                (tensor, index)
            }
        }
    }
    fn float_min_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {
        match MaxMinDim
            .prepare::<C>([tensor.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => {
                let shape = tensor.primitive.shape();
                let (tensor, index) = B::float_min_dim_with_indices(tensor.primitive, dim);
                prep.finish((index, shape), tensor)
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_min_dim(tensor.primitive, dim)),
        }
    }
    fn float_min_dim_with_indices(
        tensor: FloatTensor<Self>,
        dim: usize,
    ) -> (FloatTensor<Self>, IntTensor<B>) {
        match MaxMinDim
            .prepare::<C>([tensor.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => {
                let shape = tensor.primitive.shape();
                let (tensor, index) = B::float_min_dim_with_indices(tensor.primitive, dim);
                let tensor = prep.finish((index.clone(), shape), tensor);

                (tensor, index)
            }
            OpsKind::UnTracked(prep) => {
                let (tensor, index) = B::float_min_dim_with_indices(tensor.primitive, dim);
                let tensor = prep.finish(tensor);

                (tensor, index)
            }
        }
    }

    
fn float_into_int(tensor: FloatTensor<Self>) -> <Autodiff<B> as Backend>::IntTensorPrimitive {\n B::float_into_int(tensor.primitive)\n }\n\n fn float_powf(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct PowF;\n\n retro_binary!(RetroPowf, B::float_powf);\n\n impl<B: Backend> Backward<B, 2> for PowF {\n type State = (NodeID, NodeID, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (lhs_id, rhs_id, broadcast) = ops.state;\n let lhs: B::FloatTensorPrimitive = checkpointer.retrieve_node_output(lhs_id);\n let rhs: B::FloatTensorPrimitive = checkpointer.retrieve_node_output(rhs_id);\n\n // Both lhs and rhs are needed for both lhs and rhs gradients, but we clone them\n // the number of times required by the parents specification.\n let [rhs_4lhs, rhs_4rhs] = duplicate(&ops.parents, Some(rhs));\n let [lhs_4lhs, lhs_4rhs] = duplicate(&ops.parents, Some(lhs));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n //rhs*(lhs.val**(rhs-1))*grad\n let rhs1 = rhs_4lhs.unwrap();\n let rhs2 = rhs1.clone();\n let lhs = lhs_4lhs.unwrap();\n\n let tmp = B::float_powf(\n lhs,\n B::float_sub_scalar(rhs1, B::FloatElem::from_elem(1.0)),\n );\n let value = B::float_mul(tmp, rhs2);\n let grad = B::float_mul(grad, value);\n\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n //lhs**rhs * ln(lhs) * grad\n let rhs = rhs_4rhs.unwrap();\n let lhs1 = lhs_4rhs.unwrap();\n let lhs2 = lhs1.clone();\n let tmp = B::float_powf(lhs1, rhs);\n let value = B::float_mul(tmp, B::float_log(lhs2));\n let grad = B::float_mul(grad, value);\n\n broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match PowF\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroPowf::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, 
&rhs])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = prep.checkpoint(&lhs);\n let rhs_state = prep.checkpoint(&rhs);\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n B::float_powf(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_powf(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_sign(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sign;\n\n retro_unary!(RetroSign, B::float_sign);\n\n impl<B: Backend> Backward<B, 1> for Sign {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad|\n // Always return 0 because the derivative of the sign function\n // does not contribute to gradient updates in a meaningful way.\n B::float_mul_scalar(grad, 0.elem()));\n }\n }\n\n Sign.prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSign::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateless(B::float_sign(tensor.primitive))\n }\n\n fn float_expand(tensor: FloatTensor<Self>, shape: Shape) -> FloatTensor<Self> {\n // D1: tensor, D2: shape\n #[derive(Debug)]\n struct ExpandDim;\n\n #[derive(new, Debug)]\n struct RetroExpand<B: Backend> {\n input_id: NodeID,\n shape: Shape,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroExpand<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_expand(input, self.shape.clone());\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for ExpandDim {\n type State = (Shape, Shape);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape_in, shape_out) = ops.state;\n let ndims_in = shape_in.num_dims();\n let ndims_out = shape_out.num_dims();\n\n let 
mut shape_expanded = vec![1; ndims_out];\n\n debug_assert!(ndims_out >= ndims_in);\n\n for i in 0..ndims_in {\n shape_expanded[i + (ndims_out - ndims_in)] = shape_in.dims[i];\n }\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let shape_grad = grad.shape();\n let mut grad = grad;\n\n #[allow(clippy::needless_range_loop)]\n for i in 0..ndims_out {\n if shape_expanded[i] == 1 && shape_grad.dims[i] != 1 {\n grad = B::float_sum_dim(grad, i);\n }\n }\n\n B::float_reshape(grad, shape_in)\n });\n }\n }\n\n match ExpandDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroExpand::<B>::new(tensor.node.id, shape.clone()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (tensor.primitive.shape(), shape.clone()),\n B::float_expand(tensor.primitive, shape),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_expand(tensor.primitive, shape)),\n }\n }\n\n fn float_sort(tensor: FloatTensor<Self>, dim: usize, descending: bool) -> FloatTensor<Self> {\n match super::sort::SortDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let shape = tensor.primitive.shape();\n let (tensor, indices) =\n B::float_sort_with_indices(tensor.primitive, dim, descending);\n prep.finish((indices, shape), tensor)\n }\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_sort(tensor.primitive, dim, descending))\n }\n }\n }\n\n fn float_sort_with_indices(\n tensor: FloatTensor<Self>,\n dim: usize,\n descending: bool,\n ) -> (FloatTensor<Self>, IntTensor<B>) {\n match super::sort::SortDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let shape = tensor.primitive.shape();\n let (tensor, indices) =\n B::float_sort_with_indices(tensor.primitive, dim, descending);\n let tensor = prep.finish((indices.clone(), shape), tensor);\n\n (tensor, indices)\n }\n OpsKind::UnTracked(prep) => {\n let (tensor, indices) =\n 
B::float_sort_with_indices(tensor.primitive, dim, descending);\n let tensor = prep.finish(tensor);\n\n (tensor, indices)\n }\n }\n }\n\n fn float_argsort(tensor: FloatTensor<Self>, dim: usize, descending: bool) -> IntTensor<B> {\n B::float_argsort(tensor.primitive, dim, descending)\n }\n\n fn float_repeat_dim(tensor: FloatTensor<Self>, dim: usize, times: usize) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Repeat;\n\n #[derive(new, Debug)]\n struct RetroRepeat<B: Backend> {\n tensor_id: NodeID,\n dim: usize,\n times: usize,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroRepeat<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);\n let out = B::float_repeat_dim(tensor, self.dim, self.times);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for Repeat {\n type State = (usize, usize);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, times) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let mut dims = grad.shape().dims;\n let orig_dim_size = dims[dim] / times;\n if orig_dim_size > 1 {\n dims[dim] = orig_dim_size;\n let orig_dims = dims.clone();\n dims.insert(dim + 1, times); // shape [..., orig_dim_size, times, ...]\n let grad = B::float_reshape(grad, Shape::from(dims));\n let grad = B::float_sum_dim(grad, dim + 1); // sum over repeat times\n B::float_reshape(grad, Shape::from(orig_dims))\n } else {\n B::float_sum_dim(grad, dim)\n }\n });\n }\n }\n\n match Repeat\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroRepeat::<B>::new(tensor.node.id, dim, times))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (dim, times),\n B::float_repeat_dim(tensor.primitive, dim, times),\n ),\n OpsKind::UnTracked(prep) => {\n 
prep.finish(B::float_repeat_dim(tensor.primitive, dim, times))\n }\n }\n }\n\n fn float_cast(tensor: FloatTensor<Self>, dtype: burn_tensor::FloatDType) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_cast(tensor.primitive, dtype))\n }\n\n // TODO: Implement float_prod and float_sum\n // https://github.com/tracel-ai/burn/issues/1458\n}"
],
"name": "lhs",
"type": "FloatTensor<Self>"
},
{
"definitions": [
"impl<B: Backend, C: CheckpointStrategy> FloatTensorOps<Self> for Autodiff<B, C> {\n fn float_from_data(data: TensorData, device: &Device<Self>) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_from_data(data, device))\n }\n\n fn float_random(\n shape: Shape,\n distribution: burn_tensor::Distribution,\n device: &Device<Self>,\n ) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_random(shape, distribution, device))\n }\n\n fn float_zeros(shape: Shape, device: &Device<Self>) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_zeros(shape, device))\n }\n\n fn float_ones(shape: Shape, device: &Device<Self>) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_ones(shape, device))\n }\n\n async fn float_into_data(tensor: FloatTensor<Self>) -> TensorData {\n B::float_into_data(tensor.primitive).await\n }\n\n fn float_device(tensor: &FloatTensor<Self>) -> Device<Self> {\n B::float_device(&tensor.primitive)\n }\n\n fn float_to_device(tensor: FloatTensor<Self>, device: &Device<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct ToDevice;\n\n impl<B: Backend> Backward<B, 1> for ToDevice {\n type State = B::Device;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_to_device(grad, &ops.state)\n });\n }\n }\n\n match ToDevice\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let device_old = B::float_device(&tensor.primitive);\n prep.finish(device_old, B::float_to_device(tensor.primitive, device))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_to_device(tensor.primitive, device)),\n }\n }\n\n fn float_empty(shape: Shape, device: &Device<Self>) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_empty(shape, device))\n }\n\n fn float_add(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Add;\n\n retro_binary!(RetroAdd, 
B::float_add);\n\n impl<B: Backend> Backward<B, 2> for Add {\n type State = (Shape, Shape);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape_lhs, shape_rhs) = ops.state;\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| broadcast_shape::<B>(grad, &shape_lhs),\n |grad| broadcast_shape::<B>(grad, &shape_rhs),\n );\n }\n }\n\n match Add\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroAdd::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (lhs.primitive.shape(), rhs.primitive.shape()),\n B::float_add(lhs.primitive, rhs.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_add(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_add_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct AddScalar;\n\n retro_unary_scalar!(RetroAddScalar, B::float_add_scalar);\n\n impl<B: Backend> Backward<B, 1> for AddScalar {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| grad);\n }\n }\n\n AddScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroAddScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateless(B::float_add_scalar(lhs.primitive, rhs))\n }\n\n fn float_sub(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sub;\n\n retro_binary!(RetroSub, B::float_sub);\n\n impl<B: Backend> Backward<B, 2> for Sub {\n type State = (Shape, Shape);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape_lhs, shape_rhs) = ops.state;\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| 
broadcast_shape::<B>(grad, &shape_lhs),\n |grad| broadcast_shape::<B>(B::float_neg(grad), &shape_rhs),\n );\n }\n }\n\n match Sub\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroSub::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (lhs.primitive.shape(), rhs.primitive.shape()),\n B::float_sub(lhs.primitive, rhs.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_sub(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_sub_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct SubScalar;\n\n retro_unary_scalar!(RetroSubScalar, B::float_sub_scalar);\n\n impl<B: Backend> Backward<B, 1> for SubScalar {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| grad);\n }\n }\n\n SubScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroSubScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateless(B::float_sub_scalar(lhs.primitive, rhs))\n }\n\n fn float_mul(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Mul;\n\n retro_binary!(RetroMul, B::float_mul);\n\n impl<B: Backend> Backward<B, 2> for Mul {\n type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (lhs, rhs, broadcast) = ops.state;\n let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));\n let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let grad = B::float_mul(grad, rhs.unwrap());\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n let grad = B::float_mul(grad, lhs.unwrap());\n 
broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let lhs_tracked = lhs.is_tracked();\n let rhs_tracked = rhs.is_tracked();\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match Mul\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroMul::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));\n let rhs_state = lhs_tracked.then(|| prep.checkpoint(&rhs));\n\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n B::float_mul(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_mul(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_mul_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct MulScalar;\n\n retro_unary_scalar!(RetroMulScalar, B::float_mul_scalar);\n\n impl<B: Backend> Backward<B, 1> for MulScalar {\n type State = FloatElem<B>;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_mul_scalar(grad, ops.state)\n });\n }\n }\n\n match MulScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroMulScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(rhs, B::float_mul_scalar(lhs.primitive, rhs)),\n OpsKind::UnTracked(prep) => prep.finish(B::float_mul_scalar(lhs.primitive, rhs)),\n }\n }\n\n fn float_div(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Div;\n\n retro_binary!(RetroDiv, B::float_div);\n\n impl<B: Backend> Backward<B, 2> for Div {\n type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut 
Checkpointer,\n ) {\n let (lhs, rhs, broadcast) = ops.state;\n let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));\n let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));\n let [rhs_4lhs, rhs_4rhs] = duplicate(&ops.parents, rhs);\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let rhs = rhs_4lhs.unwrap();\n let value = B::float_powf_scalar(rhs, -1.0);\n let grad = B::float_mul(grad, value);\n\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n let rhs = rhs_4rhs.unwrap();\n let lhs = lhs.unwrap();\n let value = B::float_div(B::float_neg(lhs), B::float_powf_scalar(rhs, 2.0));\n let grad = B::float_mul(grad, value);\n\n broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let lhs_tracked = lhs.is_tracked();\n let rhs_tracked = rhs.is_tracked();\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match Div\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroDiv::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));\n let rhs_state = (lhs_tracked || rhs_tracked).then(|| prep.checkpoint(&rhs));\n\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n B::float_div(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_div(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_div_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct DivScalar;\n\n retro_unary_scalar!(RetroDivScalar, B::float_div_scalar);\n\n impl<B: Backend> Backward<B, 1> for DivScalar {\n type State = FloatElem<B>;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let tmp = 1.0 / ops.state.elem::<f32>();\n B::float_mul_scalar(grad, tmp.elem())\n });\n }\n 
}\n\n match DivScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroDivScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(rhs, B::float_div_scalar(lhs.primitive, rhs)),\n OpsKind::UnTracked(prep) => prep.finish(B::float_div_scalar(lhs.primitive, rhs)),\n }\n }\n\n fn float_remainder(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Rem;\n\n retro_binary!(RetroRem, B::float_remainder);\n\n impl<B: Backend> Backward<B, 2> for Rem {\n type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (lhs, rhs, broadcast) = ops.state;\n let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));\n let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n // remainder(x, y) = x - floor(x / y) * y\n // partial(x - floor(x / y) * y, x) = 1\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n // partial(x - floor(x / y) * y, y) = - floor(x / y)\n let rhs = rhs.unwrap();\n let lhs = lhs.unwrap();\n let value = B::float_neg(B::float_floor(B::float_div(lhs, rhs)));\n let grad = B::float_mul(grad, value);\n broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let lhs_tracked = lhs.is_tracked();\n let rhs_tracked = rhs.is_tracked();\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match Rem\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroRem::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));\n let rhs_state = (lhs_tracked || rhs_tracked).then(|| prep.checkpoint(&rhs));\n\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n 
B::float_remainder(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_remainder(lhs.primitive, rhs.primitive))\n }\n }\n }\n\n fn float_remainder_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct RemainderScalar;\n\n retro_unary_scalar!(RetroRemainderScalar, B::float_remainder_scalar);\n\n impl<B: Backend> Backward<B, 1> for RemainderScalar {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| grad);\n }\n }\n\n RemainderScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroRemainderScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateless(B::float_remainder_scalar(lhs.primitive, rhs))\n }\n\n fn float_matmul(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Matmul;\n\n impl<B: Backend> Backward<B, 2> for Matmul {\n type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (lhs, rhs, broadcast) = ops.state;\n let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));\n let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let rhs = B::float_transpose(rhs.unwrap());\n let grad = B::float_matmul(grad, rhs);\n\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n let lhs = B::float_transpose(lhs.unwrap());\n let grad = B::float_matmul(lhs, grad);\n\n broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let lhs_tracked = lhs.is_tracked();\n let rhs_tracked = rhs.is_tracked();\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match Matmul\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n 
.compute_bound()\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));\n let rhs_state = lhs_tracked.then(|| prep.checkpoint(&rhs));\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n B::float_matmul(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_matmul(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_neg(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Neg;\n\n retro_unary!(RetroNeg, B::float_neg);\n\n impl<B: Backend> Backward<B, 1> for Neg {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| B::float_neg(grad));\n }\n }\n\n Neg.prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroNeg::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateless(B::float_neg(tensor.primitive))\n }\n\n fn float_recip(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Recip;\n\n retro_unary!(RetroRecip, B::float_recip);\n\n impl<B: Backend> Backward<B, 1> for Recip {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let tensor = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let tmp = B::float_powf_scalar(tensor, -2.0);\n let value = B::float_neg(tmp);\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Recip\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroRecip::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_recip(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_recip(tensor.primitive)),\n }\n }\n\n fn float_swap_dims(tensor: 
FloatTensor<Self>, dim1: usize, dim2: usize) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct SwapDim;\n\n #[derive(new, Debug)]\n struct RetroSwapDims<B: Backend> {\n input_id: NodeID,\n dim1: usize,\n dim2: usize,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroSwapDims<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_swap_dims(input, self.dim1, self.dim2);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for SwapDim {\n type State = (usize, usize);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim1, dim2) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_swap_dims(grad, dim2, dim1)\n });\n }\n }\n\n match SwapDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSwapDims::<B>::new(tensor.node.id, dim1, dim2))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (dim1, dim2),\n B::float_swap_dims(tensor.primitive, dim1, dim2),\n ),\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_swap_dims(tensor.primitive, dim1, dim2))\n }\n }\n }\n\n fn float_permute(tensor: FloatTensor<Self>, axes: &[usize]) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct PermuteDim;\n\n #[derive(new, Debug)]\n struct RetroPermuteDims<B: Backend> {\n input_id: NodeID,\n axes: Vec<usize>,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroPermuteDims<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_permute(input, &self.axes);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for PermuteDim {\n type State = Vec<usize>;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: 
&mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let axes = ops.state;\n\n let mut inverse = vec![0usize; axes.len()];\n axes.iter()\n .enumerate()\n .for_each(|(i, &axis)| inverse[axis] = i);\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_permute(grad, &inverse)\n });\n }\n }\n\n match PermuteDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroPermuteDims::<B>::new(tensor.node.id, axes.to_vec()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n prep.finish(axes.to_vec(), B::float_permute(tensor.primitive, axes))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_permute(tensor.primitive, axes)),\n }\n }\n\n fn float_flip(tensor: FloatTensor<Self>, axes: &[usize]) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct FlipDim;\n\n #[derive(new, Debug)]\n struct RetroFlipDims<B: Backend> {\n input_id: NodeID,\n axes: Vec<usize>,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroFlipDims<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_flip(input, &self.axes);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for FlipDim {\n type State = Vec<usize>;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let axes = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_flip(grad, &axes)\n });\n }\n }\n\n match FlipDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroFlipDims::<B>::new(tensor.node.id, axes.to_vec()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n prep.finish(axes.to_vec(), B::float_flip(tensor.primitive, axes))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_flip(tensor.primitive, axes)),\n }\n }\n\n fn float_reshape(tensor: FloatTensor<Self>, shape: 
Shape) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct ReshapeDim;\n\n #[derive(new, Debug)]\n struct RetroReshape<B: Backend> {\n input_id: NodeID,\n shape: Shape,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroReshape<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_reshape(input, self.shape.clone());\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for ReshapeDim {\n type State = (Shape, Shape);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape_original, shape) = ops.state;\n let ndims_out = shape.num_dims();\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let shape_grad = grad.shape();\n let mut grad = grad;\n\n for i in 0..ndims_out {\n if shape.dims[i] == 1 && shape_grad.dims[i] != 1 {\n grad = B::float_sum_dim(grad, i);\n }\n }\n\n B::float_reshape(grad, shape_original)\n });\n }\n }\n\n match ReshapeDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroReshape::<B>::new(tensor.node.id, shape.clone()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (tensor.primitive.shape(), shape.clone()),\n B::float_reshape(tensor.primitive, shape),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_reshape(tensor.primitive, shape)),\n }\n }\n\n fn float_gather(\n dim: usize,\n tensor: FloatTensor<Self>,\n indices: IntTensor<B>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Gather;\n\n impl<B: Backend> Backward<B, 1> for Gather {\n type State = (usize, IntTensor<B>, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, indices, shape, device) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let zeros = 
B::float_zeros(shape, &device);\n B::float_scatter(dim, zeros, indices, grad)\n });\n }\n }\n\n match Gather\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n dim,\n indices.clone(),\n tensor.primitive.shape(),\n B::float_device(&tensor.primitive),\n ),\n B::float_gather(dim, tensor.primitive, indices),\n ),\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_gather(dim, tensor.primitive, indices))\n }\n }\n }\n\n fn float_scatter(\n dim: usize,\n tensor: FloatTensor<Self>,\n indices: IntTensor<B>,\n value: FloatTensor<Self>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Scatter;\n\n impl<B: Backend> Backward<B, 2> for Scatter {\n type State = (usize, IntTensor<B>, Shape, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, indices, shape_lhs, shape_rhs, device) = ops.state;\n let [indices_4lhs, indices_4rhs] = duplicate(&ops.parents, Some(indices));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let zeros = B::float_zeros(shape_lhs, &device);\n B::float_scatter(dim, grad, indices_4lhs.unwrap(), zeros)\n },\n |grad| {\n let zeros = B::float_zeros(shape_rhs, &device);\n B::float_scatter(dim, zeros, indices_4rhs.unwrap(), grad)\n },\n );\n }\n }\n\n match Scatter\n .prepare::<C>([tensor.node, value.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n dim,\n indices.clone(),\n tensor.primitive.shape(),\n value.primitive.shape(),\n B::float_device(&value.primitive),\n ),\n B::float_scatter(dim, tensor.primitive, indices, value.primitive),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_scatter(\n dim,\n tensor.primitive,\n indices,\n value.primitive,\n )),\n }\n }\n\n fn float_select(\n tensor: FloatTensor<Self>,\n dim: usize,\n indices: IntTensor<B>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Select;\n\n 
#[derive(new, Debug)]\n struct RetroSelect<B: Backend> {\n input_id: NodeID,\n dim: usize,\n indices: IntTensor<B>,\n }\n\n impl<B: Backend> RetroForward for RetroSelect<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_select(input, self.dim, self.indices.clone());\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for Select {\n type State = (usize, IntTensor<B>, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, indices, shape, device) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let zeros = B::float_zeros(shape, &device);\n B::float_select_assign(zeros, dim, indices, grad)\n });\n }\n }\n\n match Select\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSelect::<B>::new(tensor.node.id, dim, indices.clone()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n dim,\n indices.clone(),\n tensor.primitive.shape(),\n B::float_device(&tensor.primitive),\n ),\n B::float_select(tensor.primitive, dim, indices),\n ),\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_select(tensor.primitive, dim, indices))\n }\n }\n }\n\n fn float_select_assign(\n tensor: FloatTensor<Self>,\n dim: usize,\n indices: IntTensor<B>,\n value: FloatTensor<Self>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct IndexSelectDimAssign;\n\n #[derive(new, Debug)]\n struct RetroSelectAssign<B: Backend> {\n tensor_id: NodeID,\n dim: usize,\n indices: IntTensor<B>,\n value_id: NodeID,\n }\n\n impl<B: Backend> RetroForward for RetroSelectAssign<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);\n let value = states.get_state::<B::FloatTensorPrimitive>(&self.value_id);\n 
let out = B::float_select_assign(tensor, self.dim, self.indices.clone(), value);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 2> for IndexSelectDimAssign {\n type State = (usize, IntTensor<B>);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, indices) = ops.state;\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| grad,\n |grad| B::float_select(grad, dim, indices),\n );\n }\n }\n\n match IndexSelectDimAssign\n .prepare::<C>([tensor.node.clone(), value.node.clone()])\n .memory_bound()\n .retro_forward(RetroSelectAssign::<B>::new(\n tensor.node.id,\n dim,\n indices.clone(),\n value.node.id,\n ))\n .parents([&tensor, &value])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (dim, indices.clone()),\n B::float_select_assign(tensor.primitive, dim, indices, value.primitive),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_select_assign(\n tensor.primitive,\n dim,\n indices,\n value.primitive,\n )),\n }\n }\n\n fn float_slice(\n tensor: FloatTensor<Self>,\n ranges: &[core::ops::Range<usize>],\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Index;\n\n #[derive(new, Debug)]\n struct RetroSlice<B: Backend> {\n tensor_id: NodeID,\n ranges: Vec<core::ops::Range<usize>>,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroSlice<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);\n let out = B::float_slice(tensor, &self.ranges);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for Index {\n type State = (Vec<core::ops::Range<usize>>, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (ranges, shape, device) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let zeros = 
B::float_zeros(shape, &device);\n B::float_slice_assign(zeros, &ranges, grad)\n });\n }\n }\n\n match Index\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSlice::<B>::new(tensor.node.id, ranges.to_vec()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n ranges.to_vec(),\n tensor.primitive.shape(),\n B::float_device(&tensor.primitive),\n ),\n B::float_slice(tensor.primitive, ranges),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_slice(tensor.primitive, ranges)),\n }\n }\n\n fn float_slice_assign(\n tensor: FloatTensor<Self>,\n ranges: &[core::ops::Range<usize>],\n value: FloatTensor<Self>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct SliceAssign;\n\n #[derive(new, Debug)]\n struct RetroSliceAssign<B: Backend> {\n tensor_id: NodeID,\n ranges: Vec<core::ops::Range<usize>>,\n value_id: NodeID,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroSliceAssign<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);\n let value = states.get_state::<B::FloatTensorPrimitive>(&self.value_id);\n let out = B::float_slice_assign(tensor, &self.ranges, value);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 2> for SliceAssign {\n type State = (Vec<core::ops::Range<usize>>, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (ranges, shape_rhs, device) = ops.state;\n let [ranges_4lhs, ranges_4rhs] = duplicate(&ops.parents, Some(ranges));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let zeros = B::float_zeros(shape_rhs, &device);\n B::float_slice_assign(grad, &ranges_4lhs.unwrap(), zeros)\n },\n |grad| B::float_slice(grad, &ranges_4rhs.unwrap()),\n );\n }\n }\n\n match SliceAssign\n .prepare::<C>([tensor.node.clone(), value.node.clone()])\n 
.memory_bound()\n .retro_forward(RetroSliceAssign::<B>::new(\n tensor.node.id,\n ranges.to_vec(),\n value.node.id,\n ))\n .parents([&tensor, &value])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n ranges.to_vec(),\n value.primitive.shape(),\n B::float_device(&value.primitive),\n ),\n B::float_slice_assign(tensor.primitive, ranges, value.primitive),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_slice_assign(\n tensor.primitive,\n ranges,\n value.primitive,\n )),\n }\n }\n\n fn float_mask_where(\n tensor: FloatTensor<Self>,\n mask: BoolTensor<Self>,\n source: FloatTensor<Self>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct MaskWhere;\n\n impl<B: Backend> Backward<B, 2> for MaskWhere {\n type State = (BoolTensor<B>, Shape, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (mask, shape_lhs, shape_rhs, device) = ops.state;\n let [mask_4lhs, mask_4rhs] = duplicate(&ops.parents, Some(mask));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let zeros = B::float_zeros(shape_lhs.clone(), &device);\n let grad = B::float_mask_where(grad, mask_4lhs.unwrap(), zeros);\n\n broadcast_shape::<B>(grad, &shape_lhs)\n },\n |grad| {\n let zeros = B::float_zeros(shape_rhs.clone(), &device);\n let grad = B::float_mask_where(zeros, mask_4rhs.unwrap(), grad);\n\n broadcast_shape::<B>(grad, &shape_rhs)\n },\n );\n }\n }\n\n match MaskWhere\n .prepare::<C>([tensor.node, source.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n mask.clone(),\n tensor.primitive.shape(),\n source.primitive.shape(),\n B::float_device(&source.primitive),\n ),\n B::float_mask_where(tensor.primitive, mask, source.primitive),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_mask_where(\n tensor.primitive,\n mask,\n source.primitive,\n )),\n }\n }\n\n fn float_mask_fill(\n tensor: FloatTensor<Self>,\n mask: 
        BoolTensor<B>,
        value: FloatElem<B>,
    ) -> FloatTensor<Self> {
        // Backward op for `mask_fill`: positions overwritten by `value` in the
        // forward pass must not receive any gradient, so the incoming gradient
        // is zeroed wherever the mask was set.
        #[derive(Debug)]
        struct MaskFill;

        impl<B: Backend> Backward<B, 1> for MaskFill {
            // Saved state: the boolean mask used during the forward pass.
            type State = BoolTensor<B>;

            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    // Zero the gradient at every masked position; unmasked
                    // positions pass the gradient through unchanged.
                    B::float_mask_fill(grad, ops.state, 0.elem())
                });
            }
        }

        match MaskFill
            .prepare::<C>([tensor.node])
            .compute_bound()
            .stateful()
        {
            // Tracked: keep a copy of the mask so backward can zero out the
            // filled positions.
            OpsKind::Tracked(prep) => prep.finish(
                mask.clone(),
                B::float_mask_fill(tensor.primitive, mask, value),
            ),
            OpsKind::UnTracked(prep) => {
                prep.finish(B::float_mask_fill(tensor.primitive, mask, value))
            }
        }
    }

    // Comparison operators produce boolean tensors, which are not
    // differentiable. They therefore bypass the autodiff graph entirely and
    // delegate directly to the inner backend on the raw primitives.

    fn float_equal(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {
        B::float_equal(lhs.primitive, rhs.primitive)
    }

    fn float_equal_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {
        B::float_equal_elem(lhs.primitive, rhs)
    }

    fn float_greater(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {
        B::float_greater(lhs.primitive, rhs.primitive)
    }

    fn float_greater_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {
        B::float_greater_elem(lhs.primitive, rhs)
    }

    fn float_greater_equal(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {
        B::float_greater_equal(lhs.primitive, rhs.primitive)
    }

    fn float_greater_equal_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {
        B::float_greater_equal_elem(lhs.primitive, rhs)
    }

    fn float_lower(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {
        B::float_lower(lhs.primitive, rhs.primitive)
    }

    fn float_lower_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {
        B::float_lower_elem(lhs.primitive, rhs)
    }

    fn float_lower_equal(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {
        B::float_lower_equal(lhs.primitive, rhs.primitive)
}\n\n fn float_lower_equal_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {\n B::float_lower_equal_elem(lhs.primitive, rhs)\n }\n\n fn float_detach(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n // When we detach a tensor, we remove it from the graph, but we still want to keep the\n // `require_grad` setting.\n let is_require_grad = Self::float_is_require_grad(&tensor);\n let tensor = AutodiffTensor::new(tensor.primitive);\n\n match is_require_grad {\n true => tensor.require_grad(),\n false => tensor,\n }\n }\n\n fn float_set_require_grad(tensor: FloatTensor<Self>, require_grad: bool) -> FloatTensor<Self> {\n if require_grad {\n return tensor.require_grad();\n }\n\n AutodiffTensor::new(tensor.primitive)\n }\n\n fn float_is_require_grad(tensor: &FloatTensor<Self>) -> bool {\n matches!(tensor.node.requirement, Requirement::Grad)\n }\n\n fn float_mean(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Mean;\n\n impl<B: Backend> Backward<B, 1> for Mean {\n type State = Shape;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let shape = ops.state;\n let val = 1_f64 / shape.num_elements() as f64;\n let ones = B::float_ones(shape, &B::float_device(&grad));\n let val = B::float_mul_scalar(ones, val.elem());\n\n let grad = unsqueeze_like::<B>(grad, val.shape());\n B::float_mul(val, grad)\n });\n }\n }\n\n match Mean.prepare::<C>([tensor.node]).compute_bound().stateful() {\n OpsKind::Tracked(prep) => {\n prep.finish(tensor.primitive.shape(), B::float_mean(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_mean(tensor.primitive)),\n }\n }\n\n fn float_sum(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sum;\n\n impl<B: Backend> Backward<B, 1> for Sum {\n type State = Shape;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut 
Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let val = B::float_ones(ops.state, &B::float_device(&grad));\n\n let grad = unsqueeze_like::<B>(grad, val.shape());\n B::float_mul(val, grad)\n });\n }\n }\n\n match Sum.prepare::<C>([tensor.node]).compute_bound().stateful() {\n OpsKind::Tracked(prep) => {\n prep.finish(tensor.primitive.shape(), B::float_sum(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_sum(tensor.primitive)),\n }\n }\n\n fn float_mean_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct MeanDim;\n\n impl<B: Backend> Backward<B, 1> for MeanDim {\n type State = (Shape, usize);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape, dim) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let val = 1_f64 / shape.dims[dim] as f64;\n let ones = B::float_ones(shape, &B::float_device(&grad));\n let val = B::float_mul_scalar(ones, B::FloatElem::from_elem(val));\n\n let grad = B::float_sum_dim(grad, dim);\n B::float_mul(val, grad)\n });\n }\n }\n\n match MeanDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (tensor.primitive.shape(), dim),\n B::float_mean_dim(tensor.primitive, dim),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_mean_dim(tensor.primitive, dim)),\n }\n }\n\n fn float_sum_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct SumDim;\n\n impl<B: Backend> Backward<B, 1> for SumDim {\n type State = (Shape, usize);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape, dim) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let ones = B::float_ones(shape, &B::float_device(&grad));\n let grad = 
B::float_sum_dim(grad, dim);\n\n B::float_mul(ones, grad)\n });\n }\n }\n\n match SumDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (tensor.primitive.shape(), dim),\n B::float_sum_dim(tensor.primitive, dim),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_sum_dim(tensor.primitive, dim)),\n }\n }\n\n fn float_argmax(tensor: FloatTensor<Self>, dim: usize) -> IntTensor<B> {\n B::float_argmax(tensor.primitive, dim)\n }\n\n fn float_argmin(tensor: FloatTensor<Self>, dim: usize) -> IntTensor<B> {\n B::float_argmin(tensor.primitive, dim)\n }\n\n fn float_exp(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Exp;\n\n retro_unary!(RetroExp, B::float_exp);\n\n impl<B: Backend> Backward<B, 1> for Exp {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n let output = B::float_exp(input);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_mul(grad, output)\n });\n }\n }\n\n match Exp\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroExp::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_exp(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_exp(tensor.primitive)),\n }\n }\n\n fn float_log(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Log;\n\n retro_unary!(RetroLog, B::float_log);\n\n impl<B: Backend> Backward<B, 1> for Log {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = 
B::float_powf_scalar(input, -1.0);\n B::float_mul(grad, value)\n });\n }\n }\n\n match Log\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroLog::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_log(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_log(tensor.primitive)),\n }\n }\n\n fn float_log1p(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Log1P;\n\n retro_unary!(RetroLog1P, B::float_log1p);\n\n impl<B: Backend> Backward<B, 1> for Log1P {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_add_scalar(input, 1.elem());\n let value = B::float_powf_scalar(value, -1.0);\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Log1P\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroLog1P::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_log1p(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_log1p(tensor.primitive)),\n }\n }\n\n fn float_powf_scalar(tensor: FloatTensor<Self>, value: f32) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct PowfScalar;\n\n #[derive(new, Debug)]\n struct RetroPowfScalar<B: Backend> {\n lhs_id: NodeID,\n rhs: f32,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroPowfScalar<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let lhs = states.get_state::<B::FloatTensorPrimitive>(&self.lhs_id);\n let out = B::float_powf_scalar(lhs, self.rhs);\n states.save(out_node, out)\n }\n }\n\n impl<B: 
Backend> Backward<B, 1> for PowfScalar {\n type State = (NodeID, f32);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (tensor_id, value) = ops.state;\n let tensor = checkpointer.retrieve_node_output(tensor_id);\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let tmp = B::float_powf_scalar(tensor, value - 1.0);\n let value = B::float_mul_scalar(tmp, value.elem());\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match PowfScalar\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroPowfScalar::<B>::new(tensor.node.id, value))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = (prep.checkpoint(&tensor), value);\n prep.finish(state, B::float_powf_scalar(tensor.primitive, value))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_powf_scalar(tensor.primitive, value)),\n }\n }\n\n fn float_sqrt(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sqrt;\n\n retro_unary!(RetroSqrt, B::float_sqrt);\n\n impl<B: Backend> Backward<B, 1> for Sqrt {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_div_scalar(B::float_powf_scalar(input, -0.5), 2.elem());\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Sqrt\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSqrt::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_sqrt(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_sqrt(tensor.primitive)),\n }\n }\n\n fn float_abs(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Abs;\n\n 
retro_unary!(RetroAbs, B::float_abs);\n\n impl<B: Backend> Backward<B, 1> for Abs {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let tensor: B::FloatTensorPrimitive = checkpointer.retrieve_node_output(ops.state);\n let state = B::float_sign(tensor);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_mul(grad, state)\n });\n }\n }\n\n match Abs\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroAbs::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_abs(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_abs(tensor.primitive)),\n }\n }\n\n fn float_cos(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Cos;\n\n retro_unary!(RetroCos, B::float_cos);\n\n impl<B: Backend> Backward<B, 1> for Cos {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_neg(B::float_sin(input));\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Cos\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroCos::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_cos(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_cos(tensor.primitive)),\n }\n }\n\n fn float_sin(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sin;\n\n retro_unary!(RetroSin, B::float_sin);\n\n impl<B: Backend> Backward<B, 1> for Sin {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 
1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let state = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_cos(state);\n B::float_mul(grad, value)\n });\n }\n }\n\n match Sin\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSin::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_sin(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_sin(tensor.primitive)),\n }\n }\n\n fn float_tanh(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Tanh;\n\n retro_unary!(RetroTanh, B::float_tanh);\n\n impl<B: Backend> Backward<B, 1> for Tanh {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n let state = B::float_tanh(input);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_add_scalar(\n B::float_neg(B::float_powf_scalar(state, 2.0)),\n 1.elem(),\n );\n B::float_mul(grad, value)\n });\n }\n }\n\n match Tanh\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroTanh::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_tanh(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_tanh(tensor.primitive)),\n }\n }\n\n fn float_round(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Round;\n retro_unary!(RetroRound, B::float_round);\n\n impl<B: Backend> Backward<B, 1> for Round {\n type State = (Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n 
) {\n let (shape, device) = ops.state;\n unary::<B, _>(ops.parents, ops.node, grads, |_grad| {\n B::float_zeros(shape, &device)\n })\n }\n }\n\n match Round\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroRound::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (tensor.primitive.shape(), B::float_device(&tensor.primitive)),\n B::float_round(tensor.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_round(tensor.primitive)),\n }\n }\n\n fn float_floor(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Floor;\n retro_unary!(RetroFloor, B::float_floor);\n\n impl<B: Backend> Backward<B, 1> for Floor {\n type State = (Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape, device) = ops.state;\n unary::<B, _>(ops.parents, ops.node, grads, |_grad| {\n B::float_zeros(shape, &device)\n })\n }\n }\n\n match Floor\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroFloor::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (tensor.primitive.shape(), B::float_device(&tensor.primitive)),\n B::float_floor(tensor.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_floor(tensor.primitive)),\n }\n }\n\n fn float_ceil(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Ceil;\n retro_unary!(RetroCeil, B::float_ceil);\n\n impl<B: Backend> Backward<B, 1> for Ceil {\n type State = (Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape, device) = ops.state;\n unary::<B, _>(ops.parents, ops.node, grads, |_grad| {\n B::float_zeros(shape, &device)\n })\n }\n }\n\n match Ceil\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n 
.retro_forward(RetroCeil::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (tensor.primitive.shape(), B::float_device(&tensor.primitive)),\n B::float_floor(tensor.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_floor(tensor.primitive)),\n }\n }\n\n fn float_erf(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Erf;\n\n retro_unary!(RetroErf, B::float_erf);\n\n impl<B: Backend> Backward<B, 1> for Erf {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let ops = checkpointer.retrieve_node_output(ops.state);\n let exponent = B::float_neg(B::float_powf_scalar(ops, 2.0));\n let numerator = B::float_mul_scalar(B::float_exp(exponent), 2.0.elem());\n let denominator = core::f64::consts::PI.sqrt().elem();\n let value = B::float_div_scalar(numerator, denominator);\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Erf\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroErf::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_erf(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_erf(tensor.primitive)),\n }\n }\n\n fn float_cat(tensors: Vec<FloatTensor<Self>>, dim: usize) -> FloatTensor<Self> {\n #[derive(new, Debug)]\n struct CatStep<B: Backend> {\n nodes: Vec<Option<NodeRef>>,\n // The dimension of each tensor along the dim dimension.\n // This indicates the number of dimension concatenated for each tensor.\n dim_sizes: Vec<usize>,\n output: NodeRef,\n phantom: PhantomData<B>,\n dim: usize,\n }\n\n impl<B: Backend> Step for CatStep<B> {\n fn step(self: Box<Self>, grads: &mut Gradients, _checkpointer: &mut Checkpointer) {\n let grad = 
grads.consume::<B>(&self.output);\n let ranges: Vec<_> = grad.shape().dims.iter().map(|v| 0..*v).collect();\n\n let mut current_index = 0;\n\n self.nodes\n .into_iter()\n .zip(self.dim_sizes)\n .filter_map(|(node, dim_size)| node.map(|node| (node, dim_size)))\n .for_each(|(node, dim_size)| {\n let mut ranges = ranges.clone();\n ranges[self.dim] = current_index..dim_size + current_index;\n current_index += dim_size;\n grads.register::<B>(node.id, B::float_slice(grad.clone(), &ranges));\n });\n }\n\n fn node(&self) -> NodeID {\n self.output.id\n }\n\n fn parents(&self) -> Vec<NodeID> {\n self.nodes\n .iter()\n .filter_map(|node| node.clone())\n .map(|node| node.id)\n .collect()\n }\n fn depth(&self) -> usize {\n self.output.order\n }\n }\n\n let mut nodes = Vec::with_capacity(tensors.len());\n let mut primitives = Vec::with_capacity(tensors.len());\n let mut dim_sizes = Vec::with_capacity(tensors.len());\n\n tensors.into_iter().for_each(|tensor| {\n dim_sizes.push(tensor.primitive.shape().dims[dim]);\n nodes.push(tensor.node);\n primitives.push(tensor.primitive);\n });\n\n let requirement = Requirement::from_nodes(&nodes);\n\n // For simplicity, this operation does not checkpoint anything\n let cat_computing_property = ComputingProperty::Ambiguous;\n let checkpointer_builder = CheckpointerBuilder::default();\n\n let output = B::float_cat(primitives, dim);\n if requirement.is_none() {\n return AutodiffTensor::from_parents(\n output,\n &nodes,\n requirement,\n cat_computing_property,\n );\n }\n\n let output =\n AutodiffTensor::from_parents(output, &nodes, requirement, cat_computing_property);\n let nodes = nodes\n .into_iter()\n .map(|node| node.clone_if_require_grad())\n .collect::<Vec<_>>();\n\n let ops = CatStep::<B>::new(nodes, dim_sizes, output.node.clone(), dim);\n output.register_step(ops, checkpointer_builder)\n }\n\n fn float_max_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {\n match MaxMinDim\n .prepare::<C>([tensor.node])\n 
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => {
                // Save the argmax indices and input shape; the shared MaxMinDim
                // backward uses them to scatter the gradient back to the
                // winning positions.
                let shape = tensor.primitive.shape();
                let (tensor, index) = B::float_max_dim_with_indices(tensor.primitive, dim);
                prep.finish((index, shape), tensor)
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_max_dim(tensor.primitive, dim)),
        }
    }
    fn float_max_dim_with_indices(
        tensor: FloatTensor<Self>,
        dim: usize,
    ) -> (FloatTensor<Self>, IntTensor<B>) {
        // Same as `float_max_dim`, but also hands the argmax indices back to
        // the caller. Only the float output participates in the autodiff graph.
        match MaxMinDim
            .prepare::<C>([tensor.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => {
                let shape = tensor.primitive.shape();
                let (tensor, index) = B::float_max_dim_with_indices(tensor.primitive, dim);
                // The indices are both returned and cloned into the backward state.
                let tensor = prep.finish((index.clone(), shape), tensor);

                (tensor, index)
            }
            OpsKind::UnTracked(prep) => {
                let (tensor, index) = B::float_max_dim_with_indices(tensor.primitive, dim);
                let tensor = prep.finish(tensor);

                (tensor, index)
            }
        }
    }
    fn float_min_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {
        // Mirror of `float_max_dim` for the minimum; reuses the shared
        // MaxMinDim backward op (gradient flows to the selected positions).
        match MaxMinDim
            .prepare::<C>([tensor.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => {
                let shape = tensor.primitive.shape();
                let (tensor, index) = B::float_min_dim_with_indices(tensor.primitive, dim);
                prep.finish((index, shape), tensor)
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_min_dim(tensor.primitive, dim)),
        }
    }
    fn float_min_dim_with_indices(
        tensor: FloatTensor<Self>,
        dim: usize,
    ) -> (FloatTensor<Self>, IntTensor<B>) {
        // Same as `float_min_dim`, but also returns the argmin indices.
        match MaxMinDim
            .prepare::<C>([tensor.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => {
                let shape = tensor.primitive.shape();
                let (tensor, index) = B::float_min_dim_with_indices(tensor.primitive, dim);
                let tensor = prep.finish((index.clone(), shape), tensor);

                (tensor, index)
            }
            OpsKind::UnTracked(prep) => {
                let (tensor, index) = B::float_min_dim_with_indices(tensor.primitive, dim);
                let tensor = prep.finish(tensor);

                (tensor, index)
            }
        }
    }

fn float_into_int(tensor: FloatTensor<Self>) -> <Autodiff<B> as Backend>::IntTensorPrimitive {\n B::float_into_int(tensor.primitive)\n }\n\n fn float_powf(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct PowF;\n\n retro_binary!(RetroPowf, B::float_powf);\n\n impl<B: Backend> Backward<B, 2> for PowF {\n type State = (NodeID, NodeID, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (lhs_id, rhs_id, broadcast) = ops.state;\n let lhs: B::FloatTensorPrimitive = checkpointer.retrieve_node_output(lhs_id);\n let rhs: B::FloatTensorPrimitive = checkpointer.retrieve_node_output(rhs_id);\n\n // Both lhs and rhs are needed for both lhs and rhs gradients, but we clone them\n // the number of times required by the parents specification.\n let [rhs_4lhs, rhs_4rhs] = duplicate(&ops.parents, Some(rhs));\n let [lhs_4lhs, lhs_4rhs] = duplicate(&ops.parents, Some(lhs));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n //rhs*(lhs.val**(rhs-1))*grad\n let rhs1 = rhs_4lhs.unwrap();\n let rhs2 = rhs1.clone();\n let lhs = lhs_4lhs.unwrap();\n\n let tmp = B::float_powf(\n lhs,\n B::float_sub_scalar(rhs1, B::FloatElem::from_elem(1.0)),\n );\n let value = B::float_mul(tmp, rhs2);\n let grad = B::float_mul(grad, value);\n\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n //lhs**rhs * ln(lhs) * grad\n let rhs = rhs_4rhs.unwrap();\n let lhs1 = lhs_4rhs.unwrap();\n let lhs2 = lhs1.clone();\n let tmp = B::float_powf(lhs1, rhs);\n let value = B::float_mul(tmp, B::float_log(lhs2));\n let grad = B::float_mul(grad, value);\n\n broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match PowF\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroPowf::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, 
&rhs])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = prep.checkpoint(&lhs);\n let rhs_state = prep.checkpoint(&rhs);\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n B::float_powf(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_powf(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_sign(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sign;\n\n retro_unary!(RetroSign, B::float_sign);\n\n impl<B: Backend> Backward<B, 1> for Sign {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad|\n // Always return 0 because the derivative of the sign function\n // does not contribute to gradient updates in a meaningful way.\n B::float_mul_scalar(grad, 0.elem()));\n }\n }\n\n Sign.prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSign::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateless(B::float_sign(tensor.primitive))\n }\n\n fn float_expand(tensor: FloatTensor<Self>, shape: Shape) -> FloatTensor<Self> {\n // D1: tensor, D2: shape\n #[derive(Debug)]\n struct ExpandDim;\n\n #[derive(new, Debug)]\n struct RetroExpand<B: Backend> {\n input_id: NodeID,\n shape: Shape,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroExpand<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_expand(input, self.shape.clone());\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for ExpandDim {\n type State = (Shape, Shape);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape_in, shape_out) = ops.state;\n let ndims_in = shape_in.num_dims();\n let ndims_out = shape_out.num_dims();\n\n let 
mut shape_expanded = vec![1; ndims_out];\n\n debug_assert!(ndims_out >= ndims_in);\n\n for i in 0..ndims_in {\n shape_expanded[i + (ndims_out - ndims_in)] = shape_in.dims[i];\n }\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let shape_grad = grad.shape();\n let mut grad = grad;\n\n #[allow(clippy::needless_range_loop)]\n for i in 0..ndims_out {\n if shape_expanded[i] == 1 && shape_grad.dims[i] != 1 {\n grad = B::float_sum_dim(grad, i);\n }\n }\n\n B::float_reshape(grad, shape_in)\n });\n }\n }\n\n match ExpandDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroExpand::<B>::new(tensor.node.id, shape.clone()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (tensor.primitive.shape(), shape.clone()),\n B::float_expand(tensor.primitive, shape),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_expand(tensor.primitive, shape)),\n }\n }\n\n fn float_sort(tensor: FloatTensor<Self>, dim: usize, descending: bool) -> FloatTensor<Self> {\n match super::sort::SortDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let shape = tensor.primitive.shape();\n let (tensor, indices) =\n B::float_sort_with_indices(tensor.primitive, dim, descending);\n prep.finish((indices, shape), tensor)\n }\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_sort(tensor.primitive, dim, descending))\n }\n }\n }\n\n fn float_sort_with_indices(\n tensor: FloatTensor<Self>,\n dim: usize,\n descending: bool,\n ) -> (FloatTensor<Self>, IntTensor<B>) {\n match super::sort::SortDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let shape = tensor.primitive.shape();\n let (tensor, indices) =\n B::float_sort_with_indices(tensor.primitive, dim, descending);\n let tensor = prep.finish((indices.clone(), shape), tensor);\n\n (tensor, indices)\n }\n OpsKind::UnTracked(prep) => {\n let (tensor, indices) =\n 
B::float_sort_with_indices(tensor.primitive, dim, descending);\n let tensor = prep.finish(tensor);\n\n (tensor, indices)\n }\n }\n }\n\n fn float_argsort(tensor: FloatTensor<Self>, dim: usize, descending: bool) -> IntTensor<B> {\n B::float_argsort(tensor.primitive, dim, descending)\n }\n\n fn float_repeat_dim(tensor: FloatTensor<Self>, dim: usize, times: usize) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Repeat;\n\n #[derive(new, Debug)]\n struct RetroRepeat<B: Backend> {\n tensor_id: NodeID,\n dim: usize,\n times: usize,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroRepeat<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);\n let out = B::float_repeat_dim(tensor, self.dim, self.times);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for Repeat {\n type State = (usize, usize);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, times) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let mut dims = grad.shape().dims;\n let orig_dim_size = dims[dim] / times;\n if orig_dim_size > 1 {\n dims[dim] = orig_dim_size;\n let orig_dims = dims.clone();\n dims.insert(dim + 1, times); // shape [..., orig_dim_size, times, ...]\n let grad = B::float_reshape(grad, Shape::from(dims));\n let grad = B::float_sum_dim(grad, dim + 1); // sum over repeat times\n B::float_reshape(grad, Shape::from(orig_dims))\n } else {\n B::float_sum_dim(grad, dim)\n }\n });\n }\n }\n\n match Repeat\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroRepeat::<B>::new(tensor.node.id, dim, times))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (dim, times),\n B::float_repeat_dim(tensor.primitive, dim, times),\n ),\n OpsKind::UnTracked(prep) => {\n 
prep.finish(B::float_repeat_dim(tensor.primitive, dim, times))\n }\n }\n }\n\n fn float_cast(tensor: FloatTensor<Self>, dtype: burn_tensor::FloatDType) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_cast(tensor.primitive, dtype))\n }\n\n // TODO: Implement float_prod and float_sum\n // https://github.com/tracel-ai/burn/issues/1458\n}"
],
"name": "rhs",
"type": "FloatTensor<Self>"
}
],
"end_line": 162,
"name": "float_add",
"signature": "fn float_add(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self>",
"start_line": 122
} | {
"class_name": "impl<B: Backend, C: CheckpointStrategy> FloatTensorOps<Self> for Autodiff<B, C> {\n fn float_from_data(data: TensorData, device: &Device<Self>) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_from_data(data, device))\n }\n\n fn float_random(\n shape: Shape,\n distribution: burn_tensor::Distribution,\n device: &Device<Self>,\n ) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_random(shape, distribution, device))\n }\n\n fn float_zeros(shape: Shape, device: &Device<Self>) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_zeros(shape, device))\n }\n\n fn float_ones(shape: Shape, device: &Device<Self>) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_ones(shape, device))\n }\n\n async fn float_into_data(tensor: FloatTensor<Self>) -> TensorData {\n B::float_into_data(tensor.primitive).await\n }\n\n fn float_device(tensor: &FloatTensor<Self>) -> Device<Self> {\n B::float_device(&tensor.primitive)\n }\n\n fn float_to_device(tensor: FloatTensor<Self>, device: &Device<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct ToDevice;\n\n impl<B: Backend> Backward<B, 1> for ToDevice {\n type State = B::Device;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_to_device(grad, &ops.state)\n });\n }\n }\n\n match ToDevice\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let device_old = B::float_device(&tensor.primitive);\n prep.finish(device_old, B::float_to_device(tensor.primitive, device))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_to_device(tensor.primitive, device)),\n }\n }\n\n fn float_empty(shape: Shape, device: &Device<Self>) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_empty(shape, device))\n }\n\n fn float_add(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Add;\n\n 
retro_binary!(RetroAdd, B::float_add);\n\n impl<B: Backend> Backward<B, 2> for Add {\n type State = (Shape, Shape);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape_lhs, shape_rhs) = ops.state;\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| broadcast_shape::<B>(grad, &shape_lhs),\n |grad| broadcast_shape::<B>(grad, &shape_rhs),\n );\n }\n }\n\n match Add\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroAdd::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (lhs.primitive.shape(), rhs.primitive.shape()),\n B::float_add(lhs.primitive, rhs.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_add(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_add_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct AddScalar;\n\n retro_unary_scalar!(RetroAddScalar, B::float_add_scalar);\n\n impl<B: Backend> Backward<B, 1> for AddScalar {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| grad);\n }\n }\n\n AddScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroAddScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateless(B::float_add_scalar(lhs.primitive, rhs))\n }\n\n fn float_sub(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sub;\n\n retro_binary!(RetroSub, B::float_sub);\n\n impl<B: Backend> Backward<B, 2> for Sub {\n type State = (Shape, Shape);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape_lhs, shape_rhs) = ops.state;\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n 
grads,\n |grad| broadcast_shape::<B>(grad, &shape_lhs),\n |grad| broadcast_shape::<B>(B::float_neg(grad), &shape_rhs),\n );\n }\n }\n\n match Sub\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroSub::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (lhs.primitive.shape(), rhs.primitive.shape()),\n B::float_sub(lhs.primitive, rhs.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_sub(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_sub_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct SubScalar;\n\n retro_unary_scalar!(RetroSubScalar, B::float_sub_scalar);\n\n impl<B: Backend> Backward<B, 1> for SubScalar {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| grad);\n }\n }\n\n SubScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroSubScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateless(B::float_sub_scalar(lhs.primitive, rhs))\n }\n\n fn float_mul(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Mul;\n\n retro_binary!(RetroMul, B::float_mul);\n\n impl<B: Backend> Backward<B, 2> for Mul {\n type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (lhs, rhs, broadcast) = ops.state;\n let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));\n let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let grad = B::float_mul(grad, rhs.unwrap());\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n let grad = B::float_mul(grad, 
lhs.unwrap());\n broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let lhs_tracked = lhs.is_tracked();\n let rhs_tracked = rhs.is_tracked();\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match Mul\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroMul::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));\n let rhs_state = lhs_tracked.then(|| prep.checkpoint(&rhs));\n\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n B::float_mul(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_mul(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_mul_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct MulScalar;\n\n retro_unary_scalar!(RetroMulScalar, B::float_mul_scalar);\n\n impl<B: Backend> Backward<B, 1> for MulScalar {\n type State = FloatElem<B>;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_mul_scalar(grad, ops.state)\n });\n }\n }\n\n match MulScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroMulScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(rhs, B::float_mul_scalar(lhs.primitive, rhs)),\n OpsKind::UnTracked(prep) => prep.finish(B::float_mul_scalar(lhs.primitive, rhs)),\n }\n }\n\n fn float_div(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Div;\n\n retro_binary!(RetroDiv, B::float_div);\n\n impl<B: Backend> Backward<B, 2> for Div {\n type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n 
checkpointer: &mut Checkpointer,\n ) {\n let (lhs, rhs, broadcast) = ops.state;\n let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));\n let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));\n let [rhs_4lhs, rhs_4rhs] = duplicate(&ops.parents, rhs);\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let rhs = rhs_4lhs.unwrap();\n let value = B::float_powf_scalar(rhs, -1.0);\n let grad = B::float_mul(grad, value);\n\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n let rhs = rhs_4rhs.unwrap();\n let lhs = lhs.unwrap();\n let value = B::float_div(B::float_neg(lhs), B::float_powf_scalar(rhs, 2.0));\n let grad = B::float_mul(grad, value);\n\n broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let lhs_tracked = lhs.is_tracked();\n let rhs_tracked = rhs.is_tracked();\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match Div\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroDiv::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));\n let rhs_state = (lhs_tracked || rhs_tracked).then(|| prep.checkpoint(&rhs));\n\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n B::float_div(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_div(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_div_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct DivScalar;\n\n retro_unary_scalar!(RetroDivScalar, B::float_div_scalar);\n\n impl<B: Backend> Backward<B, 1> for DivScalar {\n type State = FloatElem<B>;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let tmp = 1.0 / ops.state.elem::<f32>();\n B::float_mul_scalar(grad, 
tmp.elem())\n });\n }\n }\n\n match DivScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroDivScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(rhs, B::float_div_scalar(lhs.primitive, rhs)),\n OpsKind::UnTracked(prep) => prep.finish(B::float_div_scalar(lhs.primitive, rhs)),\n }\n }\n\n fn float_remainder(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Rem;\n\n retro_binary!(RetroRem, B::float_remainder);\n\n impl<B: Backend> Backward<B, 2> for Rem {\n type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (lhs, rhs, broadcast) = ops.state;\n let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));\n let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n // remainder(x, y) = x - floor(x / y) * y\n // partial(x - floor(x / y) * y, x) = 1\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n // partial(x - floor(x / y) * y, y) = - floor(x / y)\n let rhs = rhs.unwrap();\n let lhs = lhs.unwrap();\n let value = B::float_neg(B::float_floor(B::float_div(lhs, rhs)));\n let grad = B::float_mul(grad, value);\n broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let lhs_tracked = lhs.is_tracked();\n let rhs_tracked = rhs.is_tracked();\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match Rem\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroRem::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));\n let rhs_state = (lhs_tracked || rhs_tracked).then(|| prep.checkpoint(&rhs));\n\n prep.finish(\n (lhs_state, 
rhs_state, broadcast),\n B::float_remainder(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_remainder(lhs.primitive, rhs.primitive))\n }\n }\n }\n\n fn float_remainder_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct RemainderScalar;\n\n retro_unary_scalar!(RetroRemainderScalar, B::float_remainder_scalar);\n\n impl<B: Backend> Backward<B, 1> for RemainderScalar {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| grad);\n }\n }\n\n RemainderScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroRemainderScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateless(B::float_remainder_scalar(lhs.primitive, rhs))\n }\n\n fn float_matmul(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Matmul;\n\n impl<B: Backend> Backward<B, 2> for Matmul {\n type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (lhs, rhs, broadcast) = ops.state;\n let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));\n let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let rhs = B::float_transpose(rhs.unwrap());\n let grad = B::float_matmul(grad, rhs);\n\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n let lhs = B::float_transpose(lhs.unwrap());\n let grad = B::float_matmul(lhs, grad);\n\n broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let lhs_tracked = lhs.is_tracked();\n let rhs_tracked = rhs.is_tracked();\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match Matmul\n .prepare::<C>([lhs.node.clone(), 
rhs.node.clone()])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));\n let rhs_state = lhs_tracked.then(|| prep.checkpoint(&rhs));\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n B::float_matmul(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_matmul(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_neg(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Neg;\n\n retro_unary!(RetroNeg, B::float_neg);\n\n impl<B: Backend> Backward<B, 1> for Neg {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| B::float_neg(grad));\n }\n }\n\n Neg.prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroNeg::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateless(B::float_neg(tensor.primitive))\n }\n\n fn float_recip(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Recip;\n\n retro_unary!(RetroRecip, B::float_recip);\n\n impl<B: Backend> Backward<B, 1> for Recip {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let tensor = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let tmp = B::float_powf_scalar(tensor, -2.0);\n let value = B::float_neg(tmp);\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Recip\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroRecip::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_recip(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_recip(tensor.primitive)),\n }\n }\n\n fn 
float_swap_dims(tensor: FloatTensor<Self>, dim1: usize, dim2: usize) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct SwapDim;\n\n #[derive(new, Debug)]\n struct RetroSwapDims<B: Backend> {\n input_id: NodeID,\n dim1: usize,\n dim2: usize,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroSwapDims<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_swap_dims(input, self.dim1, self.dim2);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for SwapDim {\n type State = (usize, usize);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim1, dim2) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_swap_dims(grad, dim2, dim1)\n });\n }\n }\n\n match SwapDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSwapDims::<B>::new(tensor.node.id, dim1, dim2))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (dim1, dim2),\n B::float_swap_dims(tensor.primitive, dim1, dim2),\n ),\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_swap_dims(tensor.primitive, dim1, dim2))\n }\n }\n }\n\n fn float_permute(tensor: FloatTensor<Self>, axes: &[usize]) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct PermuteDim;\n\n #[derive(new, Debug)]\n struct RetroPermuteDims<B: Backend> {\n input_id: NodeID,\n axes: Vec<usize>,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroPermuteDims<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_permute(input, &self.axes);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for PermuteDim {\n type State = Vec<usize>;\n\n fn backward(\n self,\n ops: 
Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let axes = ops.state;\n\n let mut inverse = vec![0usize; axes.len()];\n axes.iter()\n .enumerate()\n .for_each(|(i, &axis)| inverse[axis] = i);\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_permute(grad, &inverse)\n });\n }\n }\n\n match PermuteDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroPermuteDims::<B>::new(tensor.node.id, axes.to_vec()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n prep.finish(axes.to_vec(), B::float_permute(tensor.primitive, axes))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_permute(tensor.primitive, axes)),\n }\n }\n\n fn float_flip(tensor: FloatTensor<Self>, axes: &[usize]) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct FlipDim;\n\n #[derive(new, Debug)]\n struct RetroFlipDims<B: Backend> {\n input_id: NodeID,\n axes: Vec<usize>,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroFlipDims<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_flip(input, &self.axes);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for FlipDim {\n type State = Vec<usize>;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let axes = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_flip(grad, &axes)\n });\n }\n }\n\n match FlipDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroFlipDims::<B>::new(tensor.node.id, axes.to_vec()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n prep.finish(axes.to_vec(), B::float_flip(tensor.primitive, axes))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_flip(tensor.primitive, axes)),\n }\n }\n\n fn 
float_reshape(tensor: FloatTensor<Self>, shape: Shape) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct ReshapeDim;\n\n #[derive(new, Debug)]\n struct RetroReshape<B: Backend> {\n input_id: NodeID,\n shape: Shape,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroReshape<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_reshape(input, self.shape.clone());\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for ReshapeDim {\n type State = (Shape, Shape);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape_original, shape) = ops.state;\n let ndims_out = shape.num_dims();\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let shape_grad = grad.shape();\n let mut grad = grad;\n\n for i in 0..ndims_out {\n if shape.dims[i] == 1 && shape_grad.dims[i] != 1 {\n grad = B::float_sum_dim(grad, i);\n }\n }\n\n B::float_reshape(grad, shape_original)\n });\n }\n }\n\n match ReshapeDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroReshape::<B>::new(tensor.node.id, shape.clone()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (tensor.primitive.shape(), shape.clone()),\n B::float_reshape(tensor.primitive, shape),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_reshape(tensor.primitive, shape)),\n }\n }\n\n fn float_gather(\n dim: usize,\n tensor: FloatTensor<Self>,\n indices: IntTensor<B>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Gather;\n\n impl<B: Backend> Backward<B, 1> for Gather {\n type State = (usize, IntTensor<B>, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, indices, shape, device) = ops.state;\n\n unary::<B, _>(ops.parents, 
ops.node, grads, |grad| {\n let zeros = B::float_zeros(shape, &device);\n B::float_scatter(dim, zeros, indices, grad)\n });\n }\n }\n\n match Gather\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n dim,\n indices.clone(),\n tensor.primitive.shape(),\n B::float_device(&tensor.primitive),\n ),\n B::float_gather(dim, tensor.primitive, indices),\n ),\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_gather(dim, tensor.primitive, indices))\n }\n }\n }\n\n fn float_scatter(\n dim: usize,\n tensor: FloatTensor<Self>,\n indices: IntTensor<B>,\n value: FloatTensor<Self>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Scatter;\n\n impl<B: Backend> Backward<B, 2> for Scatter {\n type State = (usize, IntTensor<B>, Shape, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, indices, shape_lhs, shape_rhs, device) = ops.state;\n let [indices_4lhs, indices_4rhs] = duplicate(&ops.parents, Some(indices));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let zeros = B::float_zeros(shape_lhs, &device);\n B::float_scatter(dim, grad, indices_4lhs.unwrap(), zeros)\n },\n |grad| {\n let zeros = B::float_zeros(shape_rhs, &device);\n B::float_scatter(dim, zeros, indices_4rhs.unwrap(), grad)\n },\n );\n }\n }\n\n match Scatter\n .prepare::<C>([tensor.node, value.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n dim,\n indices.clone(),\n tensor.primitive.shape(),\n value.primitive.shape(),\n B::float_device(&value.primitive),\n ),\n B::float_scatter(dim, tensor.primitive, indices, value.primitive),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_scatter(\n dim,\n tensor.primitive,\n indices,\n value.primitive,\n )),\n }\n }\n\n fn float_select(\n tensor: FloatTensor<Self>,\n dim: usize,\n indices: IntTensor<B>,\n ) -> FloatTensor<Self> {\n 
fn float_select(
    tensor: FloatTensor<Self>,
    dim: usize,
    indices: IntTensor<B>,
) -> FloatTensor<Self> {
    // Backward marker for select (index select along `dim`).
    #[derive(Debug)]
    struct Select;

    // Retro-forward: recomputes the select output from the stored input when
    // this memory-bound op's output is needed again during backward.
    #[derive(new, Debug)]
    struct RetroSelect<B: Backend> {
        input_id: NodeID,
        dim: usize,
        indices: IntTensor<B>,
    }

    impl<B: Backend> RetroForward for RetroSelect<B> {
        fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
            let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);
            let out = B::float_select(input, self.dim, self.indices.clone());
            states.save(out_node, out)
        }
    }

    impl<B: Backend> Backward<B, 1> for Select {
        // (dim, indices, input shape, device) captured at forward time.
        type State = (usize, IntTensor<B>, Shape, B::Device);

        fn backward(
            self,
            ops: Ops<Self::State, 1>,
            grads: &mut Gradients,
            _checkpointer: &mut Checkpointer,
        ) {
            let (dim, indices, shape, device) = ops.state;

            // Gradient of select: assign the incoming gradient back into a
            // zero tensor of the input's shape at the selected indices.
            unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                let zeros = B::float_zeros(shape, &device);
                B::float_select_assign(zeros, dim, indices, grad)
            });
        }
    }

    match Select
        .prepare::<C>([tensor.node.clone()])
        .memory_bound()
        .retro_forward(RetroSelect::<B>::new(tensor.node.id, dim, indices.clone()))
        .parents([&tensor])
        .stateful()
    {
        OpsKind::Tracked(prep) => prep.finish(
            (
                dim,
                indices.clone(),
                tensor.primitive.shape(),
                B::float_device(&tensor.primitive),
            ),
            B::float_select(tensor.primitive, dim, indices),
        ),
        OpsKind::UnTracked(prep) => {
            prep.finish(B::float_select(tensor.primitive, dim, indices))
        }
    }
}
states.get_state::<B::FloatTensorPrimitive>(&self.value_id);\n let out = B::float_select_assign(tensor, self.dim, self.indices.clone(), value);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 2> for IndexSelectDimAssign {\n type State = (usize, IntTensor<B>);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, indices) = ops.state;\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| grad,\n |grad| B::float_select(grad, dim, indices),\n );\n }\n }\n\n match IndexSelectDimAssign\n .prepare::<C>([tensor.node.clone(), value.node.clone()])\n .memory_bound()\n .retro_forward(RetroSelectAssign::<B>::new(\n tensor.node.id,\n dim,\n indices.clone(),\n value.node.id,\n ))\n .parents([&tensor, &value])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (dim, indices.clone()),\n B::float_select_assign(tensor.primitive, dim, indices, value.primitive),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_select_assign(\n tensor.primitive,\n dim,\n indices,\n value.primitive,\n )),\n }\n }\n\n fn float_slice(\n tensor: FloatTensor<Self>,\n ranges: &[core::ops::Range<usize>],\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Index;\n\n #[derive(new, Debug)]\n struct RetroSlice<B: Backend> {\n tensor_id: NodeID,\n ranges: Vec<core::ops::Range<usize>>,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroSlice<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);\n let out = B::float_slice(tensor, &self.ranges);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for Index {\n type State = (Vec<core::ops::Range<usize>>, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (ranges, shape, device) = ops.state;\n\n 
unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let zeros = B::float_zeros(shape, &device);\n B::float_slice_assign(zeros, &ranges, grad)\n });\n }\n }\n\n match Index\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSlice::<B>::new(tensor.node.id, ranges.to_vec()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n ranges.to_vec(),\n tensor.primitive.shape(),\n B::float_device(&tensor.primitive),\n ),\n B::float_slice(tensor.primitive, ranges),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_slice(tensor.primitive, ranges)),\n }\n }\n\n fn float_slice_assign(\n tensor: FloatTensor<Self>,\n ranges: &[core::ops::Range<usize>],\n value: FloatTensor<Self>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct SliceAssign;\n\n #[derive(new, Debug)]\n struct RetroSliceAssign<B: Backend> {\n tensor_id: NodeID,\n ranges: Vec<core::ops::Range<usize>>,\n value_id: NodeID,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroSliceAssign<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);\n let value = states.get_state::<B::FloatTensorPrimitive>(&self.value_id);\n let out = B::float_slice_assign(tensor, &self.ranges, value);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 2> for SliceAssign {\n type State = (Vec<core::ops::Range<usize>>, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (ranges, shape_rhs, device) = ops.state;\n let [ranges_4lhs, ranges_4rhs] = duplicate(&ops.parents, Some(ranges));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let zeros = B::float_zeros(shape_rhs, &device);\n B::float_slice_assign(grad, &ranges_4lhs.unwrap(), zeros)\n },\n |grad| B::float_slice(grad, &ranges_4rhs.unwrap()),\n );\n }\n }\n\n match 
SliceAssign\n .prepare::<C>([tensor.node.clone(), value.node.clone()])\n .memory_bound()\n .retro_forward(RetroSliceAssign::<B>::new(\n tensor.node.id,\n ranges.to_vec(),\n value.node.id,\n ))\n .parents([&tensor, &value])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n ranges.to_vec(),\n value.primitive.shape(),\n B::float_device(&value.primitive),\n ),\n B::float_slice_assign(tensor.primitive, ranges, value.primitive),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_slice_assign(\n tensor.primitive,\n ranges,\n value.primitive,\n )),\n }\n }\n\n fn float_mask_where(\n tensor: FloatTensor<Self>,\n mask: BoolTensor<Self>,\n source: FloatTensor<Self>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct MaskWhere;\n\n impl<B: Backend> Backward<B, 2> for MaskWhere {\n type State = (BoolTensor<B>, Shape, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (mask, shape_lhs, shape_rhs, device) = ops.state;\n let [mask_4lhs, mask_4rhs] = duplicate(&ops.parents, Some(mask));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let zeros = B::float_zeros(shape_lhs.clone(), &device);\n let grad = B::float_mask_where(grad, mask_4lhs.unwrap(), zeros);\n\n broadcast_shape::<B>(grad, &shape_lhs)\n },\n |grad| {\n let zeros = B::float_zeros(shape_rhs.clone(), &device);\n let grad = B::float_mask_where(zeros, mask_4rhs.unwrap(), grad);\n\n broadcast_shape::<B>(grad, &shape_rhs)\n },\n );\n }\n }\n\n match MaskWhere\n .prepare::<C>([tensor.node, source.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n mask.clone(),\n tensor.primitive.shape(),\n source.primitive.shape(),\n B::float_device(&source.primitive),\n ),\n B::float_mask_where(tensor.primitive, mask, source.primitive),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_mask_where(\n tensor.primitive,\n mask,\n source.primitive,\n )),\n }\n 
fn float_mask_fill(
    tensor: FloatTensor<Self>,
    mask: BoolTensor<B>,
    value: FloatElem<B>,
) -> FloatTensor<Self> {
    #[derive(Debug)]
    struct MaskFill;

    impl<B: Backend> Backward<B, 1> for MaskFill {
        // Only the boolean mask is needed for the gradient.
        type State = BoolTensor<B>;

        fn backward(
            self,
            ops: Ops<Self::State, 1>,
            grads: &mut Gradients,
            _checkpointer: &mut Checkpointer,
        ) {
            // Masked positions were replaced by a constant, so their gradient
            // is zero; elsewhere the gradient passes through unchanged.
            unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                B::float_mask_fill(grad, ops.state, 0.elem())
            });
        }
    }

    match MaskFill
        .prepare::<C>([tensor.node])
        .compute_bound()
        .stateful()
    {
        // Tracked: keep the mask for backward, then run the forward op.
        OpsKind::Tracked(prep) => prep.finish(
            mask.clone(),
            B::float_mask_fill(tensor.primitive, mask, value),
        ),
        OpsKind::UnTracked(prep) => {
            prep.finish(B::float_mask_fill(tensor.primitive, mask, value))
        }
    }
}
BoolTensor<B> {\n B::float_lower_equal(lhs.primitive, rhs.primitive)\n }\n\n fn float_lower_equal_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {\n B::float_lower_equal_elem(lhs.primitive, rhs)\n }\n\n fn float_detach(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n // When we detach a tensor, we remove it from the graph, but we still want to keep the\n // `require_grad` setting.\n let is_require_grad = Self::float_is_require_grad(&tensor);\n let tensor = AutodiffTensor::new(tensor.primitive);\n\n match is_require_grad {\n true => tensor.require_grad(),\n false => tensor,\n }\n }\n\n fn float_set_require_grad(tensor: FloatTensor<Self>, require_grad: bool) -> FloatTensor<Self> {\n if require_grad {\n return tensor.require_grad();\n }\n\n AutodiffTensor::new(tensor.primitive)\n }\n\n fn float_is_require_grad(tensor: &FloatTensor<Self>) -> bool {\n matches!(tensor.node.requirement, Requirement::Grad)\n }\n\n fn float_mean(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Mean;\n\n impl<B: Backend> Backward<B, 1> for Mean {\n type State = Shape;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let shape = ops.state;\n let val = 1_f64 / shape.num_elements() as f64;\n let ones = B::float_ones(shape, &B::float_device(&grad));\n let val = B::float_mul_scalar(ones, val.elem());\n\n let grad = unsqueeze_like::<B>(grad, val.shape());\n B::float_mul(val, grad)\n });\n }\n }\n\n match Mean.prepare::<C>([tensor.node]).compute_bound().stateful() {\n OpsKind::Tracked(prep) => {\n prep.finish(tensor.primitive.shape(), B::float_mean(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_mean(tensor.primitive)),\n }\n }\n\n fn float_sum(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sum;\n\n impl<B: Backend> Backward<B, 1> for Sum {\n type State = Shape;\n\n 
fn float_mean_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {
    #[derive(Debug)]
    struct MeanDim;

    impl<B: Backend> Backward<B, 1> for MeanDim {
        // (input shape, reduced dimension) captured at forward time.
        type State = (Shape, usize);

        fn backward(
            self,
            ops: Ops<Self::State, 1>,
            grads: &mut Gradients,
            _checkpointer: &mut Checkpointer,
        ) {
            let (shape, dim) = ops.state;

            unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                // Each input element contributes 1/N to the mean along `dim`,
                // so the gradient is the incoming gradient broadcast back to
                // the input shape and scaled by 1/N.
                let val = 1_f64 / shape.dims[dim] as f64;
                let ones = B::float_ones(shape, &B::float_device(&grad));
                let val = B::float_mul_scalar(ones, B::FloatElem::from_elem(val));

                // Collapse the gradient along `dim`, then broadcast-multiply.
                let grad = B::float_sum_dim(grad, dim);
                B::float_mul(val, grad)
            });
        }
    }

    match MeanDim
        .prepare::<C>([tensor.node])
        .compute_bound()
        .stateful()
    {
        OpsKind::Tracked(prep) => prep.finish(
            (tensor.primitive.shape(), dim),
            B::float_mean_dim(tensor.primitive, dim),
        ),
        OpsKind::UnTracked(prep) => prep.finish(B::float_mean_dim(tensor.primitive, dim)),
    }
}
fn float_exp(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
    #[derive(Debug)]
    struct Exp;

    // Retro-forward that recomputes exp from the checkpointed input.
    retro_unary!(RetroExp, B::float_exp);

    impl<B: Backend> Backward<B, 1> for Exp {
        // Node id of the checkpointed input; retrieved lazily in backward.
        type State = NodeID;

        fn backward(
            self,
            ops: Ops<Self::State, 1>,
            grads: &mut Gradients,
            checkpointer: &mut Checkpointer,
        ) {
            // d/dx exp(x) = exp(x): recompute the output from the checkpointed
            // input and scale the incoming gradient by it.
            let input = checkpointer.retrieve_node_output(ops.state);
            let output = B::float_exp(input);
            unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                B::float_mul(grad, output)
            });
        }
    }

    match Exp
        .prepare::<C>([tensor.node.clone()])
        .memory_bound()
        .retro_forward(RetroExp::<B>::new(tensor.node.id))
        .parents([&tensor])
        .stateful()
    {
        OpsKind::Tracked(mut prep) => {
            // Checkpoint the input so backward can recompute exp(x).
            let state = prep.checkpoint(&tensor);
            prep.finish(state, B::float_exp(tensor.primitive))
        }
        OpsKind::UnTracked(prep) => prep.finish(B::float_exp(tensor.primitive)),
    }
}
fn float_log1p(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
    #[derive(Debug)]
    struct Log1P;

    // Retro-forward that recomputes log1p from the checkpointed input.
    retro_unary!(RetroLog1P, B::float_log1p);

    impl<B: Backend> Backward<B, 1> for Log1P {
        // Node id of the checkpointed input; retrieved lazily in backward.
        type State = NodeID;

        fn backward(
            self,
            ops: Ops<Self::State, 1>,
            grads: &mut Gradients,
            checkpointer: &mut Checkpointer,
        ) {
            let input = checkpointer.retrieve_node_output(ops.state);
            unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                // d/dx log(1 + x) = 1 / (1 + x).
                let value = B::float_add_scalar(input, 1.elem());
                let value = B::float_powf_scalar(value, -1.0);

                B::float_mul(grad, value)
            });
        }
    }

    match Log1P
        .prepare::<C>([tensor.node.clone()])
        .memory_bound()
        .retro_forward(RetroLog1P::<B>::new(tensor.node.id))
        .parents([&tensor])
        .stateful()
    {
        OpsKind::Tracked(mut prep) => {
            // Checkpoint the input so backward can form 1 / (1 + x).
            let state = prep.checkpoint(&tensor);
            prep.finish(state, B::float_log1p(tensor.primitive))
        }
        OpsKind::UnTracked(prep) => prep.finish(B::float_log1p(tensor.primitive)),
    }
}
fn float_sqrt(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
    #[derive(Debug)]
    struct Sqrt;

    // Retro-forward that recomputes sqrt from the checkpointed input.
    retro_unary!(RetroSqrt, B::float_sqrt);

    impl<B: Backend> Backward<B, 1> for Sqrt {
        // Node id of the checkpointed input; retrieved lazily in backward.
        type State = NodeID;

        fn backward(
            self,
            ops: Ops<Self::State, 1>,
            grads: &mut Gradients,
            checkpointer: &mut Checkpointer,
        ) {
            let input = checkpointer.retrieve_node_output(ops.state);
            unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                // d/dx sqrt(x) = x^(-1/2) / 2.
                let value = B::float_div_scalar(B::float_powf_scalar(input, -0.5), 2.elem());

                B::float_mul(grad, value)
            });
        }
    }

    match Sqrt
        .prepare::<C>([tensor.node.clone()])
        .memory_bound()
        .retro_forward(RetroSqrt::<B>::new(tensor.node.id))
        .parents([&tensor])
        .stateful()
    {
        OpsKind::Tracked(mut prep) => {
            // Checkpoint the input so backward can form x^(-1/2) / 2.
            let state = prep.checkpoint(&tensor);
            prep.finish(state, B::float_sqrt(tensor.primitive))
        }
        OpsKind::UnTracked(prep) => prep.finish(B::float_sqrt(tensor.primitive)),
    }
}
fn float_cos(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
    #[derive(Debug)]
    struct Cos;

    // Retro-forward that recomputes cos from the checkpointed input.
    retro_unary!(RetroCos, B::float_cos);

    impl<B: Backend> Backward<B, 1> for Cos {
        // Node id of the checkpointed input; retrieved lazily in backward.
        type State = NodeID;

        fn backward(
            self,
            ops: Ops<Self::State, 1>,
            grads: &mut Gradients,
            checkpointer: &mut Checkpointer,
        ) {
            let input = checkpointer.retrieve_node_output(ops.state);
            unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                // d/dx cos(x) = -sin(x).
                let value = B::float_neg(B::float_sin(input));

                B::float_mul(grad, value)
            });
        }
    }

    match Cos
        .prepare::<C>([tensor.node.clone()])
        .memory_bound()
        .retro_forward(RetroCos::<B>::new(tensor.node.id))
        .parents([&tensor])
        .stateful()
    {
        OpsKind::Tracked(mut prep) => {
            // Checkpoint the input so backward can form -sin(x).
            let state = prep.checkpoint(&tensor);
            prep.finish(state, B::float_cos(tensor.primitive))
        }
        OpsKind::UnTracked(prep) => prep.finish(B::float_cos(tensor.primitive)),
    }
}
fn float_tanh(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
    #[derive(Debug)]
    struct Tanh;

    // Retro-forward that recomputes tanh from the checkpointed input.
    retro_unary!(RetroTanh, B::float_tanh);

    impl<B: Backend> Backward<B, 1> for Tanh {
        // Node id of the checkpointed input; retrieved lazily in backward.
        type State = NodeID;

        fn backward(
            self,
            ops: Ops<Self::State, 1>,
            grads: &mut Gradients,
            checkpointer: &mut Checkpointer,
        ) {
            // d/dx tanh(x) = 1 - tanh(x)^2; tanh is recomputed from the
            // checkpointed input rather than stored at forward time.
            let input = checkpointer.retrieve_node_output(ops.state);
            let state = B::float_tanh(input);
            unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                let value = B::float_add_scalar(
                    B::float_neg(B::float_powf_scalar(state, 2.0)),
                    1.elem(),
                );
                B::float_mul(grad, value)
            });
        }
    }

    match Tanh
        .prepare::<C>([tensor.node.clone()])
        .memory_bound()
        .retro_forward(RetroTanh::<B>::new(tensor.node.id))
        .parents([&tensor])
        .stateful()
    {
        OpsKind::Tracked(mut prep) => {
            // Checkpoint the input so backward can recompute tanh(x).
            let state = prep.checkpoint(&tensor);
            prep.finish(state, B::float_tanh(tensor.primitive))
        }
        OpsKind::UnTracked(prep) => prep.finish(B::float_tanh(tensor.primitive)),
    }
}
grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape, device) = ops.state;\n unary::<B, _>(ops.parents, ops.node, grads, |_grad| {\n B::float_zeros(shape, &device)\n })\n }\n }\n\n match Round\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroRound::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (tensor.primitive.shape(), B::float_device(&tensor.primitive)),\n B::float_round(tensor.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_round(tensor.primitive)),\n }\n }\n\n fn float_floor(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Floor;\n retro_unary!(RetroFloor, B::float_floor);\n\n impl<B: Backend> Backward<B, 1> for Floor {\n type State = (Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape, device) = ops.state;\n unary::<B, _>(ops.parents, ops.node, grads, |_grad| {\n B::float_zeros(shape, &device)\n })\n }\n }\n\n match Floor\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroFloor::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (tensor.primitive.shape(), B::float_device(&tensor.primitive)),\n B::float_floor(tensor.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_floor(tensor.primitive)),\n }\n }\n\n fn float_ceil(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Ceil;\n retro_unary!(RetroCeil, B::float_ceil);\n\n impl<B: Backend> Backward<B, 1> for Ceil {\n type State = (Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape, device) = ops.state;\n unary::<B, _>(ops.parents, ops.node, grads, |_grad| {\n B::float_zeros(shape, &device)\n })\n }\n }\n\n match Ceil\n 
.prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroCeil::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (tensor.primitive.shape(), B::float_device(&tensor.primitive)),\n B::float_floor(tensor.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_floor(tensor.primitive)),\n }\n }\n\n fn float_erf(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Erf;\n\n retro_unary!(RetroErf, B::float_erf);\n\n impl<B: Backend> Backward<B, 1> for Erf {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let ops = checkpointer.retrieve_node_output(ops.state);\n let exponent = B::float_neg(B::float_powf_scalar(ops, 2.0));\n let numerator = B::float_mul_scalar(B::float_exp(exponent), 2.0.elem());\n let denominator = core::f64::consts::PI.sqrt().elem();\n let value = B::float_div_scalar(numerator, denominator);\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Erf\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroErf::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_erf(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_erf(tensor.primitive)),\n }\n }\n\n fn float_cat(tensors: Vec<FloatTensor<Self>>, dim: usize) -> FloatTensor<Self> {\n #[derive(new, Debug)]\n struct CatStep<B: Backend> {\n nodes: Vec<Option<NodeRef>>,\n // The dimension of each tensor along the dim dimension.\n // This indicates the number of dimension concatenated for each tensor.\n dim_sizes: Vec<usize>,\n output: NodeRef,\n phantom: PhantomData<B>,\n dim: usize,\n }\n\n impl<B: Backend> Step for CatStep<B> {\n fn step(self: Box<Self>, grads: &mut Gradients, 
_checkpointer: &mut Checkpointer) {\n let grad = grads.consume::<B>(&self.output);\n let ranges: Vec<_> = grad.shape().dims.iter().map(|v| 0..*v).collect();\n\n let mut current_index = 0;\n\n self.nodes\n .into_iter()\n .zip(self.dim_sizes)\n .filter_map(|(node, dim_size)| node.map(|node| (node, dim_size)))\n .for_each(|(node, dim_size)| {\n let mut ranges = ranges.clone();\n ranges[self.dim] = current_index..dim_size + current_index;\n current_index += dim_size;\n grads.register::<B>(node.id, B::float_slice(grad.clone(), &ranges));\n });\n }\n\n fn node(&self) -> NodeID {\n self.output.id\n }\n\n fn parents(&self) -> Vec<NodeID> {\n self.nodes\n .iter()\n .filter_map(|node| node.clone())\n .map(|node| node.id)\n .collect()\n }\n fn depth(&self) -> usize {\n self.output.order\n }\n }\n\n let mut nodes = Vec::with_capacity(tensors.len());\n let mut primitives = Vec::with_capacity(tensors.len());\n let mut dim_sizes = Vec::with_capacity(tensors.len());\n\n tensors.into_iter().for_each(|tensor| {\n dim_sizes.push(tensor.primitive.shape().dims[dim]);\n nodes.push(tensor.node);\n primitives.push(tensor.primitive);\n });\n\n let requirement = Requirement::from_nodes(&nodes);\n\n // For simplicity, this operation does not checkpoint anything\n let cat_computing_property = ComputingProperty::Ambiguous;\n let checkpointer_builder = CheckpointerBuilder::default();\n\n let output = B::float_cat(primitives, dim);\n if requirement.is_none() {\n return AutodiffTensor::from_parents(\n output,\n &nodes,\n requirement,\n cat_computing_property,\n );\n }\n\n let output =\n AutodiffTensor::from_parents(output, &nodes, requirement, cat_computing_property);\n let nodes = nodes\n .into_iter()\n .map(|node| node.clone_if_require_grad())\n .collect::<Vec<_>>();\n\n let ops = CatStep::<B>::new(nodes, dim_sizes, output.node.clone(), dim);\n output.register_step(ops, checkpointer_builder)\n }\n\n fn float_max_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {\n match 
fn float_max_dim_with_indices(
    tensor: FloatTensor<Self>,
    dim: usize,
) -> (FloatTensor<Self>, IntTensor<B>) {
    // Backward is handled by `MaxMinDim` (shared with the min/max variants);
    // its state is the argmax indices plus the input shape.
    match MaxMinDim
        .prepare::<C>([tensor.node])
        .compute_bound()
        .stateful()
    {
        OpsKind::Tracked(prep) => {
            // Read the shape before `tensor.primitive` is moved below.
            let shape = tensor.primitive.shape();
            let (tensor, index) = B::float_max_dim_with_indices(tensor.primitive, dim);
            let tensor = prep.finish((index.clone(), shape), tensor);

            (tensor, index)
        }
        OpsKind::UnTracked(prep) => {
            let (tensor, index) = B::float_max_dim_with_indices(tensor.primitive, dim);
            let tensor = prep.finish(tensor);

            (tensor, index)
        }
    }
}
prep.finish(tensor);\n\n (tensor, index)\n }\n }\n }\n\n fn float_into_int(tensor: FloatTensor<Self>) -> <Autodiff<B> as Backend>::IntTensorPrimitive {\n B::float_into_int(tensor.primitive)\n }\n\n fn float_powf(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct PowF;\n\n retro_binary!(RetroPowf, B::float_powf);\n\n impl<B: Backend> Backward<B, 2> for PowF {\n type State = (NodeID, NodeID, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (lhs_id, rhs_id, broadcast) = ops.state;\n let lhs: B::FloatTensorPrimitive = checkpointer.retrieve_node_output(lhs_id);\n let rhs: B::FloatTensorPrimitive = checkpointer.retrieve_node_output(rhs_id);\n\n // Both lhs and rhs are needed for both lhs and rhs gradients, but we clone them\n // the number of times required by the parents specification.\n let [rhs_4lhs, rhs_4rhs] = duplicate(&ops.parents, Some(rhs));\n let [lhs_4lhs, lhs_4rhs] = duplicate(&ops.parents, Some(lhs));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n //rhs*(lhs.val**(rhs-1))*grad\n let rhs1 = rhs_4lhs.unwrap();\n let rhs2 = rhs1.clone();\n let lhs = lhs_4lhs.unwrap();\n\n let tmp = B::float_powf(\n lhs,\n B::float_sub_scalar(rhs1, B::FloatElem::from_elem(1.0)),\n );\n let value = B::float_mul(tmp, rhs2);\n let grad = B::float_mul(grad, value);\n\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n //lhs**rhs * ln(lhs) * grad\n let rhs = rhs_4rhs.unwrap();\n let lhs1 = lhs_4rhs.unwrap();\n let lhs2 = lhs1.clone();\n let tmp = B::float_powf(lhs1, rhs);\n let value = B::float_mul(tmp, B::float_log(lhs2));\n let grad = B::float_mul(grad, value);\n\n broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match PowF\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n 
.retro_forward(RetroPowf::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = prep.checkpoint(&lhs);\n let rhs_state = prep.checkpoint(&rhs);\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n B::float_powf(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_powf(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_sign(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sign;\n\n retro_unary!(RetroSign, B::float_sign);\n\n impl<B: Backend> Backward<B, 1> for Sign {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad|\n // Always return 0 because the derivative of the sign function\n // does not contribute to gradient updates in a meaningful way.\n B::float_mul_scalar(grad, 0.elem()));\n }\n }\n\n Sign.prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSign::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateless(B::float_sign(tensor.primitive))\n }\n\n fn float_expand(tensor: FloatTensor<Self>, shape: Shape) -> FloatTensor<Self> {\n // D1: tensor, D2: shape\n #[derive(Debug)]\n struct ExpandDim;\n\n #[derive(new, Debug)]\n struct RetroExpand<B: Backend> {\n input_id: NodeID,\n shape: Shape,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroExpand<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_expand(input, self.shape.clone());\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for ExpandDim {\n type State = (Shape, Shape);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape_in, shape_out) = ops.state;\n let 
ndims_in = shape_in.num_dims();\n let ndims_out = shape_out.num_dims();\n\n let mut shape_expanded = vec![1; ndims_out];\n\n debug_assert!(ndims_out >= ndims_in);\n\n for i in 0..ndims_in {\n shape_expanded[i + (ndims_out - ndims_in)] = shape_in.dims[i];\n }\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let shape_grad = grad.shape();\n let mut grad = grad;\n\n #[allow(clippy::needless_range_loop)]\n for i in 0..ndims_out {\n if shape_expanded[i] == 1 && shape_grad.dims[i] != 1 {\n grad = B::float_sum_dim(grad, i);\n }\n }\n\n B::float_reshape(grad, shape_in)\n });\n }\n }\n\n match ExpandDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroExpand::<B>::new(tensor.node.id, shape.clone()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (tensor.primitive.shape(), shape.clone()),\n B::float_expand(tensor.primitive, shape),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_expand(tensor.primitive, shape)),\n }\n }\n\n fn float_sort(tensor: FloatTensor<Self>, dim: usize, descending: bool) -> FloatTensor<Self> {\n match super::sort::SortDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let shape = tensor.primitive.shape();\n let (tensor, indices) =\n B::float_sort_with_indices(tensor.primitive, dim, descending);\n prep.finish((indices, shape), tensor)\n }\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_sort(tensor.primitive, dim, descending))\n }\n }\n }\n\n fn float_sort_with_indices(\n tensor: FloatTensor<Self>,\n dim: usize,\n descending: bool,\n ) -> (FloatTensor<Self>, IntTensor<B>) {\n match super::sort::SortDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let shape = tensor.primitive.shape();\n let (tensor, indices) =\n B::float_sort_with_indices(tensor.primitive, dim, descending);\n let tensor = prep.finish((indices.clone(), shape), tensor);\n\n (tensor, indices)\n 
}\n OpsKind::UnTracked(prep) => {\n let (tensor, indices) =\n B::float_sort_with_indices(tensor.primitive, dim, descending);\n let tensor = prep.finish(tensor);\n\n (tensor, indices)\n }\n }\n }\n\n fn float_argsort(tensor: FloatTensor<Self>, dim: usize, descending: bool) -> IntTensor<B> {\n B::float_argsort(tensor.primitive, dim, descending)\n }\n\n fn float_repeat_dim(tensor: FloatTensor<Self>, dim: usize, times: usize) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Repeat;\n\n #[derive(new, Debug)]\n struct RetroRepeat<B: Backend> {\n tensor_id: NodeID,\n dim: usize,\n times: usize,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroRepeat<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);\n let out = B::float_repeat_dim(tensor, self.dim, self.times);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for Repeat {\n type State = (usize, usize);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, times) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let mut dims = grad.shape().dims;\n let orig_dim_size = dims[dim] / times;\n if orig_dim_size > 1 {\n dims[dim] = orig_dim_size;\n let orig_dims = dims.clone();\n dims.insert(dim + 1, times); // shape [..., orig_dim_size, times, ...]\n let grad = B::float_reshape(grad, Shape::from(dims));\n let grad = B::float_sum_dim(grad, dim + 1); // sum over repeat times\n B::float_reshape(grad, Shape::from(orig_dims))\n } else {\n B::float_sum_dim(grad, dim)\n }\n });\n }\n }\n\n match Repeat\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroRepeat::<B>::new(tensor.node.id, dim, times))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (dim, times),\n B::float_repeat_dim(tensor.primitive, dim, times),\n ),\n 
OpsKind::UnTracked(prep) => {\n prep.finish(B::float_repeat_dim(tensor.primitive, dim, times))\n }\n }\n }\n\n fn float_cast(tensor: FloatTensor<Self>, dtype: burn_tensor::FloatDType) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_cast(tensor.primitive, dtype))\n }\n\n // TODO: Implement float_prod and float_sum\n // https://github.com/tracel-ai/burn/issues/1458\n}",
"class_signature": "impl<B: Backend, C: CheckpointStrategy> FloatTensorOps<Self> for Autodiff<B, C>"
} |
float_add_scalar | burn-main/crates/burn-autodiff/src/ops/tensor.rs | fn float_add_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {
#[derive(Debug)]
struct AddScalar;
retro_unary_scalar!(RetroAddScalar, B::float_add_scalar);
impl<B: Backend> Backward<B, 1> for AddScalar {
type State = ();
fn backward(
self,
ops: Ops<Self::State, 1>,
grads: &mut Gradients,
_checkpointer: &mut Checkpointer,
) {
unary::<B, _>(ops.parents, ops.node, grads, |grad| grad);
}
}
AddScalar
.prepare::<C>([lhs.node.clone()])
.memory_bound()
.retro_forward(RetroAddScalar::<B>::new(lhs.node.id, rhs))
.parents([&lhs])
.stateless(B::float_add_scalar(lhs.primitive, rhs))
} | use alloc::{boxed::Box, vec, vec::Vec};
use core::marker::PhantomData;
#[cfg(not(feature = "std"))]
#[allow(unused_imports, reason = "required on aarch64, unused on x86_64")]
use num_traits::float::Float;
use crate::{
Autodiff,
checkpoint::{
base::Checkpointer, builder::CheckpointerBuilder, retro_forward::RetroForward,
state::BackwardStates, strategy::CheckpointStrategy,
},
grads::Gradients,
graph::{ComputingProperty, NodeID, NodeRef, Requirement, Step},
ops::{Backward, Ops, OpsKind, binary, broadcast_shape, unary},
retro_binary, retro_unary, retro_unary_scalar,
tensor::AutodiffTensor,
utils::duplicate,
};
use burn_tensor::{
Device, ElementConversion, Shape, TensorData, TensorMetadata,
backend::Backend,
ops::{BoolTensor, FloatElem, FloatTensor, FloatTensorOps, IntTensor},
};
use super::maxmin::MaxMinDim;
// Unsqueeze op on primitive.
fn unsqueeze_like<B: Backend>(
tensor: B::FloatTensorPrimitive,
shape: Shape,
) -> B::FloatTensorPrimitive {
/*
let mut dims = [1; D2];
let num_ones = D2 - D;
let shape = self.shape();
dims[num_ones..(D + num_ones)].copy_from_slice(&shape.dims[..D]);
let shape = Shape::new(dims);
self.reshape(shape)
*/
let ndims_out = shape.num_dims();
let shape = tensor.shape();
let ndims_in = shape.num_dims();
let mut dims = vec![1; ndims_out];
let num_ones = ndims_out - ndims_in;
dims[num_ones..(ndims_in + num_ones)].copy_from_slice(&shape.dims[..ndims_in]);
B::float_reshape(tensor, Shape::from(dims))
}
impl<B: Backend, C: CheckpointStrategy> FloatTensorOps<Self> for Autodiff<B, C> {
fn float_from_data(data: TensorData, device: &Device<Self>) -> FloatTensor<Self> {
AutodiffTensor::new(B::float_from_data(data, device))
}
fn float_random(
shape: Shape,
distribution: burn_tensor::Distribution,
device: &Device<Self>,
) -> FloatTensor<Self> {
AutodiffTensor::new(B::float_random(shape, distribution, device))
}
fn float_zeros(shape: Shape, device: &Device<Self>) -> FloatTensor<Self> {
AutodiffTensor::new(B::float_zeros(shape, device))
}
fn float_ones(shape: Shape, device: &Device<Self>) -> FloatTensor<Self> {
AutodiffTensor::new(B::float_ones(shape, device))
}
async fn float_into_data(tensor: FloatTensor<Self>) -> TensorData {
B::float_into_data(tensor.primitive).await
}
fn float_device(tensor: &FloatTensor<Self>) -> Device<Self> {
B::float_device(&tensor.primitive)
}
fn float_to_device(tensor: FloatTensor<Self>, device: &Device<Self>) -> FloatTensor<Self> {
#[derive(Debug)]
struct ToDevice;
impl<B: Backend> Backward<B, 1> for ToDevice {
type State = B::Device;
fn backward(
self,
ops: Ops<Self::State, 1>,
grads: &mut Gradients,
_checkpointer: &mut Checkpointer,
) {
unary::<B, _>(ops.parents, ops.node, grads, |grad| {
B::float_to_device(grad, &ops.state)
});
}
}
match ToDevice
.prepare::<C>([tensor.node])
.compute_bound()
.stateful()
{
OpsKind::Tracked(prep) => {
let device_old = B::float_device(&tensor.primitive);
prep.finish(device_old, B::float_to_device(tensor.primitive, device))
}
OpsKind::UnTracked(prep) => prep.finish(B::float_to_device(tensor.primitive, device)),
}
}
fn float_empty(shape: Shape, device: &Device<Self>) -> FloatTensor<Self> {
AutodiffTensor::new(B::float_empty(shape, device))
}
fn float_add(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {
#[derive(Debug)]
struct Add;
retro_binary!(RetroAdd, B::float_add);
impl<B: Backend> Backward<B, 2> for Add {
type State = (Shape, Shape);
fn backward(
self,
ops: Ops<Self::State, 2>,
grads: &mut Gradients,
_checkpointer: &mut Checkpointer,
) {
let (shape_lhs, shape_rhs) = ops.state;
binary::<B, _, _>(
ops.parents,
ops.node,
grads,
|grad| broadcast_shape::<B>(grad, &shape_lhs),
|grad| broadcast_shape::<B>(grad, &shape_rhs),
);
}
}
match Add
.prepare::<C>([lhs.node.clone(), rhs.node.clone()])
.memory_bound()
.retro_forward(RetroAdd::<B>::new(lhs.node.id, rhs.node.id))
.parents([&lhs, &rhs])
.stateful()
{
OpsKind::Tracked(preps) => preps.finish(
(lhs.primitive.shape(), rhs.primitive.shape()),
B::float_add(lhs.primitive, rhs.primitive),
),
OpsKind::UnTracked(preps) => preps.finish(B::float_add(lhs.primitive, rhs.primitive)),
}
}
fn float_add_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {
#[derive(Debug)]
struct AddScalar;
retro_unary_scalar!(RetroAddScalar, B::float_add_scalar);
impl<B: Backend> Backward<B, 1> for AddScalar {
type State = ();
fn backward(
self,
ops: Ops<Self::State, 1>,
grads: &mut Gradients,
_checkpointer: &mut Checkpointer,
) {
unary::<B, _>(ops.parents, ops.node, grads, |grad| grad);
}
}
AddScalar
.prepare::<C>([lhs.node.clone()])
.memory_bound()
.retro_forward(RetroAddScalar::<B>::new(lhs.node.id, rhs))
.parents([&lhs])
.stateless(B::float_add_scalar(lhs.primitive, rhs))
}
fn float_sub(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {
#[derive(Debug)]
struct Sub;
retro_binary!(RetroSub, B::float_sub);
impl<B: Backend> Backward<B, 2> for Sub {
type State = (Shape, Shape);
fn backward(
self,
ops: Ops<Self::State, 2>,
grads: &mut Gradients,
_checkpointer: &mut Checkpointer,
) {
let (shape_lhs, shape_rhs) = ops.state;
binary::<B, _, _>(
ops.parents,
ops.node,
grads,
|grad| broadcast_shape::<B>(grad, &shape_lhs),
|grad| broadcast_shape::<B>(B::float_neg(grad), &shape_rhs),
);
}
}
match Sub
.prepare::<C>([lhs.node.clone(), rhs.node.clone()])
.memory_bound()
.retro_forward(RetroSub::<B>::new(lhs.node.id, rhs.node.id))
.parents([&lhs, &rhs])
.stateful()
{
OpsKind::Tracked(preps) => preps.finish(
(lhs.primitive.shape(), rhs.primitive.shape()),
B::float_sub(lhs.primitive, rhs.primitive),
),
OpsKind::UnTracked(preps) => preps.finish(B::float_sub(lhs.primitive, rhs.primitive)),
}
}
fn float_sub_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {
#[derive(Debug)]
struct SubScalar;
retro_unary_scalar!(RetroSubScalar, B::float_sub_scalar);
impl<B: Backend> Backward<B, 1> for SubScalar {
type State = ();
fn backward(
self,
ops: Ops<Self::State, 1>,
grads: &mut Gradients,
_checkpointer: &mut Checkpointer,
) {
unary::<B, _>(ops.parents, ops.node, grads, |grad| grad);
}
}
SubScalar
.prepare::<C>([lhs.node.clone()])
.memory_bound()
.retro_forward(RetroSubScalar::<B>::new(lhs.node.id, rhs))
.parents([&lhs])
.stateless(B::float_sub_scalar(lhs.primitive, rhs))
}
fn float_mul(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {
#[derive(Debug)]
struct Mul;
retro_binary!(RetroMul, B::float_mul);
impl<B: Backend> Backward<B, 2> for Mul {
type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);
fn backward(
self,
ops: Ops<Self::State, 2>,
grads: &mut Gradients,
checkpointer: &mut Checkpointer,
) {
let (lhs, rhs, broadcast) = ops.state;
let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));
let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));
binary::<B, _, _>(
ops.parents,
ops.node,
grads,
|grad| {
let grad = B::float_mul(grad, rhs.unwrap());
broadcast.backward_lhs::<B>(grad)
},
|grad| {
let grad = B::float_mul(grad, lhs.unwrap());
broadcast.backward_rhs::<B>(grad)
},
);
}
}
let lhs_tracked = lhs.is_tracked();
let rhs_tracked = rhs.is_tracked();
let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);
match Mul
.prepare::<C>([lhs.node.clone(), rhs.node.clone()])
.memory_bound()
.retro_forward(RetroMul::<B>::new(lhs.node.id, rhs.node.id))
.parents([&lhs, &rhs])
.stateful()
{
OpsKind::Tracked(mut prep) => {
let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));
let rhs_state = lhs_tracked.then(|| prep.checkpoint(&rhs));
prep.finish(
(lhs_state, rhs_state, broadcast),
B::float_mul(lhs.primitive, rhs.primitive),
)
}
OpsKind::UnTracked(prep) => prep.finish(B::float_mul(lhs.primitive, rhs.primitive)),
}
}
fn float_mul_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {
#[derive(Debug)]
struct MulScalar;
retro_unary_scalar!(RetroMulScalar, B::float_mul_scalar);
impl<B: Backend> Backward<B, 1> for MulScalar {
type State = FloatElem<B>;
fn backward(
self,
ops: Ops<Self::State, 1>,
grads: &mut Gradients,
_checkpointer: &mut Checkpointer,
) {
unary::<B, _>(ops.parents, ops.node, grads, |grad| {
B::float_mul_scalar(grad, ops.state)
});
}
}
match MulScalar
.prepare::<C>([lhs.node.clone()])
.memory_bound()
.retro_forward(RetroMulScalar::<B>::new(lhs.node.id, rhs))
.parents([&lhs])
.stateful()
{
OpsKind::Tracked(prep) => prep.finish(rhs, B::float_mul_scalar(lhs.primitive, rhs)),
OpsKind::UnTracked(prep) => prep.finish(B::float_mul_scalar(lhs.primitive, rhs)),
}
}
fn float_div(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {
#[derive(Debug)]
struct Div;
retro_binary!(RetroDiv, B::float_div);
impl<B: Backend> Backward<B, 2> for Div {
type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);
fn backward(
self,
ops: Ops<Self::State, 2>,
grads: &mut Gradients,
checkpointer: &mut Checkpointer,
) {
let (lhs, rhs, broadcast) = ops.state;
let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));
let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));
let [rhs_4lhs, rhs_4rhs] = duplicate(&ops.parents, rhs);
binary::<B, _, _>(
ops.parents,
ops.node,
grads,
|grad| {
let rhs = rhs_4lhs.unwrap();
let value = B::float_powf_scalar(rhs, -1.0);
let grad = B::float_mul(grad, value);
broadcast.backward_lhs::<B>(grad)
},
|grad| {
let rhs = rhs_4rhs.unwrap();
let lhs = lhs.unwrap();
let value = B::float_div(B::float_neg(lhs), B::float_powf_scalar(rhs, 2.0));
let grad = B::float_mul(grad, value);
broadcast.backward_rhs::<B>(grad)
},
);
}
}
let lhs_tracked = lhs.is_tracked();
let rhs_tracked = rhs.is_tracked();
let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);
match Div
.prepare::<C>([lhs.node.clone(), rhs.node.clone()])
.memory_bound()
.retro_forward(RetroDiv::<B>::new(lhs.node.id, rhs.node.id))
.parents([&lhs, &rhs])
.stateful()
{
OpsKind::Tracked(mut prep) => {
let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));
let rhs_state = (lhs_tracked || rhs_tracked).then(|| prep.checkpoint(&rhs));
prep.finish(
(lhs_state, rhs_state, broadcast),
B::float_div(lhs.primitive, rhs.primitive),
)
}
OpsKind::UnTracked(prep) => prep.finish(B::float_div(lhs.primitive, rhs.primitive)),
}
}
fn float_div_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {
#[derive(Debug)]
struct DivScalar;
retro_unary_scalar!(RetroDivScalar, B::float_div_scalar);
impl<B: Backend> Backward<B, 1> for DivScalar {
type State = FloatElem<B>;
fn backward(
self,
ops: Ops<Self::State, 1>,
grads: &mut Gradients,
_checkpointer: &mut Checkpointer,
) {
unary::<B, _>(ops.parents, ops.node, grads, |grad| {
let tmp = 1.0 / ops.state.elem::<f32>();
B::float_mul_scalar(grad, tmp.elem())
});
}
}
match DivScalar
.prepare::<C>([lhs.node.clone()])
.memory_bound()
.retro_forward(RetroDivScalar::<B>::new(lhs.node.id, rhs))
.parents([&lhs])
.stateful()
{
OpsKind::Tracked(prep) => prep.finish(rhs, B::float_div_scalar(lhs.primitive, rhs)),
OpsKind::UnTracked(prep) => prep.finish(B::float_div_scalar(lhs.primitive, rhs)),
}
}
fn float_remainder(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {
#[derive(Debug)]
struct Rem;
retro_binary!(RetroRem, B::float_remainder);
impl<B: Backend> Backward<B, 2> for Rem {
type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);
fn backward(
self,
ops: Ops<Self::State, 2>,
grads: &mut Gradients,
checkpointer: &mut Checkpointer,
) {
let (lhs, rhs, broadcast) = ops.state;
let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));
let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));
binary::<B, _, _>(
ops.parents,
ops.node,
grads,
|grad| {
// remainder(x, y) = x - floor(x / y) * y
// partial(x - floor(x / y) * y, x) = 1
broadcast.backward_lhs::<B>(grad)
},
|grad| {
// partial(x - floor(x / y) * y, y) = - floor(x / y)
let rhs = rhs.unwrap();
let lhs = lhs.unwrap();
let value = B::float_neg(B::float_floor(B::float_div(lhs, rhs)));
let grad = B::float_mul(grad, value);
broadcast.backward_rhs::<B>(grad)
},
);
}
}
let lhs_tracked = lhs.is_tracked();
let rhs_tracked = rhs.is_tracked();
let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);
match Rem
.prepare::<C>([lhs.node.clone(), rhs.node.clone()])
.memory_bound()
.retro_forward(RetroRem::<B>::new(lhs.node.id, rhs.node.id))
.parents([&lhs, &rhs])
.stateful()
{
OpsKind::Tracked(mut prep) => {
let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));
let rhs_state = (lhs_tracked || rhs_tracked).then(|| prep.checkpoint(&rhs));
prep.finish(
(lhs_state, rhs_state, broadcast),
B::float_remainder(lhs.primitive, rhs.primitive),
)
}
OpsKind::UnTracked(prep) => {
prep.finish(B::float_remainder(lhs.primitive, rhs.primitive))
}
}
}
fn float_remainder_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {
#[derive(Debug)]
struct RemainderScalar;
retro_unary_scalar!(RetroRemainderScalar, B::float_remainder_scalar);
impl<B: Backend> Backward<B, 1> for RemainderScalar {
type State = ();
fn backward(
self,
ops: Ops<Self::State, 1>,
grads: &mut Gradients,
_checkpointer: &mut Checkpointer,
) {
unary::<B, _>(ops.parents, ops.node, grads, |grad| grad);
}
}
RemainderScalar
.prepare::<C>([lhs.node.clone()])
.memory_bound()
.retro_forward(RetroRemainderScalar::<B>::new(lhs.node.id, rhs))
.parents([&lhs])
.stateless(B::float_remainder_scalar(lhs.primitive, rhs))
}
fn float_matmul(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {
#[derive(Debug)]
struct Matmul;
impl<B: Backend> Backward<B, 2> for Matmul {
type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);
fn backward(
self,
ops: Ops<Self::State, 2>,
grads: &mut Gradients,
checkpointer: &mut Checkpointer,
) {
let (lhs, rhs, broadcast) = ops.state;
let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));
let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));
binary::<B, _, _>(
ops.parents,
ops.node,
grads,
|grad| {
let rhs = B::float_transpose(rhs.unwrap());
let grad = B::float_matmul(grad, rhs);
broadcast.backward_lhs::<B>(grad)
},
|grad| {
let lhs = B::float_transpose(lhs.unwrap());
let grad = B::float_matmul(lhs, grad);
broadcast.backward_rhs::<B>(grad)
},
);
}
}
let lhs_tracked = lhs.is_tracked();
let rhs_tracked = rhs.is_tracked();
let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);
match Matmul
.prepare::<C>([lhs.node.clone(), rhs.node.clone()])
.compute_bound()
.stateful()
{
OpsKind::Tracked(mut prep) => {
let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));
let rhs_state = lhs_tracked.then(|| prep.checkpoint(&rhs));
prep.finish(
(lhs_state, rhs_state, broadcast),
B::float_matmul(lhs.primitive, rhs.primitive),
)
}
OpsKind::UnTracked(prep) => prep.finish(B::float_matmul(lhs.primitive, rhs.primitive)),
}
}
fn float_neg(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
#[derive(Debug)]
struct Neg;
retro_unary!(RetroNeg, B::float_neg);
impl<B: Backend> Backward<B, 1> for Neg {
type State = ();
fn backward(
self,
ops: Ops<Self::State, 1>,
grads: &mut Gradients,
_checkpointer: &mut Checkpointer,
) {
unary::<B, _>(ops.parents, ops.node, grads, |grad| B::float_neg(grad));
}
}
Neg.prepare::<C>([tensor.node.clone()])
.memory_bound()
.retro_forward(RetroNeg::<B>::new(tensor.node.id))
.parents([&tensor])
.stateless(B::float_neg(tensor.primitive))
}
fn float_recip(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
#[derive(Debug)]
struct Recip;
retro_unary!(RetroRecip, B::float_recip);
impl<B: Backend> Backward<B, 1> for Recip {
type State = NodeID;
fn backward(
self,
ops: Ops<Self::State, 1>,
grads: &mut Gradients,
checkpointer: &mut Checkpointer,
) {
let tensor = checkpointer.retrieve_node_output(ops.state);
unary::<B, _>(ops.parents, ops.node, grads, |grad| {
let tmp = B::float_powf_scalar(tensor, -2.0);
let value = B::float_neg(tmp);
B::float_mul(grad, value)
});
}
}
match Recip
.prepare::<C>([tensor.node.clone()])
.memory_bound()
.retro_forward(RetroRecip::<B>::new(tensor.node.id))
.parents([&tensor])
.stateful()
{
OpsKind::Tracked(mut prep) => {
let state = prep.checkpoint(&tensor);
prep.finish(state, B::float_recip(tensor.primitive))
}
OpsKind::UnTracked(prep) => prep.finish(B::float_recip(tensor.primitive)),
}
}
fn float_swap_dims(tensor: FloatTensor<Self>, dim1: usize, dim2: usize) -> FloatTensor<Self> {
#[derive(Debug)]
struct SwapDim;
#[derive(new, Debug)]
struct RetroSwapDims<B: Backend> {
input_id: NodeID,
dim1: usize,
dim2: usize,
_backend: PhantomData<B>,
}
impl<B: Backend> RetroForward for RetroSwapDims<B> {
fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);
let out = B::float_swap_dims(input, self.dim1, self.dim2);
states.save(out_node, out)
}
}
impl<B: Backend> Backward<B, 1> for SwapDim {
type State = (usize, usize);
fn backward(
self,
ops: Ops<Self::State, 1>,
grads: &mut Gradients,
_checkpointer: &mut Checkpointer,
) {
let (dim1, dim2) = ops.state;
unary::<B, _>(ops.parents, ops.node, grads, |grad| {
B::float_swap_dims(grad, dim2, dim1)
});
}
}
match SwapDim
.prepare::<C>([tensor.node.clone()])
.memory_bound()
.retro_forward(RetroSwapDims::<B>::new(tensor.node.id, dim1, dim2))
.parents([&tensor])
.stateful()
{
OpsKind::Tracked(prep) => prep.finish(
(dim1, dim2),
B::float_swap_dims(tensor.primitive, dim1, dim2),
),
OpsKind::UnTracked(prep) => {
prep.finish(B::float_swap_dims(tensor.primitive, dim1, dim2))
}
}
}
fn float_permute(tensor: FloatTensor<Self>, axes: &[usize]) -> FloatTensor<Self> {
#[derive(Debug)]
struct PermuteDim;
#[derive(new, Debug)]
struct RetroPermuteDims<B: Backend> {
input_id: NodeID,
axes: Vec<usize>,
_backend: PhantomData<B>,
}
impl<B: Backend> RetroForward for RetroPermuteDims<B> {
fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);
let out = B::float_permute(input, &self.axes);
states.save(out_node, out)
}
}
impl<B: Backend> Backward<B, 1> for PermuteDim {
type State = Vec<usize>;
fn backward(
self,
ops: Ops<Self::State, 1>,
grads: &mut Gradients,
_checkpointer: &mut Checkpointer,
) {
let axes = ops.state;
let mut inverse = vec![0usize; axes.len()];
axes.iter()
.enumerate()
.for_each(|(i, &axis)| inverse[axis] = i);
unary::<B, _>(ops.parents, ops.node, grads, |grad| {
B::float_permute(grad, &inverse)
});
}
}
match PermuteDim
.prepare::<C>([tensor.node.clone()])
.memory_bound()
.retro_forward(RetroPermuteDims::<B>::new(tensor.node.id, axes.to_vec()))
.parents([&tensor])
.stateful()
{
OpsKind::Tracked(prep) => {
prep.finish(axes.to_vec(), B::float_permute(tensor.primitive, axes))
}
OpsKind::UnTracked(prep) => prep.finish(B::float_permute(tensor.primitive, axes)),
}
}
fn float_flip(tensor: FloatTensor<Self>, axes: &[usize]) -> FloatTensor<Self> {
#[derive(Debug)]
struct FlipDim;
#[derive(new, Debug)]
struct RetroFlipDims<B: Backend> {
input_id: NodeID,
axes: Vec<usize>,
_backend: PhantomData<B>,
}
impl<B: Backend> RetroForward for RetroFlipDims<B> {
fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);
let out = B::float_flip(input, &self.axes);
states.save(out_node, out)
}
}
impl<B: Backend> Backward<B, 1> for FlipDim {
type State = Vec<usize>;
fn backward(
self,
ops: Ops<Self::State, 1>,
grads: &mut Gradients,
_checkpointer: &mut Checkpointer,
) {
let axes = ops.state;
unary::<B, _>(ops.parents, ops.node, grads, |grad| {
B::float_flip(grad, &axes)
});
}
}
match FlipDim
.prepare::<C>([tensor.node.clone()])
.memory_bound()
.retro_forward(RetroFlipDims::<B>::new(tensor.node.id, axes.to_vec()))
.parents([&tensor])
.stateful()
{
OpsKind::Tracked(prep) => {
prep.finish(axes.to_vec(), B::float_flip(tensor.primitive, axes))
}
OpsKind::UnTracked(prep) => prep.finish(B::float_flip(tensor.primitive, axes)),
}
}
fn float_reshape(tensor: FloatTensor<Self>, shape: Shape) -> FloatTensor<Self> {
#[derive(Debug)]
struct ReshapeDim;
#[derive(new, Debug)]
struct RetroReshape<B: Backend> {
input_id: NodeID,
shape: Shape,
_backend: PhantomData<B>,
}
impl<B: Backend> RetroForward for RetroReshape<B> {
fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);
let out = B::float_reshape(input, self.shape.clone());
states.save(out_node, out)
}
}
impl<B: Backend> Backward<B, 1> for ReshapeDim {
type State = (Shape, Shape);
fn backward(
self,
ops: Ops<Self::State, 1>,
grads: &mut Gradients,
_checkpointer: &mut Checkpointer,
) {
let (shape_original, shape) = ops.state;
let ndims_out = shape.num_dims();
unary::<B, _>(ops.parents, ops.node, grads, |grad| {
let shape_grad = grad.shape();
let mut grad = grad;
for i in 0..ndims_out {
if shape.dims[i] == 1 && shape_grad.dims[i] != 1 {
grad = B::float_sum_dim(grad, i);
}
}
B::float_reshape(grad, shape_original)
});
}
}
match ReshapeDim
.prepare::<C>([tensor.node.clone()])
.memory_bound()
.retro_forward(RetroReshape::<B>::new(tensor.node.id, shape.clone()))
.parents([&tensor])
.stateful()
{
OpsKind::Tracked(prep) => prep.finish(
(tensor.primitive.shape(), shape.clone()),
B::float_reshape(tensor.primitive, shape),
),
OpsKind::UnTracked(prep) => prep.finish(B::float_reshape(tensor.primitive, shape)),
}
}
fn float_gather(
dim: usize,
tensor: FloatTensor<Self>,
indices: IntTensor<B>,
) -> FloatTensor<Self> {
#[derive(Debug)]
struct Gather;
impl<B: Backend> Backward<B, 1> for Gather {
type State = (usize, IntTensor<B>, Shape, B::Device);
fn backward(
self,
ops: Ops<Self::State, 1>,
grads: &mut Gradients,
_checkpointer: &mut Checkpointer,
) {
let (dim, indices, shape, device) = ops.state;
unary::<B, _>(ops.parents, ops.node, grads, |grad| {
let zeros = B::float_zeros(shape, &device);
B::float_scatter(dim, zeros, indices, grad)
});
}
}
match Gather
.prepare::<C>([tensor.node])
.compute_bound()
.stateful()
{
OpsKind::Tracked(prep) => prep.finish(
(
dim,
indices.clone(),
tensor.primitive.shape(),
B::float_device(&tensor.primitive),
),
B::float_gather(dim, tensor.primitive, indices),
),
OpsKind::UnTracked(prep) => {
prep.finish(B::float_gather(dim, tensor.primitive, indices))
}
}
}
fn float_scatter(
dim: usize,
tensor: FloatTensor<Self>,
indices: IntTensor<B>,
value: FloatTensor<Self>,
) -> FloatTensor<Self> {
#[derive(Debug)]
struct Scatter;
impl<B: Backend> Backward<B, 2> for Scatter {
type State = (usize, IntTensor<B>, Shape, Shape, B::Device);
fn backward(
self,
ops: Ops<Self::State, 2>,
grads: &mut Gradients,
_checkpointer: &mut Checkpointer,
) {
let (dim, indices, shape_lhs, shape_rhs, device) = ops.state;
let [indices_4lhs, indices_4rhs] = duplicate(&ops.parents, Some(indices));
binary::<B, _, _>(
ops.parents,
ops.node,
grads,
|grad| {
let zeros = B::float_zeros(shape_lhs, &device);
B::float_scatter(dim, grad, indices_4lhs.unwrap(), zeros)
},
|grad| {
let zeros = B::float_zeros(shape_rhs, &device);
B::float_scatter(dim, zeros, indices_4rhs.unwrap(), grad)
},
);
}
}
match Scatter
.prepare::<C>([tensor.node, value.node])
.compute_bound()
.stateful()
{
OpsKind::Tracked(prep) => prep.finish(
(
dim,
indices.clone(),
tensor.primitive.shape(),
value.primitive.shape(),
B::float_device(&value.primitive),
),
B::float_scatter(dim, tensor.primitive, indices, value.primitive),
),
OpsKind::UnTracked(prep) => prep.finish(B::float_scatter(
dim,
tensor.primitive,
indices,
value.primitive,
)),
}
}
fn float_select(
tensor: FloatTensor<Self>,
dim: usize,
indices: IntTensor<B>,
) -> FloatTensor<Self> {
#[derive(Debug)]
struct Select;
#[derive(new, Debug)]
struct RetroSelect<B: Backend> {
input_id: NodeID,
dim: usize,
indices: IntTensor<B>,
}
impl<B: Backend> RetroForward for RetroSelect<B> {
fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);
let out = B::float_select(input, self.dim, self.indices.clone());
states.save(out_node, out)
}
}
impl<B: Backend> Backward<B, 1> for Select {
type State = (usize, IntTensor<B>, Shape, B::Device);
fn backward(
self,
ops: Ops<Self::State, 1>,
grads: &mut Gradients,
_checkpointer: &mut Checkpointer,
) {
let (dim, indices, shape, device) = ops.state;
unary::<B, _>(ops.parents, ops.node, grads, |grad| {
let zeros = B::float_zeros(shape, &device);
B::float_select_assign(zeros, dim, indices, grad)
});
}
}
match Select
.prepare::<C>([tensor.node.clone()])
.memory_bound()
.retro_forward(RetroSelect::<B>::new(tensor.node.id, dim, indices.clone()))
.parents([&tensor])
.stateful()
{
OpsKind::Tracked(prep) => prep.finish(
(
dim,
indices.clone(),
tensor.primitive.shape(),
B::float_device(&tensor.primitive),
),
B::float_select(tensor.primitive, dim, indices),
),
OpsKind::UnTracked(prep) => {
prep.finish(B::float_select(tensor.primitive, dim, indices))
}
}
}
    /// Assigns (accumulates) `value` into `tensor` along `dim` at `indices`.
    ///
    /// Backward pass: the tensor side receives the gradient unchanged, and the
    /// value side receives the gradient selected at the assigned indices.
    fn float_select_assign(
        tensor: FloatTensor<Self>,
        dim: usize,
        indices: IntTensor<B>,
        value: FloatTensor<Self>,
    ) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct IndexSelectDimAssign;
        // Recomputes the forward op during backward when memory-bound.
        #[derive(new, Debug)]
        struct RetroSelectAssign<B: Backend> {
            tensor_id: NodeID,
            dim: usize,
            indices: IntTensor<B>,
            value_id: NodeID,
        }
        impl<B: Backend> RetroForward for RetroSelectAssign<B> {
            fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
                let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);
                let value = states.get_state::<B::FloatTensorPrimitive>(&self.value_id);
                let out = B::float_select_assign(tensor, self.dim, self.indices.clone(), value);
                states.save(out_node, out)
            }
        }
        impl<B: Backend> Backward<B, 2> for IndexSelectDimAssign {
            type State = (usize, IntTensor<B>);
            fn backward(
                self,
                ops: Ops<Self::State, 2>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (dim, indices) = ops.state;
                binary::<B, _, _>(
                    ops.parents,
                    ops.node,
                    grads,
                    // lhs (tensor): identity gradient.
                    |grad| grad,
                    // rhs (value): pick out the gradient at the assigned indices.
                    |grad| B::float_select(grad, dim, indices),
                );
            }
        }
        match IndexSelectDimAssign
            .prepare::<C>([tensor.node.clone(), value.node.clone()])
            .memory_bound()
            .retro_forward(RetroSelectAssign::<B>::new(
                tensor.node.id,
                dim,
                indices.clone(),
                value.node.id,
            ))
            .parents([&tensor, &value])
            .stateful()
        {
            OpsKind::Tracked(prep) => prep.finish(
                (dim, indices.clone()),
                B::float_select_assign(tensor.primitive, dim, indices, value.primitive),
            ),
            OpsKind::UnTracked(prep) => prep.finish(B::float_select_assign(
                tensor.primitive,
                dim,
                indices,
                value.primitive,
            )),
        }
    }
    /// Slices `tensor` over the given per-dimension `ranges`.
    ///
    /// Backward pass: the gradient is written into a zero tensor of the
    /// input's shape at the sliced region via `float_slice_assign`.
    fn float_slice(
        tensor: FloatTensor<Self>,
        ranges: &[core::ops::Range<usize>],
    ) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Index;
        // Recomputes the forward slice during backward when memory-bound.
        #[derive(new, Debug)]
        struct RetroSlice<B: Backend> {
            tensor_id: NodeID,
            ranges: Vec<core::ops::Range<usize>>,
            _backend: PhantomData<B>,
        }
        impl<B: Backend> RetroForward for RetroSlice<B> {
            fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
                let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);
                let out = B::float_slice(tensor, &self.ranges);
                states.save(out_node, out)
            }
        }
        impl<B: Backend> Backward<B, 1> for Index {
            // (ranges, input shape, device) captured at forward time.
            type State = (Vec<core::ops::Range<usize>>, Shape, B::Device);
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (ranges, shape, device) = ops.state;
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    let zeros = B::float_zeros(shape, &device);
                    B::float_slice_assign(zeros, &ranges, grad)
                });
            }
        }
        match Index
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroSlice::<B>::new(tensor.node.id, ranges.to_vec()))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(prep) => prep.finish(
                (
                    ranges.to_vec(),
                    tensor.primitive.shape(),
                    B::float_device(&tensor.primitive),
                ),
                B::float_slice(tensor.primitive, ranges),
            ),
            OpsKind::UnTracked(prep) => prep.finish(B::float_slice(tensor.primitive, ranges)),
        }
    }
    /// Writes `value` into the region of `tensor` selected by `ranges`.
    ///
    /// Backward pass: the tensor side gets the gradient with the assigned
    /// region zeroed out; the value side gets the gradient sliced to that region.
    fn float_slice_assign(
        tensor: FloatTensor<Self>,
        ranges: &[core::ops::Range<usize>],
        value: FloatTensor<Self>,
    ) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct SliceAssign;
        // Recomputes the forward op during backward when memory-bound.
        #[derive(new, Debug)]
        struct RetroSliceAssign<B: Backend> {
            tensor_id: NodeID,
            ranges: Vec<core::ops::Range<usize>>,
            value_id: NodeID,
            _backend: PhantomData<B>,
        }
        impl<B: Backend> RetroForward for RetroSliceAssign<B> {
            fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
                let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);
                let value = states.get_state::<B::FloatTensorPrimitive>(&self.value_id);
                let out = B::float_slice_assign(tensor, &self.ranges, value);
                states.save(out_node, out)
            }
        }
        impl<B: Backend> Backward<B, 2> for SliceAssign {
            // (ranges, value shape, device) captured at forward time.
            type State = (Vec<core::ops::Range<usize>>, Shape, B::Device);
            fn backward(
                self,
                ops: Ops<Self::State, 2>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (ranges, shape_rhs, device) = ops.state;
                // Clone the ranges only as many times as there are tracked parents.
                let [ranges_4lhs, ranges_4rhs] = duplicate(&ops.parents, Some(ranges));
                binary::<B, _, _>(
                    ops.parents,
                    ops.node,
                    grads,
                    |grad| {
                        // lhs: zero the overwritten region, pass the rest through.
                        let zeros = B::float_zeros(shape_rhs, &device);
                        B::float_slice_assign(grad, &ranges_4lhs.unwrap(), zeros)
                    },
                    // rhs: the value only influenced the assigned region.
                    |grad| B::float_slice(grad, &ranges_4rhs.unwrap()),
                );
            }
        }
        match SliceAssign
            .prepare::<C>([tensor.node.clone(), value.node.clone()])
            .memory_bound()
            .retro_forward(RetroSliceAssign::<B>::new(
                tensor.node.id,
                ranges.to_vec(),
                value.node.id,
            ))
            .parents([&tensor, &value])
            .stateful()
        {
            OpsKind::Tracked(prep) => prep.finish(
                (
                    ranges.to_vec(),
                    value.primitive.shape(),
                    B::float_device(&value.primitive),
                ),
                B::float_slice_assign(tensor.primitive, ranges, value.primitive),
            ),
            OpsKind::UnTracked(prep) => prep.finish(B::float_slice_assign(
                tensor.primitive,
                ranges,
                value.primitive,
            )),
        }
    }
    /// Element-wise select: where `mask` is true take `source`, else `tensor`.
    ///
    /// Backward pass: each side receives the gradient masked to the positions
    /// it contributed, then reduced back to its own shape (broadcasting-aware).
    fn float_mask_where(
        tensor: FloatTensor<Self>,
        mask: BoolTensor<Self>,
        source: FloatTensor<Self>,
    ) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct MaskWhere;
        impl<B: Backend> Backward<B, 2> for MaskWhere {
            // (mask, lhs shape, rhs shape, device) captured at forward time.
            type State = (BoolTensor<B>, Shape, Shape, B::Device);
            fn backward(
                self,
                ops: Ops<Self::State, 2>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (mask, shape_lhs, shape_rhs, device) = ops.state;
                // Clone the mask only as many times as there are tracked parents.
                let [mask_4lhs, mask_4rhs] = duplicate(&ops.parents, Some(mask));
                binary::<B, _, _>(
                    ops.parents,
                    ops.node,
                    grads,
                    |grad| {
                        // lhs contributed where the mask is false: zero out masked spots.
                        let zeros = B::float_zeros(shape_lhs.clone(), &device);
                        let grad = B::float_mask_where(grad, mask_4lhs.unwrap(), zeros);
                        broadcast_shape::<B>(grad, &shape_lhs)
                    },
                    |grad| {
                        // rhs contributed where the mask is true: keep only masked spots.
                        let zeros = B::float_zeros(shape_rhs.clone(), &device);
                        let grad = B::float_mask_where(zeros, mask_4rhs.unwrap(), grad);
                        broadcast_shape::<B>(grad, &shape_rhs)
                    },
                );
            }
        }
        match MaskWhere
            .prepare::<C>([tensor.node, source.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => prep.finish(
                (
                    mask.clone(),
                    tensor.primitive.shape(),
                    source.primitive.shape(),
                    B::float_device(&source.primitive),
                ),
                B::float_mask_where(tensor.primitive, mask, source.primitive),
            ),
            OpsKind::UnTracked(prep) => prep.finish(B::float_mask_where(
                tensor.primitive,
                mask,
                source.primitive,
            )),
        }
    }
    /// Fills masked positions of `tensor` with the scalar `value`.
    ///
    /// Backward pass: the filled positions received a constant, so their
    /// gradient is zeroed (mask-fill with 0) before flowing to the input.
    fn float_mask_fill(
        tensor: FloatTensor<Self>,
        mask: BoolTensor<B>,
        value: FloatElem<B>,
    ) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct MaskFill;
        impl<B: Backend> Backward<B, 1> for MaskFill {
            type State = BoolTensor<B>;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    B::float_mask_fill(grad, ops.state, 0.elem())
                });
            }
        }
        match MaskFill
            .prepare::<C>([tensor.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => prep.finish(
                mask.clone(),
                B::float_mask_fill(tensor.primitive, mask, value),
            ),
            OpsKind::UnTracked(prep) => {
                prep.finish(B::float_mask_fill(tensor.primitive, mask, value))
            }
        }
    }
    /// Element-wise equality. Produces a bool tensor, so no autodiff node is
    /// created; delegates straight to the inner backend.
    fn float_equal(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {
        B::float_equal(lhs.primitive, rhs.primitive)
    }
    /// Element-wise equality against a scalar; not tracked (bool output).
    fn float_equal_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {
        B::float_equal_elem(lhs.primitive, rhs)
    }
    /// Element-wise `lhs > rhs`; not tracked (bool output).
    fn float_greater(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {
        B::float_greater(lhs.primitive, rhs.primitive)
    }
    /// Element-wise `lhs > scalar`; not tracked (bool output).
    fn float_greater_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {
        B::float_greater_elem(lhs.primitive, rhs)
    }
    /// Element-wise `lhs >= rhs`; not tracked (bool output).
    fn float_greater_equal(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {
        B::float_greater_equal(lhs.primitive, rhs.primitive)
    }
    /// Element-wise `lhs >= scalar`; not tracked (bool output).
    fn float_greater_equal_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {
        B::float_greater_equal_elem(lhs.primitive, rhs)
    }
    /// Element-wise `lhs < rhs`; not tracked (bool output).
    fn float_lower(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {
        B::float_lower(lhs.primitive, rhs.primitive)
    }
    /// Element-wise `lhs < scalar`; not tracked (bool output).
    fn float_lower_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {
        B::float_lower_elem(lhs.primitive, rhs)
    }
    /// Element-wise `lhs <= rhs`; not tracked (bool output).
    fn float_lower_equal(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {
        B::float_lower_equal(lhs.primitive, rhs.primitive)
    }
    /// Element-wise `lhs <= scalar`; not tracked (bool output).
    fn float_lower_equal_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {
        B::float_lower_equal_elem(lhs.primitive, rhs)
    }
    /// Detaches `tensor` from the autodiff graph, preserving its
    /// `require_grad` flag on the fresh node.
    fn float_detach(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        // When we detach a tensor, we remove it from the graph, but we still want to keep the
        // `require_grad` setting.
        let is_require_grad = Self::float_is_require_grad(&tensor);
        // A new AutodiffTensor starts with no graph history.
        let tensor = AutodiffTensor::new(tensor.primitive);
        match is_require_grad {
            true => tensor.require_grad(),
            false => tensor,
        }
    }
    /// Sets the `require_grad` flag. Disabling it rebuilds the tensor as a
    /// fresh leaf node (dropping any recorded history).
    fn float_set_require_grad(tensor: FloatTensor<Self>, require_grad: bool) -> FloatTensor<Self> {
        if require_grad {
            return tensor.require_grad();
        }
        AutodiffTensor::new(tensor.primitive)
    }
    /// Returns whether this tensor's node itself requires gradients.
    fn float_is_require_grad(tensor: &FloatTensor<Self>) -> bool {
        matches!(tensor.node.requirement, Requirement::Grad)
    }
    /// Full-tensor mean reduction.
    ///
    /// Backward pass: broadcasts the (scalar) output gradient to the input
    /// shape, scaled by `1 / num_elements`.
    fn float_mean(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Mean;
        impl<B: Backend> Backward<B, 1> for Mean {
            // Input shape, needed to rebuild the broadcast gradient.
            type State = Shape;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    let shape = ops.state;
                    // d(mean)/d(x_i) = 1/N for every element.
                    let val = 1_f64 / shape.num_elements() as f64;
                    let ones = B::float_ones(shape, &B::float_device(&grad));
                    let val = B::float_mul_scalar(ones, val.elem());
                    let grad = unsqueeze_like::<B>(grad, val.shape());
                    B::float_mul(val, grad)
                });
            }
        }
        match Mean.prepare::<C>([tensor.node]).compute_bound().stateful() {
            OpsKind::Tracked(prep) => {
                prep.finish(tensor.primitive.shape(), B::float_mean(tensor.primitive))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_mean(tensor.primitive)),
        }
    }
    /// Full-tensor sum reduction.
    ///
    /// Backward pass: broadcasts the (scalar) output gradient to the input
    /// shape unchanged (d(sum)/d(x_i) = 1).
    fn float_sum(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Sum;
        impl<B: Backend> Backward<B, 1> for Sum {
            // Input shape, needed to rebuild the broadcast gradient.
            type State = Shape;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    let val = B::float_ones(ops.state, &B::float_device(&grad));
                    let grad = unsqueeze_like::<B>(grad, val.shape());
                    B::float_mul(val, grad)
                });
            }
        }
        match Sum.prepare::<C>([tensor.node]).compute_bound().stateful() {
            OpsKind::Tracked(prep) => {
                prep.finish(tensor.primitive.shape(), B::float_sum(tensor.primitive))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_sum(tensor.primitive)),
        }
    }
    /// Mean reduction along a single dimension `dim`.
    ///
    /// Backward pass: sums the output gradient back over `dim` and scales
    /// by `1 / shape.dims[dim]`.
    fn float_mean_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct MeanDim;
        impl<B: Backend> Backward<B, 1> for MeanDim {
            // (input shape, reduced dimension).
            type State = (Shape, usize);
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (shape, dim) = ops.state;
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    // Each input element contributes 1/len to the mean.
                    let val = 1_f64 / shape.dims[dim] as f64;
                    let ones = B::float_ones(shape, &B::float_device(&grad));
                    let val = B::float_mul_scalar(ones, B::FloatElem::from_elem(val));
                    let grad = B::float_sum_dim(grad, dim);
                    B::float_mul(val, grad)
                });
            }
        }
        match MeanDim
            .prepare::<C>([tensor.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => prep.finish(
                (tensor.primitive.shape(), dim),
                B::float_mean_dim(tensor.primitive, dim),
            ),
            OpsKind::UnTracked(prep) => prep.finish(B::float_mean_dim(tensor.primitive, dim)),
        }
    }
    /// Sum reduction along a single dimension `dim`.
    ///
    /// Backward pass: broadcasts the output gradient back across `dim`.
    fn float_sum_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct SumDim;
        impl<B: Backend> Backward<B, 1> for SumDim {
            // (input shape, reduced dimension).
            type State = (Shape, usize);
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (shape, dim) = ops.state;
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    let ones = B::float_ones(shape, &B::float_device(&grad));
                    let grad = B::float_sum_dim(grad, dim);
                    // Multiplying by ones broadcasts the gradient over `dim`.
                    B::float_mul(ones, grad)
                });
            }
        }
        match SumDim
            .prepare::<C>([tensor.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => prep.finish(
                (tensor.primitive.shape(), dim),
                B::float_sum_dim(tensor.primitive, dim),
            ),
            OpsKind::UnTracked(prep) => prep.finish(B::float_sum_dim(tensor.primitive, dim)),
        }
    }
    /// Index of the maximum along `dim`; integer output, so not tracked.
    fn float_argmax(tensor: FloatTensor<Self>, dim: usize) -> IntTensor<B> {
        B::float_argmax(tensor.primitive, dim)
    }
    /// Index of the minimum along `dim`; integer output, so not tracked.
    fn float_argmin(tensor: FloatTensor<Self>, dim: usize) -> IntTensor<B> {
        B::float_argmin(tensor.primitive, dim)
    }
    /// Element-wise exponential.
    ///
    /// Backward pass: d/dx exp(x) = exp(x); the input is checkpointed and the
    /// output recomputed during backward.
    fn float_exp(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Exp;
        retro_unary!(RetroExp, B::float_exp);
        impl<B: Backend> Backward<B, 1> for Exp {
            // NodeID of the checkpointed input.
            type State = NodeID;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                let input = checkpointer.retrieve_node_output(ops.state);
                let output = B::float_exp(input);
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    B::float_mul(grad, output)
                });
            }
        }
        match Exp
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroExp::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                let state = prep.checkpoint(&tensor);
                prep.finish(state, B::float_exp(tensor.primitive))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_exp(tensor.primitive)),
        }
    }
    /// Element-wise natural logarithm.
    ///
    /// Backward pass: d/dx ln(x) = 1/x, computed as `x^-1` from the
    /// checkpointed input.
    fn float_log(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Log;
        retro_unary!(RetroLog, B::float_log);
        impl<B: Backend> Backward<B, 1> for Log {
            // NodeID of the checkpointed input.
            type State = NodeID;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                let input = checkpointer.retrieve_node_output(ops.state);
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    let value = B::float_powf_scalar(input, -1.0);
                    B::float_mul(grad, value)
                });
            }
        }
        match Log
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroLog::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                let state = prep.checkpoint(&tensor);
                prep.finish(state, B::float_log(tensor.primitive))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_log(tensor.primitive)),
        }
    }
    /// Element-wise `ln(1 + x)`.
    ///
    /// Backward pass: d/dx ln(1+x) = 1/(1+x), computed as `(x + 1)^-1`.
    fn float_log1p(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Log1P;
        retro_unary!(RetroLog1P, B::float_log1p);
        impl<B: Backend> Backward<B, 1> for Log1P {
            // NodeID of the checkpointed input.
            type State = NodeID;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                let input = checkpointer.retrieve_node_output(ops.state);
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    let value = B::float_add_scalar(input, 1.elem());
                    let value = B::float_powf_scalar(value, -1.0);
                    B::float_mul(grad, value)
                });
            }
        }
        match Log1P
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroLog1P::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                let state = prep.checkpoint(&tensor);
                prep.finish(state, B::float_log1p(tensor.primitive))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_log1p(tensor.primitive)),
        }
    }
    /// Element-wise power with a scalar exponent: `x^value`.
    ///
    /// Backward pass: d/dx x^v = v * x^(v-1), from the checkpointed input.
    fn float_powf_scalar(tensor: FloatTensor<Self>, value: f32) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct PowfScalar;
        // Recomputes the forward pow during backward when memory-bound.
        #[derive(new, Debug)]
        struct RetroPowfScalar<B: Backend> {
            lhs_id: NodeID,
            rhs: f32,
            _backend: PhantomData<B>,
        }
        impl<B: Backend> RetroForward for RetroPowfScalar<B> {
            fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
                let lhs = states.get_state::<B::FloatTensorPrimitive>(&self.lhs_id);
                let out = B::float_powf_scalar(lhs, self.rhs);
                states.save(out_node, out)
            }
        }
        impl<B: Backend> Backward<B, 1> for PowfScalar {
            // (checkpointed input node, exponent).
            type State = (NodeID, f32);
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                let (tensor_id, value) = ops.state;
                let tensor = checkpointer.retrieve_node_output(tensor_id);
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    let tmp = B::float_powf_scalar(tensor, value - 1.0);
                    let value = B::float_mul_scalar(tmp, value.elem());
                    B::float_mul(grad, value)
                });
            }
        }
        match PowfScalar
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroPowfScalar::<B>::new(tensor.node.id, value))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                let state = (prep.checkpoint(&tensor), value);
                prep.finish(state, B::float_powf_scalar(tensor.primitive, value))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_powf_scalar(tensor.primitive, value)),
        }
    }
    /// Element-wise square root.
    ///
    /// Backward pass: d/dx sqrt(x) = x^(-1/2) / 2, from the checkpointed input.
    fn float_sqrt(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Sqrt;
        retro_unary!(RetroSqrt, B::float_sqrt);
        impl<B: Backend> Backward<B, 1> for Sqrt {
            // NodeID of the checkpointed input.
            type State = NodeID;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                let input = checkpointer.retrieve_node_output(ops.state);
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    let value = B::float_div_scalar(B::float_powf_scalar(input, -0.5), 2.elem());
                    B::float_mul(grad, value)
                });
            }
        }
        match Sqrt
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroSqrt::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                let state = prep.checkpoint(&tensor);
                prep.finish(state, B::float_sqrt(tensor.primitive))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_sqrt(tensor.primitive)),
        }
    }
    /// Element-wise absolute value.
    ///
    /// Backward pass: the gradient is multiplied by sign(x) of the
    /// checkpointed input.
    fn float_abs(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Abs;
        retro_unary!(RetroAbs, B::float_abs);
        impl<B: Backend> Backward<B, 1> for Abs {
            // NodeID of the checkpointed input.
            type State = NodeID;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                let tensor: B::FloatTensorPrimitive = checkpointer.retrieve_node_output(ops.state);
                let state = B::float_sign(tensor);
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    B::float_mul(grad, state)
                });
            }
        }
        match Abs
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroAbs::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                let state = prep.checkpoint(&tensor);
                prep.finish(state, B::float_abs(tensor.primitive))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_abs(tensor.primitive)),
        }
    }
    /// Element-wise cosine.
    ///
    /// Backward pass: d/dx cos(x) = -sin(x), from the checkpointed input.
    fn float_cos(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Cos;
        retro_unary!(RetroCos, B::float_cos);
        impl<B: Backend> Backward<B, 1> for Cos {
            // NodeID of the checkpointed input.
            type State = NodeID;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                let input = checkpointer.retrieve_node_output(ops.state);
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    let value = B::float_neg(B::float_sin(input));
                    B::float_mul(grad, value)
                });
            }
        }
        match Cos
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroCos::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                let state = prep.checkpoint(&tensor);
                prep.finish(state, B::float_cos(tensor.primitive))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_cos(tensor.primitive)),
        }
    }
    /// Element-wise sine.
    ///
    /// Backward pass: d/dx sin(x) = cos(x), from the checkpointed input.
    fn float_sin(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Sin;
        retro_unary!(RetroSin, B::float_sin);
        impl<B: Backend> Backward<B, 1> for Sin {
            // NodeID of the checkpointed input.
            type State = NodeID;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                let state = checkpointer.retrieve_node_output(ops.state);
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    let value = B::float_cos(state);
                    B::float_mul(grad, value)
                });
            }
        }
        match Sin
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroSin::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                let state = prep.checkpoint(&tensor);
                prep.finish(state, B::float_sin(tensor.primitive))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_sin(tensor.primitive)),
        }
    }
    /// Element-wise hyperbolic tangent.
    ///
    /// Backward pass: d/dx tanh(x) = 1 - tanh(x)^2; tanh is recomputed from
    /// the checkpointed input.
    fn float_tanh(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Tanh;
        retro_unary!(RetroTanh, B::float_tanh);
        impl<B: Backend> Backward<B, 1> for Tanh {
            // NodeID of the checkpointed input.
            type State = NodeID;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                let input = checkpointer.retrieve_node_output(ops.state);
                let state = B::float_tanh(input);
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    let value = B::float_add_scalar(
                        B::float_neg(B::float_powf_scalar(state, 2.0)),
                        1.elem(),
                    );
                    B::float_mul(grad, value)
                });
            }
        }
        match Tanh
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroTanh::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                let state = prep.checkpoint(&tensor);
                prep.finish(state, B::float_tanh(tensor.primitive))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_tanh(tensor.primitive)),
        }
    }
    /// Element-wise rounding to the nearest integer.
    ///
    /// Backward pass: rounding is piecewise constant, so the gradient is a
    /// zero tensor of the input's shape.
    fn float_round(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Round;
        retro_unary!(RetroRound, B::float_round);
        impl<B: Backend> Backward<B, 1> for Round {
            // (input shape, device) to build the zero gradient.
            type State = (Shape, B::Device);
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (shape, device) = ops.state;
                unary::<B, _>(ops.parents, ops.node, grads, |_grad| {
                    B::float_zeros(shape, &device)
                })
            }
        }
        match Round
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroRound::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(preps) => preps.finish(
                (tensor.primitive.shape(), B::float_device(&tensor.primitive)),
                B::float_round(tensor.primitive),
            ),
            OpsKind::UnTracked(preps) => preps.finish(B::float_round(tensor.primitive)),
        }
    }
    /// Element-wise floor.
    ///
    /// Backward pass: floor is piecewise constant, so the gradient is a zero
    /// tensor of the input's shape.
    fn float_floor(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Floor;
        retro_unary!(RetroFloor, B::float_floor);
        impl<B: Backend> Backward<B, 1> for Floor {
            // (input shape, device) to build the zero gradient.
            type State = (Shape, B::Device);
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (shape, device) = ops.state;
                unary::<B, _>(ops.parents, ops.node, grads, |_grad| {
                    B::float_zeros(shape, &device)
                })
            }
        }
        match Floor
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroFloor::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(preps) => preps.finish(
                (tensor.primitive.shape(), B::float_device(&tensor.primitive)),
                B::float_floor(tensor.primitive),
            ),
            OpsKind::UnTracked(preps) => preps.finish(B::float_floor(tensor.primitive)),
        }
    }
fn float_ceil(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
#[derive(Debug)]
struct Ceil;
retro_unary!(RetroCeil, B::float_ceil);
impl<B: Backend> Backward<B, 1> for Ceil {
type State = (Shape, B::Device);
fn backward(
self,
ops: Ops<Self::State, 1>,
grads: &mut Gradients,
_checkpointer: &mut Checkpointer,
) {
let (shape, device) = ops.state;
unary::<B, _>(ops.parents, ops.node, grads, |_grad| {
B::float_zeros(shape, &device)
})
}
}
match Ceil
.prepare::<C>([tensor.node.clone()])
.memory_bound()
.retro_forward(RetroCeil::<B>::new(tensor.node.id))
.parents([&tensor])
.stateful()
{
OpsKind::Tracked(preps) => preps.finish(
(tensor.primitive.shape(), B::float_device(&tensor.primitive)),
B::float_floor(tensor.primitive),
),
OpsKind::UnTracked(preps) => preps.finish(B::float_floor(tensor.primitive)),
}
}
    /// Element-wise error function.
    ///
    /// Backward pass: d/dx erf(x) = 2/sqrt(pi) * exp(-x^2), computed from the
    /// checkpointed input.
    fn float_erf(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Erf;
        retro_unary!(RetroErf, B::float_erf);
        impl<B: Backend> Backward<B, 1> for Erf {
            // NodeID of the checkpointed input.
            type State = NodeID;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    let ops = checkpointer.retrieve_node_output(ops.state);
                    let exponent = B::float_neg(B::float_powf_scalar(ops, 2.0));
                    let numerator = B::float_mul_scalar(B::float_exp(exponent), 2.0.elem());
                    let denominator = core::f64::consts::PI.sqrt().elem();
                    let value = B::float_div_scalar(numerator, denominator);
                    B::float_mul(grad, value)
                });
            }
        }
        match Erf
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroErf::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                let state = prep.checkpoint(&tensor);
                prep.finish(state, B::float_erf(tensor.primitive))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_erf(tensor.primitive)),
        }
    }
    /// Concatenates `tensors` along `dim`.
    ///
    /// Backward pass: the output gradient is sliced back into per-parent
    /// pieces using each input's recorded size along `dim`. This op builds a
    /// custom `Step` (variadic parents) instead of using the unary/binary
    /// helpers, and checkpoints nothing (`ComputingProperty::Ambiguous`).
    fn float_cat(tensors: Vec<FloatTensor<Self>>, dim: usize) -> FloatTensor<Self> {
        #[derive(new, Debug)]
        struct CatStep<B: Backend> {
            nodes: Vec<Option<NodeRef>>,
            // The dimension of each tensor along the dim dimension.
            // This indicates the number of dimension concatenated for each tensor.
            dim_sizes: Vec<usize>,
            output: NodeRef,
            phantom: PhantomData<B>,
            dim: usize,
        }
        impl<B: Backend> Step for CatStep<B> {
            fn step(self: Box<Self>, grads: &mut Gradients, _checkpointer: &mut Checkpointer) {
                let grad = grads.consume::<B>(&self.output);
                // Full ranges over the output; only `self.dim` is narrowed below.
                let ranges: Vec<_> = grad.shape().dims.iter().map(|v| 0..*v).collect();
                let mut current_index = 0;
                self.nodes
                    .into_iter()
                    .zip(self.dim_sizes)
                    // Skip untracked parents (node is None) but still advance
                    // past their slice via the zipped dim_size.
                    .filter_map(|(node, dim_size)| node.map(|node| (node, dim_size)))
                    .for_each(|(node, dim_size)| {
                        let mut ranges = ranges.clone();
                        ranges[self.dim] = current_index..dim_size + current_index;
                        current_index += dim_size;
                        grads.register::<B>(node.id, B::float_slice(grad.clone(), &ranges));
                    });
            }
            fn node(&self) -> NodeID {
                self.output.id
            }
            fn parents(&self) -> Vec<NodeID> {
                self.nodes
                    .iter()
                    .filter_map(|node| node.clone())
                    .map(|node| node.id)
                    .collect()
            }
            fn depth(&self) -> usize {
                self.output.order
            }
        }
        let mut nodes = Vec::with_capacity(tensors.len());
        let mut primitives = Vec::with_capacity(tensors.len());
        let mut dim_sizes = Vec::with_capacity(tensors.len());
        tensors.into_iter().for_each(|tensor| {
            dim_sizes.push(tensor.primitive.shape().dims[dim]);
            nodes.push(tensor.node);
            primitives.push(tensor.primitive);
        });
        let requirement = Requirement::from_nodes(&nodes);
        // For simplicity, this operation does not checkpoint anything
        let cat_computing_property = ComputingProperty::Ambiguous;
        let checkpointer_builder = CheckpointerBuilder::default();
        let output = B::float_cat(primitives, dim);
        if requirement.is_none() {
            // No parent requires grad: return an untracked result.
            return AutodiffTensor::from_parents(
                output,
                &nodes,
                requirement,
                cat_computing_property,
            );
        }
        let output =
            AutodiffTensor::from_parents(output, &nodes, requirement, cat_computing_property);
        let nodes = nodes
            .into_iter()
            .map(|node| node.clone_if_require_grad())
            .collect::<Vec<_>>();
        let ops = CatStep::<B>::new(nodes, dim_sizes, output.node.clone(), dim);
        output.register_step(ops, checkpointer_builder)
    }
    /// Maximum along `dim`.
    ///
    /// When tracked, the forward uses the `_with_indices` variant so the
    /// backward (`MaxMinDim`, shared with min) can route gradients to the
    /// winning positions.
    fn float_max_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {
        match MaxMinDim
            .prepare::<C>([tensor.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => {
                let shape = tensor.primitive.shape();
                let (tensor, index) = B::float_max_dim_with_indices(tensor.primitive, dim);
                prep.finish((index, shape), tensor)
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_max_dim(tensor.primitive, dim)),
        }
    }
    /// Maximum along `dim`, also returning the argmax indices.
    /// Only the value tensor participates in autodiff; indices are returned as-is.
    fn float_max_dim_with_indices(
        tensor: FloatTensor<Self>,
        dim: usize,
    ) -> (FloatTensor<Self>, IntTensor<B>) {
        match MaxMinDim
            .prepare::<C>([tensor.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => {
                let shape = tensor.primitive.shape();
                let (tensor, index) = B::float_max_dim_with_indices(tensor.primitive, dim);
                let tensor = prep.finish((index.clone(), shape), tensor);
                (tensor, index)
            }
            OpsKind::UnTracked(prep) => {
                let (tensor, index) = B::float_max_dim_with_indices(tensor.primitive, dim);
                let tensor = prep.finish(tensor);
                (tensor, index)
            }
        }
    }
    /// Minimum along `dim`; mirrors `float_max_dim` using the shared
    /// `MaxMinDim` backward.
    fn float_min_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {
        match MaxMinDim
            .prepare::<C>([tensor.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => {
                let shape = tensor.primitive.shape();
                let (tensor, index) = B::float_min_dim_with_indices(tensor.primitive, dim);
                prep.finish((index, shape), tensor)
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_min_dim(tensor.primitive, dim)),
        }
    }
    /// Minimum along `dim`, also returning the argmin indices.
    /// Only the value tensor participates in autodiff; indices are returned as-is.
    fn float_min_dim_with_indices(
        tensor: FloatTensor<Self>,
        dim: usize,
    ) -> (FloatTensor<Self>, IntTensor<B>) {
        match MaxMinDim
            .prepare::<C>([tensor.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => {
                let shape = tensor.primitive.shape();
                let (tensor, index) = B::float_min_dim_with_indices(tensor.primitive, dim);
                let tensor = prep.finish((index.clone(), shape), tensor);
                (tensor, index)
            }
            OpsKind::UnTracked(prep) => {
                let (tensor, index) = B::float_min_dim_with_indices(tensor.primitive, dim);
                let tensor = prep.finish(tensor);
                (tensor, index)
            }
        }
    }
    /// Casts to an int tensor; integer output, so the result is not tracked.
    fn float_into_int(tensor: FloatTensor<Self>) -> <Autodiff<B> as Backend>::IntTensorPrimitive {
        B::float_into_int(tensor.primitive)
    }
    /// Element-wise power with a tensor exponent: `lhs^rhs`.
    ///
    /// Backward pass (both inputs checkpointed):
    /// - d/d(lhs) = rhs * lhs^(rhs-1)
    /// - d/d(rhs) = lhs^rhs * ln(lhs)
    /// Broadcasting is undone per side via `BinaryOpsBroadcast`.
    fn float_powf(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct PowF;
        retro_binary!(RetroPowf, B::float_powf);
        impl<B: Backend> Backward<B, 2> for PowF {
            // (lhs node, rhs node, broadcast info).
            type State = (NodeID, NodeID, BinaryOpsBroadcast);
            fn backward(
                self,
                ops: Ops<Self::State, 2>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                let (lhs_id, rhs_id, broadcast) = ops.state;
                let lhs: B::FloatTensorPrimitive = checkpointer.retrieve_node_output(lhs_id);
                let rhs: B::FloatTensorPrimitive = checkpointer.retrieve_node_output(rhs_id);
                // Both lhs and rhs are needed for both lhs and rhs gradients, but we clone them
                // the number of times required by the parents specification.
                let [rhs_4lhs, rhs_4rhs] = duplicate(&ops.parents, Some(rhs));
                let [lhs_4lhs, lhs_4rhs] = duplicate(&ops.parents, Some(lhs));
                binary::<B, _, _>(
                    ops.parents,
                    ops.node,
                    grads,
                    |grad| {
                        //rhs*(lhs.val**(rhs-1))*grad
                        let rhs1 = rhs_4lhs.unwrap();
                        let rhs2 = rhs1.clone();
                        let lhs = lhs_4lhs.unwrap();
                        let tmp = B::float_powf(
                            lhs,
                            B::float_sub_scalar(rhs1, B::FloatElem::from_elem(1.0)),
                        );
                        let value = B::float_mul(tmp, rhs2);
                        let grad = B::float_mul(grad, value);
                        broadcast.backward_lhs::<B>(grad)
                    },
                    |grad| {
                        //lhs**rhs * ln(lhs) * grad
                        let rhs = rhs_4rhs.unwrap();
                        let lhs1 = lhs_4rhs.unwrap();
                        let lhs2 = lhs1.clone();
                        let tmp = B::float_powf(lhs1, rhs);
                        let value = B::float_mul(tmp, B::float_log(lhs2));
                        let grad = B::float_mul(grad, value);
                        broadcast.backward_rhs::<B>(grad)
                    },
                );
            }
        }
        let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);
        match PowF
            .prepare::<C>([lhs.node.clone(), rhs.node.clone()])
            .memory_bound()
            .retro_forward(RetroPowf::<B>::new(lhs.node.id, rhs.node.id))
            .parents([&lhs, &rhs])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                let lhs_state = prep.checkpoint(&lhs);
                let rhs_state = prep.checkpoint(&rhs);
                prep.finish(
                    (lhs_state, rhs_state, broadcast),
                    B::float_powf(lhs.primitive, rhs.primitive),
                )
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_powf(lhs.primitive, rhs.primitive)),
        }
    }
    /// Element-wise sign.
    ///
    /// Backward pass: always zero (sign is piecewise constant), implemented
    /// as `grad * 0` to keep the gradient's shape/device. Stateless forward.
    fn float_sign(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Sign;
        retro_unary!(RetroSign, B::float_sign);
        impl<B: Backend> Backward<B, 1> for Sign {
            type State = ();
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                unary::<B, _>(ops.parents, ops.node, grads, |grad|
                    // Always return 0 because the derivative of the sign function
                    // does not contribute to gradient updates in a meaningful way.
                    B::float_mul_scalar(grad, 0.elem()));
            }
        }
        Sign.prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroSign::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateless(B::float_sign(tensor.primitive))
    }
    /// Broadcasts `tensor` to the target `shape` (possibly with more leading dims).
    ///
    /// Backward pass: sums the gradient over every dimension that was expanded
    /// (size 1 in the right-aligned input shape but larger in the output),
    /// then reshapes back to the input shape.
    fn float_expand(tensor: FloatTensor<Self>, shape: Shape) -> FloatTensor<Self> {
        // D1: tensor, D2: shape
        #[derive(Debug)]
        struct ExpandDim;
        // Recomputes the forward expand during backward when memory-bound.
        #[derive(new, Debug)]
        struct RetroExpand<B: Backend> {
            input_id: NodeID,
            shape: Shape,
            _backend: PhantomData<B>,
        }
        impl<B: Backend> RetroForward for RetroExpand<B> {
            fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
                let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);
                let out = B::float_expand(input, self.shape.clone());
                states.save(out_node, out)
            }
        }
        impl<B: Backend> Backward<B, 1> for ExpandDim {
            // (input shape, output shape).
            type State = (Shape, Shape);
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (shape_in, shape_out) = ops.state;
                let ndims_in = shape_in.num_dims();
                let ndims_out = shape_out.num_dims();
                // Right-align the input shape into the output's rank,
                // padding new leading dims with 1.
                let mut shape_expanded = vec![1; ndims_out];
                debug_assert!(ndims_out >= ndims_in);
                for i in 0..ndims_in {
                    shape_expanded[i + (ndims_out - ndims_in)] = shape_in.dims[i];
                }
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    let shape_grad = grad.shape();
                    let mut grad = grad;
                    #[allow(clippy::needless_range_loop)]
                    for i in 0..ndims_out {
                        // Sum over dims that were broadcast from size 1.
                        if shape_expanded[i] == 1 && shape_grad.dims[i] != 1 {
                            grad = B::float_sum_dim(grad, i);
                        }
                    }
                    B::float_reshape(grad, shape_in)
                });
            }
        }
        match ExpandDim
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroExpand::<B>::new(tensor.node.id, shape.clone()))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(prep) => prep.finish(
                (tensor.primitive.shape(), shape.clone()),
                B::float_expand(tensor.primitive, shape),
            ),
            OpsKind::UnTracked(prep) => prep.finish(B::float_expand(tensor.primitive, shape)),
        }
    }
fn float_sort(tensor: FloatTensor<Self>, dim: usize, descending: bool) -> FloatTensor<Self> {
match super::sort::SortDim
.prepare::<C>([tensor.node])
.compute_bound()
.stateful()
{
OpsKind::Tracked(prep) => {
let shape = tensor.primitive.shape();
let (tensor, indices) =
B::float_sort_with_indices(tensor.primitive, dim, descending);
prep.finish((indices, shape), tensor)
}
OpsKind::UnTracked(prep) => {
prep.finish(B::float_sort(tensor.primitive, dim, descending))
}
}
}
fn float_sort_with_indices(
tensor: FloatTensor<Self>,
dim: usize,
descending: bool,
) -> (FloatTensor<Self>, IntTensor<B>) {
match super::sort::SortDim
.prepare::<C>([tensor.node])
.compute_bound()
.stateful()
{
OpsKind::Tracked(prep) => {
let shape = tensor.primitive.shape();
let (tensor, indices) =
B::float_sort_with_indices(tensor.primitive, dim, descending);
let tensor = prep.finish((indices.clone(), shape), tensor);
(tensor, indices)
}
OpsKind::UnTracked(prep) => {
let (tensor, indices) =
B::float_sort_with_indices(tensor.primitive, dim, descending);
let tensor = prep.finish(tensor);
(tensor, indices)
}
}
}
    /// `argsort` produces integer indices, which carry no gradient, so the
    /// call is delegated straight to the inner backend without registering
    /// an autodiff node.
    fn float_argsort(tensor: FloatTensor<Self>, dim: usize, descending: bool) -> IntTensor<B> {
        B::float_argsort(tensor.primitive, dim, descending)
    }
    /// Autodiff `repeat_dim`: repeats `tensor` `times` times along `dim` and
    /// registers a backward step that sums the gradient contributions of the
    /// repeated copies back onto the original positions.
    fn float_repeat_dim(tensor: FloatTensor<Self>, dim: usize, times: usize) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Repeat;
        // Recomputes the repeated output from the checkpointed input when
        // this memory-bound op's output was not kept.
        #[derive(new, Debug)]
        struct RetroRepeat<B: Backend> {
            tensor_id: NodeID,
            dim: usize,
            times: usize,
            _backend: PhantomData<B>,
        }
        impl<B: Backend> RetroForward for RetroRepeat<B> {
            fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
                let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);
                let out = B::float_repeat_dim(tensor, self.dim, self.times);
                states.save(out_node, out)
            }
        }
        impl<B: Backend> Backward<B, 1> for Repeat {
            // State: (dim, times) captured at the forward pass.
            type State = (usize, usize);
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (dim, times) = ops.state;
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    let mut dims = grad.shape().dims;
                    // The repeated dimension has size orig_dim_size * times.
                    let orig_dim_size = dims[dim] / times;
                    if orig_dim_size > 1 {
                        dims[dim] = orig_dim_size;
                        let orig_dims = dims.clone();
                        // NOTE(review): splitting as [..., orig_dim_size, times, ...]
                        // and summing over `dim + 1` assumes each element's repeated
                        // copies are adjacent along `dim` (interleaved layout) —
                        // confirm this matches B::float_repeat_dim's memory layout.
                        dims.insert(dim + 1, times); // shape [..., orig_dim_size, times, ...]
                        let grad = B::float_reshape(grad, Shape::from(dims));
                        let grad = B::float_sum_dim(grad, dim + 1); // sum over repeat times
                        B::float_reshape(grad, Shape::from(orig_dims))
                    } else {
                        // Original dim size was 1: summing over `dim` collapses all
                        // `times` copies directly, no reshape needed.
                        B::float_sum_dim(grad, dim)
                    }
                });
            }
        }
        match Repeat
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroRepeat::<B>::new(tensor.node.id, dim, times))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(prep) => prep.finish(
                (dim, times),
                B::float_repeat_dim(tensor.primitive, dim, times),
            ),
            OpsKind::UnTracked(prep) => {
                prep.finish(B::float_repeat_dim(tensor.primitive, dim, times))
            }
        }
    }
    /// Casts the tensor to `dtype` on the inner backend.
    ///
    /// NOTE(review): wrapping the result in `AutodiffTensor::new` creates a
    /// fresh root node, so the output appears detached from the input's
    /// autodiff graph — confirm gradients are not expected to flow through
    /// `float_cast`.
    fn float_cast(tensor: FloatTensor<Self>, dtype: burn_tensor::FloatDType) -> FloatTensor<Self> {
        AutodiffTensor::new(B::float_cast(tensor.primitive, dtype))
    }
// TODO: Implement float_prod and float_sum
// https://github.com/tracel-ai/burn/issues/1458
}
/// Records whether the two operands of a binary op had mismatched shapes,
/// so the backward pass knows whether gradients must be reduced back to
/// each operand's original shape.
#[derive(Debug, Clone)]
enum BinaryOpsBroadcast {
    /// Shapes differed in at least one dimension: (lhs shape, rhs shape).
    Broadcasted(Shape, Shape),
    /// Shapes matched exactly; gradients pass through unchanged.
    None,
}
impl BinaryOpsBroadcast {
    /// Compares the operand shapes dimension by dimension and records
    /// whether broadcasting took place.
    fn new<B: Backend>(lhs: &B::FloatTensorPrimitive, rhs: &B::FloatTensorPrimitive) -> Self {
        let shape_lhs = lhs.shape();
        let shape_rhs = rhs.shape();
        // Iterates over the lhs rank; a mismatch in any dimension means the
        // op was broadcast.
        let mismatch = (0..shape_lhs.num_dims())
            .any(|dim| shape_lhs.dims[dim] != shape_rhs.dims[dim]);
        if mismatch {
            Self::Broadcasted(shape_lhs, shape_rhs)
        } else {
            Self::None
        }
    }
    /// Reduces `grad` back to the left-hand operand's original shape.
    fn backward_lhs<B: Backend>(&self, grad: B::FloatTensorPrimitive) -> B::FloatTensorPrimitive {
        match self {
            Self::Broadcasted(lhs, _rhs) => broadcast_shape::<B>(grad, lhs),
            Self::None => grad,
        }
    }
    /// Reduces `grad` back to the right-hand operand's original shape.
    fn backward_rhs<B: Backend>(&self, grad: B::FloatTensorPrimitive) -> B::FloatTensorPrimitive {
        match self {
            Self::Broadcasted(_lhs, rhs) => broadcast_shape::<B>(grad, rhs),
            Self::None => grad,
        }
    }
}
| rust | {
"argument_definitions": [
{
"definitions": [
"impl<B: Backend, C: CheckpointStrategy> FloatTensorOps<Self> for Autodiff<B, C> {\n fn float_from_data(data: TensorData, device: &Device<Self>) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_from_data(data, device))\n }\n\n fn float_random(\n shape: Shape,\n distribution: burn_tensor::Distribution,\n device: &Device<Self>,\n ) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_random(shape, distribution, device))\n }\n\n fn float_zeros(shape: Shape, device: &Device<Self>) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_zeros(shape, device))\n }\n\n fn float_ones(shape: Shape, device: &Device<Self>) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_ones(shape, device))\n }\n\n async fn float_into_data(tensor: FloatTensor<Self>) -> TensorData {\n B::float_into_data(tensor.primitive).await\n }\n\n fn float_device(tensor: &FloatTensor<Self>) -> Device<Self> {\n B::float_device(&tensor.primitive)\n }\n\n fn float_to_device(tensor: FloatTensor<Self>, device: &Device<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct ToDevice;\n\n impl<B: Backend> Backward<B, 1> for ToDevice {\n type State = B::Device;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_to_device(grad, &ops.state)\n });\n }\n }\n\n match ToDevice\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let device_old = B::float_device(&tensor.primitive);\n prep.finish(device_old, B::float_to_device(tensor.primitive, device))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_to_device(tensor.primitive, device)),\n }\n }\n\n fn float_empty(shape: Shape, device: &Device<Self>) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_empty(shape, device))\n }\n\n fn float_add(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Add;\n\n retro_binary!(RetroAdd, 
B::float_add);\n\n impl<B: Backend> Backward<B, 2> for Add {\n type State = (Shape, Shape);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape_lhs, shape_rhs) = ops.state;\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| broadcast_shape::<B>(grad, &shape_lhs),\n |grad| broadcast_shape::<B>(grad, &shape_rhs),\n );\n }\n }\n\n match Add\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroAdd::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (lhs.primitive.shape(), rhs.primitive.shape()),\n B::float_add(lhs.primitive, rhs.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_add(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_add_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct AddScalar;\n\n retro_unary_scalar!(RetroAddScalar, B::float_add_scalar);\n\n impl<B: Backend> Backward<B, 1> for AddScalar {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| grad);\n }\n }\n\n AddScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroAddScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateless(B::float_add_scalar(lhs.primitive, rhs))\n }\n\n fn float_sub(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sub;\n\n retro_binary!(RetroSub, B::float_sub);\n\n impl<B: Backend> Backward<B, 2> for Sub {\n type State = (Shape, Shape);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape_lhs, shape_rhs) = ops.state;\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| 
broadcast_shape::<B>(grad, &shape_lhs),\n |grad| broadcast_shape::<B>(B::float_neg(grad), &shape_rhs),\n );\n }\n }\n\n match Sub\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroSub::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (lhs.primitive.shape(), rhs.primitive.shape()),\n B::float_sub(lhs.primitive, rhs.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_sub(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_sub_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct SubScalar;\n\n retro_unary_scalar!(RetroSubScalar, B::float_sub_scalar);\n\n impl<B: Backend> Backward<B, 1> for SubScalar {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| grad);\n }\n }\n\n SubScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroSubScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateless(B::float_sub_scalar(lhs.primitive, rhs))\n }\n\n fn float_mul(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Mul;\n\n retro_binary!(RetroMul, B::float_mul);\n\n impl<B: Backend> Backward<B, 2> for Mul {\n type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (lhs, rhs, broadcast) = ops.state;\n let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));\n let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let grad = B::float_mul(grad, rhs.unwrap());\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n let grad = B::float_mul(grad, lhs.unwrap());\n 
broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let lhs_tracked = lhs.is_tracked();\n let rhs_tracked = rhs.is_tracked();\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match Mul\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroMul::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));\n let rhs_state = lhs_tracked.then(|| prep.checkpoint(&rhs));\n\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n B::float_mul(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_mul(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_mul_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct MulScalar;\n\n retro_unary_scalar!(RetroMulScalar, B::float_mul_scalar);\n\n impl<B: Backend> Backward<B, 1> for MulScalar {\n type State = FloatElem<B>;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_mul_scalar(grad, ops.state)\n });\n }\n }\n\n match MulScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroMulScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(rhs, B::float_mul_scalar(lhs.primitive, rhs)),\n OpsKind::UnTracked(prep) => prep.finish(B::float_mul_scalar(lhs.primitive, rhs)),\n }\n }\n\n fn float_div(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Div;\n\n retro_binary!(RetroDiv, B::float_div);\n\n impl<B: Backend> Backward<B, 2> for Div {\n type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut 
Checkpointer,\n ) {\n let (lhs, rhs, broadcast) = ops.state;\n let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));\n let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));\n let [rhs_4lhs, rhs_4rhs] = duplicate(&ops.parents, rhs);\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let rhs = rhs_4lhs.unwrap();\n let value = B::float_powf_scalar(rhs, -1.0);\n let grad = B::float_mul(grad, value);\n\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n let rhs = rhs_4rhs.unwrap();\n let lhs = lhs.unwrap();\n let value = B::float_div(B::float_neg(lhs), B::float_powf_scalar(rhs, 2.0));\n let grad = B::float_mul(grad, value);\n\n broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let lhs_tracked = lhs.is_tracked();\n let rhs_tracked = rhs.is_tracked();\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match Div\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroDiv::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));\n let rhs_state = (lhs_tracked || rhs_tracked).then(|| prep.checkpoint(&rhs));\n\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n B::float_div(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_div(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_div_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct DivScalar;\n\n retro_unary_scalar!(RetroDivScalar, B::float_div_scalar);\n\n impl<B: Backend> Backward<B, 1> for DivScalar {\n type State = FloatElem<B>;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let tmp = 1.0 / ops.state.elem::<f32>();\n B::float_mul_scalar(grad, tmp.elem())\n });\n }\n 
}\n\n match DivScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroDivScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(rhs, B::float_div_scalar(lhs.primitive, rhs)),\n OpsKind::UnTracked(prep) => prep.finish(B::float_div_scalar(lhs.primitive, rhs)),\n }\n }\n\n fn float_remainder(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Rem;\n\n retro_binary!(RetroRem, B::float_remainder);\n\n impl<B: Backend> Backward<B, 2> for Rem {\n type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (lhs, rhs, broadcast) = ops.state;\n let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));\n let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n // remainder(x, y) = x - floor(x / y) * y\n // partial(x - floor(x / y) * y, x) = 1\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n // partial(x - floor(x / y) * y, y) = - floor(x / y)\n let rhs = rhs.unwrap();\n let lhs = lhs.unwrap();\n let value = B::float_neg(B::float_floor(B::float_div(lhs, rhs)));\n let grad = B::float_mul(grad, value);\n broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let lhs_tracked = lhs.is_tracked();\n let rhs_tracked = rhs.is_tracked();\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match Rem\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroRem::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));\n let rhs_state = (lhs_tracked || rhs_tracked).then(|| prep.checkpoint(&rhs));\n\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n 
B::float_remainder(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_remainder(lhs.primitive, rhs.primitive))\n }\n }\n }\n\n fn float_remainder_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct RemainderScalar;\n\n retro_unary_scalar!(RetroRemainderScalar, B::float_remainder_scalar);\n\n impl<B: Backend> Backward<B, 1> for RemainderScalar {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| grad);\n }\n }\n\n RemainderScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroRemainderScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateless(B::float_remainder_scalar(lhs.primitive, rhs))\n }\n\n fn float_matmul(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Matmul;\n\n impl<B: Backend> Backward<B, 2> for Matmul {\n type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (lhs, rhs, broadcast) = ops.state;\n let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));\n let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let rhs = B::float_transpose(rhs.unwrap());\n let grad = B::float_matmul(grad, rhs);\n\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n let lhs = B::float_transpose(lhs.unwrap());\n let grad = B::float_matmul(lhs, grad);\n\n broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let lhs_tracked = lhs.is_tracked();\n let rhs_tracked = rhs.is_tracked();\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match Matmul\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n 
.compute_bound()\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));\n let rhs_state = lhs_tracked.then(|| prep.checkpoint(&rhs));\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n B::float_matmul(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_matmul(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_neg(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Neg;\n\n retro_unary!(RetroNeg, B::float_neg);\n\n impl<B: Backend> Backward<B, 1> for Neg {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| B::float_neg(grad));\n }\n }\n\n Neg.prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroNeg::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateless(B::float_neg(tensor.primitive))\n }\n\n fn float_recip(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Recip;\n\n retro_unary!(RetroRecip, B::float_recip);\n\n impl<B: Backend> Backward<B, 1> for Recip {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let tensor = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let tmp = B::float_powf_scalar(tensor, -2.0);\n let value = B::float_neg(tmp);\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Recip\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroRecip::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_recip(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_recip(tensor.primitive)),\n }\n }\n\n fn float_swap_dims(tensor: 
FloatTensor<Self>, dim1: usize, dim2: usize) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct SwapDim;\n\n #[derive(new, Debug)]\n struct RetroSwapDims<B: Backend> {\n input_id: NodeID,\n dim1: usize,\n dim2: usize,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroSwapDims<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_swap_dims(input, self.dim1, self.dim2);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for SwapDim {\n type State = (usize, usize);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim1, dim2) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_swap_dims(grad, dim2, dim1)\n });\n }\n }\n\n match SwapDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSwapDims::<B>::new(tensor.node.id, dim1, dim2))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (dim1, dim2),\n B::float_swap_dims(tensor.primitive, dim1, dim2),\n ),\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_swap_dims(tensor.primitive, dim1, dim2))\n }\n }\n }\n\n fn float_permute(tensor: FloatTensor<Self>, axes: &[usize]) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct PermuteDim;\n\n #[derive(new, Debug)]\n struct RetroPermuteDims<B: Backend> {\n input_id: NodeID,\n axes: Vec<usize>,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroPermuteDims<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_permute(input, &self.axes);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for PermuteDim {\n type State = Vec<usize>;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: 
&mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let axes = ops.state;\n\n let mut inverse = vec![0usize; axes.len()];\n axes.iter()\n .enumerate()\n .for_each(|(i, &axis)| inverse[axis] = i);\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_permute(grad, &inverse)\n });\n }\n }\n\n match PermuteDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroPermuteDims::<B>::new(tensor.node.id, axes.to_vec()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n prep.finish(axes.to_vec(), B::float_permute(tensor.primitive, axes))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_permute(tensor.primitive, axes)),\n }\n }\n\n fn float_flip(tensor: FloatTensor<Self>, axes: &[usize]) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct FlipDim;\n\n #[derive(new, Debug)]\n struct RetroFlipDims<B: Backend> {\n input_id: NodeID,\n axes: Vec<usize>,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroFlipDims<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_flip(input, &self.axes);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for FlipDim {\n type State = Vec<usize>;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let axes = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_flip(grad, &axes)\n });\n }\n }\n\n match FlipDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroFlipDims::<B>::new(tensor.node.id, axes.to_vec()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n prep.finish(axes.to_vec(), B::float_flip(tensor.primitive, axes))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_flip(tensor.primitive, axes)),\n }\n }\n\n fn float_reshape(tensor: FloatTensor<Self>, shape: 
Shape) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct ReshapeDim;\n\n #[derive(new, Debug)]\n struct RetroReshape<B: Backend> {\n input_id: NodeID,\n shape: Shape,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroReshape<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_reshape(input, self.shape.clone());\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for ReshapeDim {\n type State = (Shape, Shape);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape_original, shape) = ops.state;\n let ndims_out = shape.num_dims();\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let shape_grad = grad.shape();\n let mut grad = grad;\n\n for i in 0..ndims_out {\n if shape.dims[i] == 1 && shape_grad.dims[i] != 1 {\n grad = B::float_sum_dim(grad, i);\n }\n }\n\n B::float_reshape(grad, shape_original)\n });\n }\n }\n\n match ReshapeDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroReshape::<B>::new(tensor.node.id, shape.clone()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (tensor.primitive.shape(), shape.clone()),\n B::float_reshape(tensor.primitive, shape),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_reshape(tensor.primitive, shape)),\n }\n }\n\n fn float_gather(\n dim: usize,\n tensor: FloatTensor<Self>,\n indices: IntTensor<B>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Gather;\n\n impl<B: Backend> Backward<B, 1> for Gather {\n type State = (usize, IntTensor<B>, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, indices, shape, device) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let zeros = 
B::float_zeros(shape, &device);\n B::float_scatter(dim, zeros, indices, grad)\n });\n }\n }\n\n match Gather\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n dim,\n indices.clone(),\n tensor.primitive.shape(),\n B::float_device(&tensor.primitive),\n ),\n B::float_gather(dim, tensor.primitive, indices),\n ),\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_gather(dim, tensor.primitive, indices))\n }\n }\n }\n\n fn float_scatter(\n dim: usize,\n tensor: FloatTensor<Self>,\n indices: IntTensor<B>,\n value: FloatTensor<Self>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Scatter;\n\n impl<B: Backend> Backward<B, 2> for Scatter {\n type State = (usize, IntTensor<B>, Shape, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, indices, shape_lhs, shape_rhs, device) = ops.state;\n let [indices_4lhs, indices_4rhs] = duplicate(&ops.parents, Some(indices));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let zeros = B::float_zeros(shape_lhs, &device);\n B::float_scatter(dim, grad, indices_4lhs.unwrap(), zeros)\n },\n |grad| {\n let zeros = B::float_zeros(shape_rhs, &device);\n B::float_scatter(dim, zeros, indices_4rhs.unwrap(), grad)\n },\n );\n }\n }\n\n match Scatter\n .prepare::<C>([tensor.node, value.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n dim,\n indices.clone(),\n tensor.primitive.shape(),\n value.primitive.shape(),\n B::float_device(&value.primitive),\n ),\n B::float_scatter(dim, tensor.primitive, indices, value.primitive),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_scatter(\n dim,\n tensor.primitive,\n indices,\n value.primitive,\n )),\n }\n }\n\n fn float_select(\n tensor: FloatTensor<Self>,\n dim: usize,\n indices: IntTensor<B>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Select;\n\n 
#[derive(new, Debug)]\n struct RetroSelect<B: Backend> {\n input_id: NodeID,\n dim: usize,\n indices: IntTensor<B>,\n }\n\n impl<B: Backend> RetroForward for RetroSelect<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_select(input, self.dim, self.indices.clone());\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for Select {\n type State = (usize, IntTensor<B>, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, indices, shape, device) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let zeros = B::float_zeros(shape, &device);\n B::float_select_assign(zeros, dim, indices, grad)\n });\n }\n }\n\n match Select\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSelect::<B>::new(tensor.node.id, dim, indices.clone()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n dim,\n indices.clone(),\n tensor.primitive.shape(),\n B::float_device(&tensor.primitive),\n ),\n B::float_select(tensor.primitive, dim, indices),\n ),\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_select(tensor.primitive, dim, indices))\n }\n }\n }\n\n fn float_select_assign(\n tensor: FloatTensor<Self>,\n dim: usize,\n indices: IntTensor<B>,\n value: FloatTensor<Self>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct IndexSelectDimAssign;\n\n #[derive(new, Debug)]\n struct RetroSelectAssign<B: Backend> {\n tensor_id: NodeID,\n dim: usize,\n indices: IntTensor<B>,\n value_id: NodeID,\n }\n\n impl<B: Backend> RetroForward for RetroSelectAssign<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);\n let value = states.get_state::<B::FloatTensorPrimitive>(&self.value_id);\n 
let out = B::float_select_assign(tensor, self.dim, self.indices.clone(), value);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 2> for IndexSelectDimAssign {\n type State = (usize, IntTensor<B>);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, indices) = ops.state;\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| grad,\n |grad| B::float_select(grad, dim, indices),\n );\n }\n }\n\n match IndexSelectDimAssign\n .prepare::<C>([tensor.node.clone(), value.node.clone()])\n .memory_bound()\n .retro_forward(RetroSelectAssign::<B>::new(\n tensor.node.id,\n dim,\n indices.clone(),\n value.node.id,\n ))\n .parents([&tensor, &value])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (dim, indices.clone()),\n B::float_select_assign(tensor.primitive, dim, indices, value.primitive),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_select_assign(\n tensor.primitive,\n dim,\n indices,\n value.primitive,\n )),\n }\n }\n\n fn float_slice(\n tensor: FloatTensor<Self>,\n ranges: &[core::ops::Range<usize>],\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Index;\n\n #[derive(new, Debug)]\n struct RetroSlice<B: Backend> {\n tensor_id: NodeID,\n ranges: Vec<core::ops::Range<usize>>,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroSlice<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);\n let out = B::float_slice(tensor, &self.ranges);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for Index {\n type State = (Vec<core::ops::Range<usize>>, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (ranges, shape, device) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let zeros = 
B::float_zeros(shape, &device);\n B::float_slice_assign(zeros, &ranges, grad)\n });\n }\n }\n\n match Index\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSlice::<B>::new(tensor.node.id, ranges.to_vec()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n ranges.to_vec(),\n tensor.primitive.shape(),\n B::float_device(&tensor.primitive),\n ),\n B::float_slice(tensor.primitive, ranges),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_slice(tensor.primitive, ranges)),\n }\n }\n\n fn float_slice_assign(\n tensor: FloatTensor<Self>,\n ranges: &[core::ops::Range<usize>],\n value: FloatTensor<Self>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct SliceAssign;\n\n #[derive(new, Debug)]\n struct RetroSliceAssign<B: Backend> {\n tensor_id: NodeID,\n ranges: Vec<core::ops::Range<usize>>,\n value_id: NodeID,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroSliceAssign<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);\n let value = states.get_state::<B::FloatTensorPrimitive>(&self.value_id);\n let out = B::float_slice_assign(tensor, &self.ranges, value);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 2> for SliceAssign {\n type State = (Vec<core::ops::Range<usize>>, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (ranges, shape_rhs, device) = ops.state;\n let [ranges_4lhs, ranges_4rhs] = duplicate(&ops.parents, Some(ranges));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let zeros = B::float_zeros(shape_rhs, &device);\n B::float_slice_assign(grad, &ranges_4lhs.unwrap(), zeros)\n },\n |grad| B::float_slice(grad, &ranges_4rhs.unwrap()),\n );\n }\n }\n\n match SliceAssign\n .prepare::<C>([tensor.node.clone(), value.node.clone()])\n 
.memory_bound()\n .retro_forward(RetroSliceAssign::<B>::new(\n tensor.node.id,\n ranges.to_vec(),\n value.node.id,\n ))\n .parents([&tensor, &value])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n ranges.to_vec(),\n value.primitive.shape(),\n B::float_device(&value.primitive),\n ),\n B::float_slice_assign(tensor.primitive, ranges, value.primitive),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_slice_assign(\n tensor.primitive,\n ranges,\n value.primitive,\n )),\n }\n }\n\n fn float_mask_where(\n tensor: FloatTensor<Self>,\n mask: BoolTensor<Self>,\n source: FloatTensor<Self>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct MaskWhere;\n\n impl<B: Backend> Backward<B, 2> for MaskWhere {\n type State = (BoolTensor<B>, Shape, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (mask, shape_lhs, shape_rhs, device) = ops.state;\n let [mask_4lhs, mask_4rhs] = duplicate(&ops.parents, Some(mask));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let zeros = B::float_zeros(shape_lhs.clone(), &device);\n let grad = B::float_mask_where(grad, mask_4lhs.unwrap(), zeros);\n\n broadcast_shape::<B>(grad, &shape_lhs)\n },\n |grad| {\n let zeros = B::float_zeros(shape_rhs.clone(), &device);\n let grad = B::float_mask_where(zeros, mask_4rhs.unwrap(), grad);\n\n broadcast_shape::<B>(grad, &shape_rhs)\n },\n );\n }\n }\n\n match MaskWhere\n .prepare::<C>([tensor.node, source.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n mask.clone(),\n tensor.primitive.shape(),\n source.primitive.shape(),\n B::float_device(&source.primitive),\n ),\n B::float_mask_where(tensor.primitive, mask, source.primitive),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_mask_where(\n tensor.primitive,\n mask,\n source.primitive,\n )),\n }\n }\n\n fn float_mask_fill(\n tensor: FloatTensor<Self>,\n mask: 
        BoolTensor<B>,
        value: FloatElem<B>,
    ) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct MaskFill;

        impl<B: Backend> Backward<B, 1> for MaskFill {
            type State = BoolTensor<B>;

            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                // Masked positions were overwritten with a constant in the forward pass,
                // so no gradient flows through them: zero the gradient under the mask.
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    B::float_mask_fill(grad, ops.state, 0.elem())
                });
            }
        }

        match MaskFill
            .prepare::<C>([tensor.node])
            .compute_bound()
            .stateful()
        {
            // Tracked: keep a copy of the mask so backward can zero the masked entries.
            OpsKind::Tracked(prep) => prep.finish(
                mask.clone(),
                B::float_mask_fill(tensor.primitive, mask, value),
            ),
            OpsKind::UnTracked(prep) => {
                prep.finish(B::float_mask_fill(tensor.primitive, mask, value))
            }
        }
    }

    // Comparison ops are non-differentiable: they delegate directly to the inner
    // backend and return plain bool tensors without registering an autodiff node.
    fn float_equal(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {
        B::float_equal(lhs.primitive, rhs.primitive)
    }

    fn float_equal_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {
        B::float_equal_elem(lhs.primitive, rhs)
    }

    fn float_greater(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {
        B::float_greater(lhs.primitive, rhs.primitive)
    }

    fn float_greater_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {
        B::float_greater_elem(lhs.primitive, rhs)
    }

    fn float_greater_equal(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {
        B::float_greater_equal(lhs.primitive, rhs.primitive)
    }

    fn float_greater_equal_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {
        B::float_greater_equal_elem(lhs.primitive, rhs)
    }

    fn float_lower(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {
        B::float_lower(lhs.primitive, rhs.primitive)
    }

    fn float_lower_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {
        B::float_lower_elem(lhs.primitive, rhs)
    }

    fn float_lower_equal(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {
        B::float_lower_equal(lhs.primitive, rhs.primitive)
    }

    fn float_lower_equal_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {
        // Non-differentiable comparison: no autodiff node is created.
        B::float_lower_equal_elem(lhs.primitive, rhs)
    }

    fn float_detach(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        // When we detach a tensor, we remove it from the graph, but we still want to keep the
        // `require_grad` setting.
        let is_require_grad = Self::float_is_require_grad(&tensor);
        // A fresh AutodiffTensor has no parents, which severs the history.
        let tensor = AutodiffTensor::new(tensor.primitive);

        match is_require_grad {
            true => tensor.require_grad(),
            false => tensor,
        }
    }

    fn float_set_require_grad(tensor: FloatTensor<Self>, require_grad: bool) -> FloatTensor<Self> {
        if require_grad {
            return tensor.require_grad();
        }

        // Turning `require_grad` off also resets the node (new tensor, no parents).
        AutodiffTensor::new(tensor.primitive)
    }

    fn float_is_require_grad(tensor: &FloatTensor<Self>) -> bool {
        matches!(tensor.node.requirement, Requirement::Grad)
    }

    fn float_mean(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Mean;

        impl<B: Backend> Backward<B, 1> for Mean {
            // State: the input shape, needed to broadcast the scalar gradient back.
            type State = Shape;

            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    let shape = ops.state;
                    // d(mean)/dx_i = 1/N for every element of the input.
                    let val = 1_f64 / shape.num_elements() as f64;
                    let ones = B::float_ones(shape, &B::float_device(&grad));
                    let val = B::float_mul_scalar(ones, val.elem());

                    // Lift the (scalar) output gradient to the input rank before scaling.
                    let grad = unsqueeze_like::<B>(grad, val.shape());
                    B::float_mul(val, grad)
                });
            }
        }

        match Mean.prepare::<C>([tensor.node]).compute_bound().stateful() {
            OpsKind::Tracked(prep) => {
                prep.finish(tensor.primitive.shape(), B::float_mean(tensor.primitive))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_mean(tensor.primitive)),
        }
    }

    fn float_sum(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Sum;

        // Sum backward: every input element contributes with weight 1, so the output
        // gradient is broadcast over the stored input shape.
        impl<B: Backend> Backward<B, 1> for Sum {
            type State = Shape;

            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut
Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let val = B::float_ones(ops.state, &B::float_device(&grad));\n\n let grad = unsqueeze_like::<B>(grad, val.shape());\n B::float_mul(val, grad)\n });\n }\n }\n\n match Sum.prepare::<C>([tensor.node]).compute_bound().stateful() {\n OpsKind::Tracked(prep) => {\n prep.finish(tensor.primitive.shape(), B::float_sum(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_sum(tensor.primitive)),\n }\n }\n\n fn float_mean_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct MeanDim;\n\n impl<B: Backend> Backward<B, 1> for MeanDim {\n type State = (Shape, usize);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape, dim) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let val = 1_f64 / shape.dims[dim] as f64;\n let ones = B::float_ones(shape, &B::float_device(&grad));\n let val = B::float_mul_scalar(ones, B::FloatElem::from_elem(val));\n\n let grad = B::float_sum_dim(grad, dim);\n B::float_mul(val, grad)\n });\n }\n }\n\n match MeanDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (tensor.primitive.shape(), dim),\n B::float_mean_dim(tensor.primitive, dim),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_mean_dim(tensor.primitive, dim)),\n }\n }\n\n fn float_sum_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct SumDim;\n\n impl<B: Backend> Backward<B, 1> for SumDim {\n type State = (Shape, usize);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape, dim) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let ones = B::float_ones(shape, &B::float_device(&grad));\n let grad = 
B::float_sum_dim(grad, dim);\n\n B::float_mul(ones, grad)\n });\n }\n }\n\n match SumDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (tensor.primitive.shape(), dim),\n B::float_sum_dim(tensor.primitive, dim),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_sum_dim(tensor.primitive, dim)),\n }\n }\n\n fn float_argmax(tensor: FloatTensor<Self>, dim: usize) -> IntTensor<B> {\n B::float_argmax(tensor.primitive, dim)\n }\n\n fn float_argmin(tensor: FloatTensor<Self>, dim: usize) -> IntTensor<B> {\n B::float_argmin(tensor.primitive, dim)\n }\n\n fn float_exp(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Exp;\n\n retro_unary!(RetroExp, B::float_exp);\n\n impl<B: Backend> Backward<B, 1> for Exp {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n let output = B::float_exp(input);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_mul(grad, output)\n });\n }\n }\n\n match Exp\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroExp::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_exp(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_exp(tensor.primitive)),\n }\n }\n\n fn float_log(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Log;\n\n retro_unary!(RetroLog, B::float_log);\n\n impl<B: Backend> Backward<B, 1> for Log {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = 
B::float_powf_scalar(input, -1.0);\n B::float_mul(grad, value)\n });\n }\n }\n\n match Log\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroLog::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_log(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_log(tensor.primitive)),\n }\n }\n\n fn float_log1p(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Log1P;\n\n retro_unary!(RetroLog1P, B::float_log1p);\n\n impl<B: Backend> Backward<B, 1> for Log1P {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_add_scalar(input, 1.elem());\n let value = B::float_powf_scalar(value, -1.0);\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Log1P\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroLog1P::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_log1p(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_log1p(tensor.primitive)),\n }\n }\n\n fn float_powf_scalar(tensor: FloatTensor<Self>, value: f32) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct PowfScalar;\n\n #[derive(new, Debug)]\n struct RetroPowfScalar<B: Backend> {\n lhs_id: NodeID,\n rhs: f32,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroPowfScalar<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let lhs = states.get_state::<B::FloatTensorPrimitive>(&self.lhs_id);\n let out = B::float_powf_scalar(lhs, self.rhs);\n states.save(out_node, out)\n }\n }\n\n impl<B: 
Backend> Backward<B, 1> for PowfScalar {\n type State = (NodeID, f32);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (tensor_id, value) = ops.state;\n let tensor = checkpointer.retrieve_node_output(tensor_id);\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let tmp = B::float_powf_scalar(tensor, value - 1.0);\n let value = B::float_mul_scalar(tmp, value.elem());\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match PowfScalar\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroPowfScalar::<B>::new(tensor.node.id, value))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = (prep.checkpoint(&tensor), value);\n prep.finish(state, B::float_powf_scalar(tensor.primitive, value))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_powf_scalar(tensor.primitive, value)),\n }\n }\n\n fn float_sqrt(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sqrt;\n\n retro_unary!(RetroSqrt, B::float_sqrt);\n\n impl<B: Backend> Backward<B, 1> for Sqrt {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_div_scalar(B::float_powf_scalar(input, -0.5), 2.elem());\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Sqrt\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSqrt::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_sqrt(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_sqrt(tensor.primitive)),\n }\n }\n\n fn float_abs(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Abs;\n\n 
retro_unary!(RetroAbs, B::float_abs);\n\n impl<B: Backend> Backward<B, 1> for Abs {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let tensor: B::FloatTensorPrimitive = checkpointer.retrieve_node_output(ops.state);\n let state = B::float_sign(tensor);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_mul(grad, state)\n });\n }\n }\n\n match Abs\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroAbs::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_abs(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_abs(tensor.primitive)),\n }\n }\n\n fn float_cos(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Cos;\n\n retro_unary!(RetroCos, B::float_cos);\n\n impl<B: Backend> Backward<B, 1> for Cos {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_neg(B::float_sin(input));\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Cos\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroCos::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_cos(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_cos(tensor.primitive)),\n }\n }\n\n fn float_sin(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sin;\n\n retro_unary!(RetroSin, B::float_sin);\n\n impl<B: Backend> Backward<B, 1> for Sin {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 
1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let state = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_cos(state);\n B::float_mul(grad, value)\n });\n }\n }\n\n match Sin\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSin::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_sin(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_sin(tensor.primitive)),\n }\n }\n\n fn float_tanh(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Tanh;\n\n retro_unary!(RetroTanh, B::float_tanh);\n\n impl<B: Backend> Backward<B, 1> for Tanh {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n let state = B::float_tanh(input);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_add_scalar(\n B::float_neg(B::float_powf_scalar(state, 2.0)),\n 1.elem(),\n );\n B::float_mul(grad, value)\n });\n }\n }\n\n match Tanh\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroTanh::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_tanh(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_tanh(tensor.primitive)),\n }\n }\n\n fn float_round(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Round;\n retro_unary!(RetroRound, B::float_round);\n\n impl<B: Backend> Backward<B, 1> for Round {\n type State = (Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n 
            ) {
                let (shape, device) = ops.state;
                // round is piecewise-constant: its derivative is zero (almost) everywhere,
                // so backward ignores the incoming gradient and emits zeros.
                unary::<B, _>(ops.parents, ops.node, grads, |_grad| {
                    B::float_zeros(shape, &device)
                })
            }
        }

        match Round
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroRound::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            // Tracked: only shape + device are needed to materialize the zero gradient.
            OpsKind::Tracked(preps) => preps.finish(
                (tensor.primitive.shape(), B::float_device(&tensor.primitive)),
                B::float_round(tensor.primitive),
            ),
            OpsKind::UnTracked(preps) => preps.finish(B::float_round(tensor.primitive)),
        }
    }

    fn float_floor(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Floor;
        retro_unary!(RetroFloor, B::float_floor);

        impl<B: Backend> Backward<B, 1> for Floor {
            type State = (Shape, B::Device);

            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (shape, device) = ops.state;
                // floor is a step function: zero gradient everywhere it is defined.
                unary::<B, _>(ops.parents, ops.node, grads, |_grad| {
                    B::float_zeros(shape, &device)
                })
            }
        }

        match Floor
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroFloor::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(preps) => preps.finish(
                (tensor.primitive.shape(), B::float_device(&tensor.primitive)),
                B::float_floor(tensor.primitive),
            ),
            OpsKind::UnTracked(preps) => preps.finish(B::float_floor(tensor.primitive)),
        }
    }

    fn float_ceil(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Ceil;
        retro_unary!(RetroCeil, B::float_ceil);

        impl<B: Backend> Backward<B, 1> for Ceil {
            type State = (Shape, B::Device);

            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (shape, device) = ops.state;
                // ceil is piecewise-constant, like floor/round: zero gradient.
                unary::<B, _>(ops.parents, ops.node, grads, |_grad| {
                    B::float_zeros(shape, &device)
                })
            }
        }

        match Ceil
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
.retro_forward(RetroCeil::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (tensor.primitive.shape(), B::float_device(&tensor.primitive)),\n B::float_floor(tensor.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_floor(tensor.primitive)),\n }\n }\n\n fn float_erf(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Erf;\n\n retro_unary!(RetroErf, B::float_erf);\n\n impl<B: Backend> Backward<B, 1> for Erf {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let ops = checkpointer.retrieve_node_output(ops.state);\n let exponent = B::float_neg(B::float_powf_scalar(ops, 2.0));\n let numerator = B::float_mul_scalar(B::float_exp(exponent), 2.0.elem());\n let denominator = core::f64::consts::PI.sqrt().elem();\n let value = B::float_div_scalar(numerator, denominator);\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Erf\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroErf::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_erf(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_erf(tensor.primitive)),\n }\n }\n\n fn float_cat(tensors: Vec<FloatTensor<Self>>, dim: usize) -> FloatTensor<Self> {\n #[derive(new, Debug)]\n struct CatStep<B: Backend> {\n nodes: Vec<Option<NodeRef>>,\n // The dimension of each tensor along the dim dimension.\n // This indicates the number of dimension concatenated for each tensor.\n dim_sizes: Vec<usize>,\n output: NodeRef,\n phantom: PhantomData<B>,\n dim: usize,\n }\n\n impl<B: Backend> Step for CatStep<B> {\n fn step(self: Box<Self>, grads: &mut Gradients, _checkpointer: &mut Checkpointer) {\n let grad = 
grads.consume::<B>(&self.output);\n let ranges: Vec<_> = grad.shape().dims.iter().map(|v| 0..*v).collect();\n\n let mut current_index = 0;\n\n self.nodes\n .into_iter()\n .zip(self.dim_sizes)\n .filter_map(|(node, dim_size)| node.map(|node| (node, dim_size)))\n .for_each(|(node, dim_size)| {\n let mut ranges = ranges.clone();\n ranges[self.dim] = current_index..dim_size + current_index;\n current_index += dim_size;\n grads.register::<B>(node.id, B::float_slice(grad.clone(), &ranges));\n });\n }\n\n fn node(&self) -> NodeID {\n self.output.id\n }\n\n fn parents(&self) -> Vec<NodeID> {\n self.nodes\n .iter()\n .filter_map(|node| node.clone())\n .map(|node| node.id)\n .collect()\n }\n fn depth(&self) -> usize {\n self.output.order\n }\n }\n\n let mut nodes = Vec::with_capacity(tensors.len());\n let mut primitives = Vec::with_capacity(tensors.len());\n let mut dim_sizes = Vec::with_capacity(tensors.len());\n\n tensors.into_iter().for_each(|tensor| {\n dim_sizes.push(tensor.primitive.shape().dims[dim]);\n nodes.push(tensor.node);\n primitives.push(tensor.primitive);\n });\n\n let requirement = Requirement::from_nodes(&nodes);\n\n // For simplicity, this operation does not checkpoint anything\n let cat_computing_property = ComputingProperty::Ambiguous;\n let checkpointer_builder = CheckpointerBuilder::default();\n\n let output = B::float_cat(primitives, dim);\n if requirement.is_none() {\n return AutodiffTensor::from_parents(\n output,\n &nodes,\n requirement,\n cat_computing_property,\n );\n }\n\n let output =\n AutodiffTensor::from_parents(output, &nodes, requirement, cat_computing_property);\n let nodes = nodes\n .into_iter()\n .map(|node| node.clone_if_require_grad())\n .collect::<Vec<_>>();\n\n let ops = CatStep::<B>::new(nodes, dim_sizes, output.node.clone(), dim);\n output.register_step(ops, checkpointer_builder)\n }\n\n fn float_max_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {\n match MaxMinDim\n .prepare::<C>([tensor.node])\n 
.compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let shape = tensor.primitive.shape();\n let (tensor, index) = B::float_max_dim_with_indices(tensor.primitive, dim);\n prep.finish((index, shape), tensor)\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_max_dim(tensor.primitive, dim)),\n }\n }\n fn float_max_dim_with_indices(\n tensor: FloatTensor<Self>,\n dim: usize,\n ) -> (FloatTensor<Self>, IntTensor<B>) {\n match MaxMinDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let shape = tensor.primitive.shape();\n let (tensor, index) = B::float_max_dim_with_indices(tensor.primitive, dim);\n let tensor = prep.finish((index.clone(), shape), tensor);\n\n (tensor, index)\n }\n OpsKind::UnTracked(prep) => {\n let (tensor, index) = B::float_max_dim_with_indices(tensor.primitive, dim);\n let tensor = prep.finish(tensor);\n\n (tensor, index)\n }\n }\n }\n fn float_min_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {\n match MaxMinDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let shape = tensor.primitive.shape();\n let (tensor, index) = B::float_min_dim_with_indices(tensor.primitive, dim);\n prep.finish((index, shape), tensor)\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_min_dim(tensor.primitive, dim)),\n }\n }\n fn float_min_dim_with_indices(\n tensor: FloatTensor<Self>,\n dim: usize,\n ) -> (FloatTensor<Self>, IntTensor<B>) {\n match MaxMinDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let shape = tensor.primitive.shape();\n let (tensor, index) = B::float_min_dim_with_indices(tensor.primitive, dim);\n let tensor = prep.finish((index.clone(), shape), tensor);\n\n (tensor, index)\n }\n OpsKind::UnTracked(prep) => {\n let (tensor, index) = B::float_min_dim_with_indices(tensor.primitive, dim);\n let tensor = prep.finish(tensor);\n\n (tensor, index)\n }\n }\n }\n\n 
fn float_into_int(tensor: FloatTensor<Self>) -> <Autodiff<B> as Backend>::IntTensorPrimitive {\n B::float_into_int(tensor.primitive)\n }\n\n fn float_powf(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct PowF;\n\n retro_binary!(RetroPowf, B::float_powf);\n\n impl<B: Backend> Backward<B, 2> for PowF {\n type State = (NodeID, NodeID, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (lhs_id, rhs_id, broadcast) = ops.state;\n let lhs: B::FloatTensorPrimitive = checkpointer.retrieve_node_output(lhs_id);\n let rhs: B::FloatTensorPrimitive = checkpointer.retrieve_node_output(rhs_id);\n\n // Both lhs and rhs are needed for both lhs and rhs gradients, but we clone them\n // the number of times required by the parents specification.\n let [rhs_4lhs, rhs_4rhs] = duplicate(&ops.parents, Some(rhs));\n let [lhs_4lhs, lhs_4rhs] = duplicate(&ops.parents, Some(lhs));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n //rhs*(lhs.val**(rhs-1))*grad\n let rhs1 = rhs_4lhs.unwrap();\n let rhs2 = rhs1.clone();\n let lhs = lhs_4lhs.unwrap();\n\n let tmp = B::float_powf(\n lhs,\n B::float_sub_scalar(rhs1, B::FloatElem::from_elem(1.0)),\n );\n let value = B::float_mul(tmp, rhs2);\n let grad = B::float_mul(grad, value);\n\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n //lhs**rhs * ln(lhs) * grad\n let rhs = rhs_4rhs.unwrap();\n let lhs1 = lhs_4rhs.unwrap();\n let lhs2 = lhs1.clone();\n let tmp = B::float_powf(lhs1, rhs);\n let value = B::float_mul(tmp, B::float_log(lhs2));\n let grad = B::float_mul(grad, value);\n\n broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match PowF\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroPowf::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, 
&rhs])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = prep.checkpoint(&lhs);\n let rhs_state = prep.checkpoint(&rhs);\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n B::float_powf(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_powf(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_sign(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sign;\n\n retro_unary!(RetroSign, B::float_sign);\n\n impl<B: Backend> Backward<B, 1> for Sign {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad|\n // Always return 0 because the derivative of the sign function\n // does not contribute to gradient updates in a meaningful way.\n B::float_mul_scalar(grad, 0.elem()));\n }\n }\n\n Sign.prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSign::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateless(B::float_sign(tensor.primitive))\n }\n\n fn float_expand(tensor: FloatTensor<Self>, shape: Shape) -> FloatTensor<Self> {\n // D1: tensor, D2: shape\n #[derive(Debug)]\n struct ExpandDim;\n\n #[derive(new, Debug)]\n struct RetroExpand<B: Backend> {\n input_id: NodeID,\n shape: Shape,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroExpand<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_expand(input, self.shape.clone());\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for ExpandDim {\n type State = (Shape, Shape);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape_in, shape_out) = ops.state;\n let ndims_in = shape_in.num_dims();\n let ndims_out = shape_out.num_dims();\n\n let 
mut shape_expanded = vec![1; ndims_out];\n\n debug_assert!(ndims_out >= ndims_in);\n\n for i in 0..ndims_in {\n shape_expanded[i + (ndims_out - ndims_in)] = shape_in.dims[i];\n }\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let shape_grad = grad.shape();\n let mut grad = grad;\n\n #[allow(clippy::needless_range_loop)]\n for i in 0..ndims_out {\n if shape_expanded[i] == 1 && shape_grad.dims[i] != 1 {\n grad = B::float_sum_dim(grad, i);\n }\n }\n\n B::float_reshape(grad, shape_in)\n });\n }\n }\n\n match ExpandDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroExpand::<B>::new(tensor.node.id, shape.clone()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (tensor.primitive.shape(), shape.clone()),\n B::float_expand(tensor.primitive, shape),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_expand(tensor.primitive, shape)),\n }\n }\n\n fn float_sort(tensor: FloatTensor<Self>, dim: usize, descending: bool) -> FloatTensor<Self> {\n match super::sort::SortDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let shape = tensor.primitive.shape();\n let (tensor, indices) =\n B::float_sort_with_indices(tensor.primitive, dim, descending);\n prep.finish((indices, shape), tensor)\n }\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_sort(tensor.primitive, dim, descending))\n }\n }\n }\n\n fn float_sort_with_indices(\n tensor: FloatTensor<Self>,\n dim: usize,\n descending: bool,\n ) -> (FloatTensor<Self>, IntTensor<B>) {\n match super::sort::SortDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let shape = tensor.primitive.shape();\n let (tensor, indices) =\n B::float_sort_with_indices(tensor.primitive, dim, descending);\n let tensor = prep.finish((indices.clone(), shape), tensor);\n\n (tensor, indices)\n }\n OpsKind::UnTracked(prep) => {\n let (tensor, indices) =\n 
B::float_sort_with_indices(tensor.primitive, dim, descending);\n let tensor = prep.finish(tensor);\n\n (tensor, indices)\n }\n }\n }\n\n fn float_argsort(tensor: FloatTensor<Self>, dim: usize, descending: bool) -> IntTensor<B> {\n B::float_argsort(tensor.primitive, dim, descending)\n }\n\n fn float_repeat_dim(tensor: FloatTensor<Self>, dim: usize, times: usize) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Repeat;\n\n #[derive(new, Debug)]\n struct RetroRepeat<B: Backend> {\n tensor_id: NodeID,\n dim: usize,\n times: usize,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroRepeat<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);\n let out = B::float_repeat_dim(tensor, self.dim, self.times);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for Repeat {\n type State = (usize, usize);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, times) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let mut dims = grad.shape().dims;\n let orig_dim_size = dims[dim] / times;\n if orig_dim_size > 1 {\n dims[dim] = orig_dim_size;\n let orig_dims = dims.clone();\n dims.insert(dim + 1, times); // shape [..., orig_dim_size, times, ...]\n let grad = B::float_reshape(grad, Shape::from(dims));\n let grad = B::float_sum_dim(grad, dim + 1); // sum over repeat times\n B::float_reshape(grad, Shape::from(orig_dims))\n } else {\n B::float_sum_dim(grad, dim)\n }\n });\n }\n }\n\n match Repeat\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroRepeat::<B>::new(tensor.node.id, dim, times))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (dim, times),\n B::float_repeat_dim(tensor.primitive, dim, times),\n ),\n OpsKind::UnTracked(prep) => {\n 
prep.finish(B::float_repeat_dim(tensor.primitive, dim, times))\n }\n }\n }\n\n fn float_cast(tensor: FloatTensor<Self>, dtype: burn_tensor::FloatDType) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_cast(tensor.primitive, dtype))\n }\n\n // TODO: Implement float_prod and float_sum\n // https://github.com/tracel-ai/burn/issues/1458\n}"
],
"name": "lhs",
"type": "FloatTensor<Self>"
}
],
"end_line": 189,
"name": "float_add_scalar",
"signature": "fn float_add_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self>",
"start_line": 164
} | {
"class_name": "impl<B: Backend, C: CheckpointStrategy> FloatTensorOps<Self> for Autodiff<B, C> {\n fn float_from_data(data: TensorData, device: &Device<Self>) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_from_data(data, device))\n }\n\n fn float_random(\n shape: Shape,\n distribution: burn_tensor::Distribution,\n device: &Device<Self>,\n ) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_random(shape, distribution, device))\n }\n\n fn float_zeros(shape: Shape, device: &Device<Self>) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_zeros(shape, device))\n }\n\n fn float_ones(shape: Shape, device: &Device<Self>) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_ones(shape, device))\n }\n\n async fn float_into_data(tensor: FloatTensor<Self>) -> TensorData {\n B::float_into_data(tensor.primitive).await\n }\n\n fn float_device(tensor: &FloatTensor<Self>) -> Device<Self> {\n B::float_device(&tensor.primitive)\n }\n\n fn float_to_device(tensor: FloatTensor<Self>, device: &Device<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct ToDevice;\n\n impl<B: Backend> Backward<B, 1> for ToDevice {\n type State = B::Device;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_to_device(grad, &ops.state)\n });\n }\n }\n\n match ToDevice\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let device_old = B::float_device(&tensor.primitive);\n prep.finish(device_old, B::float_to_device(tensor.primitive, device))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_to_device(tensor.primitive, device)),\n }\n }\n\n fn float_empty(shape: Shape, device: &Device<Self>) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_empty(shape, device))\n }\n\n fn float_add(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Add;\n\n 
retro_binary!(RetroAdd, B::float_add);\n\n impl<B: Backend> Backward<B, 2> for Add {\n type State = (Shape, Shape);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape_lhs, shape_rhs) = ops.state;\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| broadcast_shape::<B>(grad, &shape_lhs),\n |grad| broadcast_shape::<B>(grad, &shape_rhs),\n );\n }\n }\n\n match Add\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroAdd::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (lhs.primitive.shape(), rhs.primitive.shape()),\n B::float_add(lhs.primitive, rhs.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_add(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_add_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct AddScalar;\n\n retro_unary_scalar!(RetroAddScalar, B::float_add_scalar);\n\n impl<B: Backend> Backward<B, 1> for AddScalar {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| grad);\n }\n }\n\n AddScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroAddScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateless(B::float_add_scalar(lhs.primitive, rhs))\n }\n\n fn float_sub(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sub;\n\n retro_binary!(RetroSub, B::float_sub);\n\n impl<B: Backend> Backward<B, 2> for Sub {\n type State = (Shape, Shape);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape_lhs, shape_rhs) = ops.state;\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n 
grads,\n |grad| broadcast_shape::<B>(grad, &shape_lhs),\n |grad| broadcast_shape::<B>(B::float_neg(grad), &shape_rhs),\n );\n }\n }\n\n match Sub\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroSub::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (lhs.primitive.shape(), rhs.primitive.shape()),\n B::float_sub(lhs.primitive, rhs.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_sub(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_sub_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct SubScalar;\n\n retro_unary_scalar!(RetroSubScalar, B::float_sub_scalar);\n\n impl<B: Backend> Backward<B, 1> for SubScalar {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| grad);\n }\n }\n\n SubScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroSubScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateless(B::float_sub_scalar(lhs.primitive, rhs))\n }\n\n fn float_mul(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Mul;\n\n retro_binary!(RetroMul, B::float_mul);\n\n impl<B: Backend> Backward<B, 2> for Mul {\n type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (lhs, rhs, broadcast) = ops.state;\n let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));\n let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let grad = B::float_mul(grad, rhs.unwrap());\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n let grad = B::float_mul(grad, 
lhs.unwrap());\n broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let lhs_tracked = lhs.is_tracked();\n let rhs_tracked = rhs.is_tracked();\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match Mul\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroMul::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));\n let rhs_state = lhs_tracked.then(|| prep.checkpoint(&rhs));\n\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n B::float_mul(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_mul(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_mul_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct MulScalar;\n\n retro_unary_scalar!(RetroMulScalar, B::float_mul_scalar);\n\n impl<B: Backend> Backward<B, 1> for MulScalar {\n type State = FloatElem<B>;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_mul_scalar(grad, ops.state)\n });\n }\n }\n\n match MulScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroMulScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(rhs, B::float_mul_scalar(lhs.primitive, rhs)),\n OpsKind::UnTracked(prep) => prep.finish(B::float_mul_scalar(lhs.primitive, rhs)),\n }\n }\n\n fn float_div(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Div;\n\n retro_binary!(RetroDiv, B::float_div);\n\n impl<B: Backend> Backward<B, 2> for Div {\n type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n 
checkpointer: &mut Checkpointer,\n ) {\n let (lhs, rhs, broadcast) = ops.state;\n let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));\n let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));\n let [rhs_4lhs, rhs_4rhs] = duplicate(&ops.parents, rhs);\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let rhs = rhs_4lhs.unwrap();\n let value = B::float_powf_scalar(rhs, -1.0);\n let grad = B::float_mul(grad, value);\n\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n let rhs = rhs_4rhs.unwrap();\n let lhs = lhs.unwrap();\n let value = B::float_div(B::float_neg(lhs), B::float_powf_scalar(rhs, 2.0));\n let grad = B::float_mul(grad, value);\n\n broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let lhs_tracked = lhs.is_tracked();\n let rhs_tracked = rhs.is_tracked();\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match Div\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroDiv::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));\n let rhs_state = (lhs_tracked || rhs_tracked).then(|| prep.checkpoint(&rhs));\n\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n B::float_div(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_div(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_div_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct DivScalar;\n\n retro_unary_scalar!(RetroDivScalar, B::float_div_scalar);\n\n impl<B: Backend> Backward<B, 1> for DivScalar {\n type State = FloatElem<B>;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let tmp = 1.0 / ops.state.elem::<f32>();\n B::float_mul_scalar(grad, 
tmp.elem())\n });\n }\n }\n\n match DivScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroDivScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(rhs, B::float_div_scalar(lhs.primitive, rhs)),\n OpsKind::UnTracked(prep) => prep.finish(B::float_div_scalar(lhs.primitive, rhs)),\n }\n }\n\n fn float_remainder(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Rem;\n\n retro_binary!(RetroRem, B::float_remainder);\n\n impl<B: Backend> Backward<B, 2> for Rem {\n type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (lhs, rhs, broadcast) = ops.state;\n let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));\n let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n // remainder(x, y) = x - floor(x / y) * y\n // partial(x - floor(x / y) * y, x) = 1\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n // partial(x - floor(x / y) * y, y) = - floor(x / y)\n let rhs = rhs.unwrap();\n let lhs = lhs.unwrap();\n let value = B::float_neg(B::float_floor(B::float_div(lhs, rhs)));\n let grad = B::float_mul(grad, value);\n broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let lhs_tracked = lhs.is_tracked();\n let rhs_tracked = rhs.is_tracked();\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match Rem\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroRem::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));\n let rhs_state = (lhs_tracked || rhs_tracked).then(|| prep.checkpoint(&rhs));\n\n prep.finish(\n (lhs_state, 
rhs_state, broadcast),\n B::float_remainder(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_remainder(lhs.primitive, rhs.primitive))\n }\n }\n }\n\n fn float_remainder_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct RemainderScalar;\n\n retro_unary_scalar!(RetroRemainderScalar, B::float_remainder_scalar);\n\n impl<B: Backend> Backward<B, 1> for RemainderScalar {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| grad);\n }\n }\n\n RemainderScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroRemainderScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateless(B::float_remainder_scalar(lhs.primitive, rhs))\n }\n\n fn float_matmul(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Matmul;\n\n impl<B: Backend> Backward<B, 2> for Matmul {\n type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (lhs, rhs, broadcast) = ops.state;\n let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));\n let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let rhs = B::float_transpose(rhs.unwrap());\n let grad = B::float_matmul(grad, rhs);\n\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n let lhs = B::float_transpose(lhs.unwrap());\n let grad = B::float_matmul(lhs, grad);\n\n broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let lhs_tracked = lhs.is_tracked();\n let rhs_tracked = rhs.is_tracked();\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match Matmul\n .prepare::<C>([lhs.node.clone(), 
rhs.node.clone()])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));\n let rhs_state = lhs_tracked.then(|| prep.checkpoint(&rhs));\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n B::float_matmul(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_matmul(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_neg(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Neg;\n\n retro_unary!(RetroNeg, B::float_neg);\n\n impl<B: Backend> Backward<B, 1> for Neg {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| B::float_neg(grad));\n }\n }\n\n Neg.prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroNeg::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateless(B::float_neg(tensor.primitive))\n }\n\n fn float_recip(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Recip;\n\n retro_unary!(RetroRecip, B::float_recip);\n\n impl<B: Backend> Backward<B, 1> for Recip {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let tensor = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let tmp = B::float_powf_scalar(tensor, -2.0);\n let value = B::float_neg(tmp);\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Recip\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroRecip::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_recip(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_recip(tensor.primitive)),\n }\n }\n\n fn 
float_swap_dims(tensor: FloatTensor<Self>, dim1: usize, dim2: usize) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct SwapDim;\n\n #[derive(new, Debug)]\n struct RetroSwapDims<B: Backend> {\n input_id: NodeID,\n dim1: usize,\n dim2: usize,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroSwapDims<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_swap_dims(input, self.dim1, self.dim2);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for SwapDim {\n type State = (usize, usize);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim1, dim2) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_swap_dims(grad, dim2, dim1)\n });\n }\n }\n\n match SwapDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSwapDims::<B>::new(tensor.node.id, dim1, dim2))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (dim1, dim2),\n B::float_swap_dims(tensor.primitive, dim1, dim2),\n ),\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_swap_dims(tensor.primitive, dim1, dim2))\n }\n }\n }\n\n fn float_permute(tensor: FloatTensor<Self>, axes: &[usize]) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct PermuteDim;\n\n #[derive(new, Debug)]\n struct RetroPermuteDims<B: Backend> {\n input_id: NodeID,\n axes: Vec<usize>,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroPermuteDims<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_permute(input, &self.axes);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for PermuteDim {\n type State = Vec<usize>;\n\n fn backward(\n self,\n ops: 
Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let axes = ops.state;\n\n let mut inverse = vec![0usize; axes.len()];\n axes.iter()\n .enumerate()\n .for_each(|(i, &axis)| inverse[axis] = i);\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_permute(grad, &inverse)\n });\n }\n }\n\n match PermuteDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroPermuteDims::<B>::new(tensor.node.id, axes.to_vec()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n prep.finish(axes.to_vec(), B::float_permute(tensor.primitive, axes))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_permute(tensor.primitive, axes)),\n }\n }\n\n fn float_flip(tensor: FloatTensor<Self>, axes: &[usize]) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct FlipDim;\n\n #[derive(new, Debug)]\n struct RetroFlipDims<B: Backend> {\n input_id: NodeID,\n axes: Vec<usize>,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroFlipDims<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_flip(input, &self.axes);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for FlipDim {\n type State = Vec<usize>;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let axes = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_flip(grad, &axes)\n });\n }\n }\n\n match FlipDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroFlipDims::<B>::new(tensor.node.id, axes.to_vec()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n prep.finish(axes.to_vec(), B::float_flip(tensor.primitive, axes))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_flip(tensor.primitive, axes)),\n }\n }\n\n fn 
float_reshape(tensor: FloatTensor<Self>, shape: Shape) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct ReshapeDim;\n\n #[derive(new, Debug)]\n struct RetroReshape<B: Backend> {\n input_id: NodeID,\n shape: Shape,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroReshape<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_reshape(input, self.shape.clone());\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for ReshapeDim {\n type State = (Shape, Shape);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape_original, shape) = ops.state;\n let ndims_out = shape.num_dims();\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let shape_grad = grad.shape();\n let mut grad = grad;\n\n for i in 0..ndims_out {\n if shape.dims[i] == 1 && shape_grad.dims[i] != 1 {\n grad = B::float_sum_dim(grad, i);\n }\n }\n\n B::float_reshape(grad, shape_original)\n });\n }\n }\n\n match ReshapeDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroReshape::<B>::new(tensor.node.id, shape.clone()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (tensor.primitive.shape(), shape.clone()),\n B::float_reshape(tensor.primitive, shape),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_reshape(tensor.primitive, shape)),\n }\n }\n\n fn float_gather(\n dim: usize,\n tensor: FloatTensor<Self>,\n indices: IntTensor<B>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Gather;\n\n impl<B: Backend> Backward<B, 1> for Gather {\n type State = (usize, IntTensor<B>, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, indices, shape, device) = ops.state;\n\n unary::<B, _>(ops.parents, 
ops.node, grads, |grad| {\n let zeros = B::float_zeros(shape, &device);\n B::float_scatter(dim, zeros, indices, grad)\n });\n }\n }\n\n match Gather\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n dim,\n indices.clone(),\n tensor.primitive.shape(),\n B::float_device(&tensor.primitive),\n ),\n B::float_gather(dim, tensor.primitive, indices),\n ),\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_gather(dim, tensor.primitive, indices))\n }\n }\n }\n\n fn float_scatter(\n dim: usize,\n tensor: FloatTensor<Self>,\n indices: IntTensor<B>,\n value: FloatTensor<Self>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Scatter;\n\n impl<B: Backend> Backward<B, 2> for Scatter {\n type State = (usize, IntTensor<B>, Shape, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, indices, shape_lhs, shape_rhs, device) = ops.state;\n let [indices_4lhs, indices_4rhs] = duplicate(&ops.parents, Some(indices));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let zeros = B::float_zeros(shape_lhs, &device);\n B::float_scatter(dim, grad, indices_4lhs.unwrap(), zeros)\n },\n |grad| {\n let zeros = B::float_zeros(shape_rhs, &device);\n B::float_scatter(dim, zeros, indices_4rhs.unwrap(), grad)\n },\n );\n }\n }\n\n match Scatter\n .prepare::<C>([tensor.node, value.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n dim,\n indices.clone(),\n tensor.primitive.shape(),\n value.primitive.shape(),\n B::float_device(&value.primitive),\n ),\n B::float_scatter(dim, tensor.primitive, indices, value.primitive),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_scatter(\n dim,\n tensor.primitive,\n indices,\n value.primitive,\n )),\n }\n }\n\n fn float_select(\n tensor: FloatTensor<Self>,\n dim: usize,\n indices: IntTensor<B>,\n ) -> FloatTensor<Self> {\n 
#[derive(Debug)]\n struct Select;\n\n #[derive(new, Debug)]\n struct RetroSelect<B: Backend> {\n input_id: NodeID,\n dim: usize,\n indices: IntTensor<B>,\n }\n\n impl<B: Backend> RetroForward for RetroSelect<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_select(input, self.dim, self.indices.clone());\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for Select {\n type State = (usize, IntTensor<B>, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, indices, shape, device) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let zeros = B::float_zeros(shape, &device);\n B::float_select_assign(zeros, dim, indices, grad)\n });\n }\n }\n\n match Select\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSelect::<B>::new(tensor.node.id, dim, indices.clone()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n dim,\n indices.clone(),\n tensor.primitive.shape(),\n B::float_device(&tensor.primitive),\n ),\n B::float_select(tensor.primitive, dim, indices),\n ),\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_select(tensor.primitive, dim, indices))\n }\n }\n }\n\n fn float_select_assign(\n tensor: FloatTensor<Self>,\n dim: usize,\n indices: IntTensor<B>,\n value: FloatTensor<Self>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct IndexSelectDimAssign;\n\n #[derive(new, Debug)]\n struct RetroSelectAssign<B: Backend> {\n tensor_id: NodeID,\n dim: usize,\n indices: IntTensor<B>,\n value_id: NodeID,\n }\n\n impl<B: Backend> RetroForward for RetroSelectAssign<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);\n let value = 
states.get_state::<B::FloatTensorPrimitive>(&self.value_id);\n let out = B::float_select_assign(tensor, self.dim, self.indices.clone(), value);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 2> for IndexSelectDimAssign {\n type State = (usize, IntTensor<B>);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, indices) = ops.state;\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| grad,\n |grad| B::float_select(grad, dim, indices),\n );\n }\n }\n\n match IndexSelectDimAssign\n .prepare::<C>([tensor.node.clone(), value.node.clone()])\n .memory_bound()\n .retro_forward(RetroSelectAssign::<B>::new(\n tensor.node.id,\n dim,\n indices.clone(),\n value.node.id,\n ))\n .parents([&tensor, &value])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (dim, indices.clone()),\n B::float_select_assign(tensor.primitive, dim, indices, value.primitive),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_select_assign(\n tensor.primitive,\n dim,\n indices,\n value.primitive,\n )),\n }\n }\n\n fn float_slice(\n tensor: FloatTensor<Self>,\n ranges: &[core::ops::Range<usize>],\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Index;\n\n #[derive(new, Debug)]\n struct RetroSlice<B: Backend> {\n tensor_id: NodeID,\n ranges: Vec<core::ops::Range<usize>>,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroSlice<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);\n let out = B::float_slice(tensor, &self.ranges);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for Index {\n type State = (Vec<core::ops::Range<usize>>, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (ranges, shape, device) = ops.state;\n\n 
unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let zeros = B::float_zeros(shape, &device);\n B::float_slice_assign(zeros, &ranges, grad)\n });\n }\n }\n\n match Index\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSlice::<B>::new(tensor.node.id, ranges.to_vec()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n ranges.to_vec(),\n tensor.primitive.shape(),\n B::float_device(&tensor.primitive),\n ),\n B::float_slice(tensor.primitive, ranges),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_slice(tensor.primitive, ranges)),\n }\n }\n\n fn float_slice_assign(\n tensor: FloatTensor<Self>,\n ranges: &[core::ops::Range<usize>],\n value: FloatTensor<Self>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct SliceAssign;\n\n #[derive(new, Debug)]\n struct RetroSliceAssign<B: Backend> {\n tensor_id: NodeID,\n ranges: Vec<core::ops::Range<usize>>,\n value_id: NodeID,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroSliceAssign<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);\n let value = states.get_state::<B::FloatTensorPrimitive>(&self.value_id);\n let out = B::float_slice_assign(tensor, &self.ranges, value);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 2> for SliceAssign {\n type State = (Vec<core::ops::Range<usize>>, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (ranges, shape_rhs, device) = ops.state;\n let [ranges_4lhs, ranges_4rhs] = duplicate(&ops.parents, Some(ranges));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let zeros = B::float_zeros(shape_rhs, &device);\n B::float_slice_assign(grad, &ranges_4lhs.unwrap(), zeros)\n },\n |grad| B::float_slice(grad, &ranges_4rhs.unwrap()),\n );\n }\n }\n\n match 
SliceAssign\n .prepare::<C>([tensor.node.clone(), value.node.clone()])\n .memory_bound()\n .retro_forward(RetroSliceAssign::<B>::new(\n tensor.node.id,\n ranges.to_vec(),\n value.node.id,\n ))\n .parents([&tensor, &value])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n ranges.to_vec(),\n value.primitive.shape(),\n B::float_device(&value.primitive),\n ),\n B::float_slice_assign(tensor.primitive, ranges, value.primitive),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_slice_assign(\n tensor.primitive,\n ranges,\n value.primitive,\n )),\n }\n }\n\n fn float_mask_where(\n tensor: FloatTensor<Self>,\n mask: BoolTensor<Self>,\n source: FloatTensor<Self>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct MaskWhere;\n\n impl<B: Backend> Backward<B, 2> for MaskWhere {\n type State = (BoolTensor<B>, Shape, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (mask, shape_lhs, shape_rhs, device) = ops.state;\n let [mask_4lhs, mask_4rhs] = duplicate(&ops.parents, Some(mask));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let zeros = B::float_zeros(shape_lhs.clone(), &device);\n let grad = B::float_mask_where(grad, mask_4lhs.unwrap(), zeros);\n\n broadcast_shape::<B>(grad, &shape_lhs)\n },\n |grad| {\n let zeros = B::float_zeros(shape_rhs.clone(), &device);\n let grad = B::float_mask_where(zeros, mask_4rhs.unwrap(), grad);\n\n broadcast_shape::<B>(grad, &shape_rhs)\n },\n );\n }\n }\n\n match MaskWhere\n .prepare::<C>([tensor.node, source.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n mask.clone(),\n tensor.primitive.shape(),\n source.primitive.shape(),\n B::float_device(&source.primitive),\n ),\n B::float_mask_where(tensor.primitive, mask, source.primitive),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_mask_where(\n tensor.primitive,\n mask,\n source.primitive,\n )),\n }\n 
}\n\n fn float_mask_fill(\n tensor: FloatTensor<Self>,\n mask: BoolTensor<B>,\n value: FloatElem<B>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct MaskFill;\n\n impl<B: Backend> Backward<B, 1> for MaskFill {\n type State = BoolTensor<B>;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_mask_fill(grad, ops.state, 0.elem())\n });\n }\n }\n\n match MaskFill\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n mask.clone(),\n B::float_mask_fill(tensor.primitive, mask, value),\n ),\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_mask_fill(tensor.primitive, mask, value))\n }\n }\n }\n\n fn float_equal(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {\n B::float_equal(lhs.primitive, rhs.primitive)\n }\n\n fn float_equal_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {\n B::float_equal_elem(lhs.primitive, rhs)\n }\n\n fn float_greater(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {\n B::float_greater(lhs.primitive, rhs.primitive)\n }\n\n fn float_greater_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {\n B::float_greater_elem(lhs.primitive, rhs)\n }\n\n fn float_greater_equal(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {\n B::float_greater_equal(lhs.primitive, rhs.primitive)\n }\n\n fn float_greater_equal_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {\n B::float_greater_equal_elem(lhs.primitive, rhs)\n }\n\n fn float_lower(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {\n B::float_lower(lhs.primitive, rhs.primitive)\n }\n\n fn float_lower_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {\n B::float_lower_elem(lhs.primitive, rhs)\n }\n\n fn float_lower_equal(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> 
BoolTensor<B> {\n B::float_lower_equal(lhs.primitive, rhs.primitive)\n }\n\n fn float_lower_equal_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {\n B::float_lower_equal_elem(lhs.primitive, rhs)\n }\n\n fn float_detach(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n // When we detach a tensor, we remove it from the graph, but we still want to keep the\n // `require_grad` setting.\n let is_require_grad = Self::float_is_require_grad(&tensor);\n let tensor = AutodiffTensor::new(tensor.primitive);\n\n match is_require_grad {\n true => tensor.require_grad(),\n false => tensor,\n }\n }\n\n fn float_set_require_grad(tensor: FloatTensor<Self>, require_grad: bool) -> FloatTensor<Self> {\n if require_grad {\n return tensor.require_grad();\n }\n\n AutodiffTensor::new(tensor.primitive)\n }\n\n fn float_is_require_grad(tensor: &FloatTensor<Self>) -> bool {\n matches!(tensor.node.requirement, Requirement::Grad)\n }\n\n fn float_mean(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Mean;\n\n impl<B: Backend> Backward<B, 1> for Mean {\n type State = Shape;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let shape = ops.state;\n let val = 1_f64 / shape.num_elements() as f64;\n let ones = B::float_ones(shape, &B::float_device(&grad));\n let val = B::float_mul_scalar(ones, val.elem());\n\n let grad = unsqueeze_like::<B>(grad, val.shape());\n B::float_mul(val, grad)\n });\n }\n }\n\n match Mean.prepare::<C>([tensor.node]).compute_bound().stateful() {\n OpsKind::Tracked(prep) => {\n prep.finish(tensor.primitive.shape(), B::float_mean(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_mean(tensor.primitive)),\n }\n }\n\n fn float_sum(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sum;\n\n impl<B: Backend> Backward<B, 1> for Sum {\n type State = Shape;\n\n 
fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let val = B::float_ones(ops.state, &B::float_device(&grad));\n\n let grad = unsqueeze_like::<B>(grad, val.shape());\n B::float_mul(val, grad)\n });\n }\n }\n\n match Sum.prepare::<C>([tensor.node]).compute_bound().stateful() {\n OpsKind::Tracked(prep) => {\n prep.finish(tensor.primitive.shape(), B::float_sum(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_sum(tensor.primitive)),\n }\n }\n\n fn float_mean_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct MeanDim;\n\n impl<B: Backend> Backward<B, 1> for MeanDim {\n type State = (Shape, usize);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape, dim) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let val = 1_f64 / shape.dims[dim] as f64;\n let ones = B::float_ones(shape, &B::float_device(&grad));\n let val = B::float_mul_scalar(ones, B::FloatElem::from_elem(val));\n\n let grad = B::float_sum_dim(grad, dim);\n B::float_mul(val, grad)\n });\n }\n }\n\n match MeanDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (tensor.primitive.shape(), dim),\n B::float_mean_dim(tensor.primitive, dim),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_mean_dim(tensor.primitive, dim)),\n }\n }\n\n fn float_sum_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct SumDim;\n\n impl<B: Backend> Backward<B, 1> for SumDim {\n type State = (Shape, usize);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape, dim) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let ones = 
B::float_ones(shape, &B::float_device(&grad));\n let grad = B::float_sum_dim(grad, dim);\n\n B::float_mul(ones, grad)\n });\n }\n }\n\n match SumDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (tensor.primitive.shape(), dim),\n B::float_sum_dim(tensor.primitive, dim),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_sum_dim(tensor.primitive, dim)),\n }\n }\n\n fn float_argmax(tensor: FloatTensor<Self>, dim: usize) -> IntTensor<B> {\n B::float_argmax(tensor.primitive, dim)\n }\n\n fn float_argmin(tensor: FloatTensor<Self>, dim: usize) -> IntTensor<B> {\n B::float_argmin(tensor.primitive, dim)\n }\n\n fn float_exp(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Exp;\n\n retro_unary!(RetroExp, B::float_exp);\n\n impl<B: Backend> Backward<B, 1> for Exp {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n let output = B::float_exp(input);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_mul(grad, output)\n });\n }\n }\n\n match Exp\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroExp::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_exp(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_exp(tensor.primitive)),\n }\n }\n\n fn float_log(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Log;\n\n retro_unary!(RetroLog, B::float_log);\n\n impl<B: Backend> Backward<B, 1> for Log {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, 
ops.node, grads, |grad| {\n let value = B::float_powf_scalar(input, -1.0);\n B::float_mul(grad, value)\n });\n }\n }\n\n match Log\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroLog::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_log(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_log(tensor.primitive)),\n }\n }\n\n fn float_log1p(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Log1P;\n\n retro_unary!(RetroLog1P, B::float_log1p);\n\n impl<B: Backend> Backward<B, 1> for Log1P {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_add_scalar(input, 1.elem());\n let value = B::float_powf_scalar(value, -1.0);\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Log1P\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroLog1P::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_log1p(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_log1p(tensor.primitive)),\n }\n }\n\n fn float_powf_scalar(tensor: FloatTensor<Self>, value: f32) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct PowfScalar;\n\n #[derive(new, Debug)]\n struct RetroPowfScalar<B: Backend> {\n lhs_id: NodeID,\n rhs: f32,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroPowfScalar<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let lhs = states.get_state::<B::FloatTensorPrimitive>(&self.lhs_id);\n let out = B::float_powf_scalar(lhs, self.rhs);\n 
states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for PowfScalar {\n type State = (NodeID, f32);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (tensor_id, value) = ops.state;\n let tensor = checkpointer.retrieve_node_output(tensor_id);\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let tmp = B::float_powf_scalar(tensor, value - 1.0);\n let value = B::float_mul_scalar(tmp, value.elem());\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match PowfScalar\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroPowfScalar::<B>::new(tensor.node.id, value))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = (prep.checkpoint(&tensor), value);\n prep.finish(state, B::float_powf_scalar(tensor.primitive, value))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_powf_scalar(tensor.primitive, value)),\n }\n }\n\n fn float_sqrt(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sqrt;\n\n retro_unary!(RetroSqrt, B::float_sqrt);\n\n impl<B: Backend> Backward<B, 1> for Sqrt {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_div_scalar(B::float_powf_scalar(input, -0.5), 2.elem());\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Sqrt\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSqrt::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_sqrt(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_sqrt(tensor.primitive)),\n }\n }\n\n fn float_abs(tensor: FloatTensor<Self>) -> 
FloatTensor<Self> {\n #[derive(Debug)]\n struct Abs;\n\n retro_unary!(RetroAbs, B::float_abs);\n\n impl<B: Backend> Backward<B, 1> for Abs {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let tensor: B::FloatTensorPrimitive = checkpointer.retrieve_node_output(ops.state);\n let state = B::float_sign(tensor);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_mul(grad, state)\n });\n }\n }\n\n match Abs\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroAbs::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_abs(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_abs(tensor.primitive)),\n }\n }\n\n fn float_cos(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Cos;\n\n retro_unary!(RetroCos, B::float_cos);\n\n impl<B: Backend> Backward<B, 1> for Cos {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_neg(B::float_sin(input));\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Cos\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroCos::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_cos(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_cos(tensor.primitive)),\n }\n }\n\n fn float_sin(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sin;\n\n retro_unary!(RetroSin, B::float_sin);\n\n impl<B: Backend> Backward<B, 1> for Sin {\n type State = 
NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let state = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_cos(state);\n B::float_mul(grad, value)\n });\n }\n }\n\n match Sin\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSin::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_sin(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_sin(tensor.primitive)),\n }\n }\n\n fn float_tanh(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Tanh;\n\n retro_unary!(RetroTanh, B::float_tanh);\n\n impl<B: Backend> Backward<B, 1> for Tanh {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n let state = B::float_tanh(input);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_add_scalar(\n B::float_neg(B::float_powf_scalar(state, 2.0)),\n 1.elem(),\n );\n B::float_mul(grad, value)\n });\n }\n }\n\n match Tanh\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroTanh::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_tanh(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_tanh(tensor.primitive)),\n }\n }\n\n fn float_round(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Round;\n retro_unary!(RetroRound, B::float_round);\n\n impl<B: Backend> Backward<B, 1> for Round {\n type State = (Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n 
grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape, device) = ops.state;\n unary::<B, _>(ops.parents, ops.node, grads, |_grad| {\n B::float_zeros(shape, &device)\n })\n }\n }\n\n match Round\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroRound::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (tensor.primitive.shape(), B::float_device(&tensor.primitive)),\n B::float_round(tensor.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_round(tensor.primitive)),\n }\n }\n\n fn float_floor(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Floor;\n retro_unary!(RetroFloor, B::float_floor);\n\n impl<B: Backend> Backward<B, 1> for Floor {\n type State = (Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape, device) = ops.state;\n unary::<B, _>(ops.parents, ops.node, grads, |_grad| {\n B::float_zeros(shape, &device)\n })\n }\n }\n\n match Floor\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroFloor::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (tensor.primitive.shape(), B::float_device(&tensor.primitive)),\n B::float_floor(tensor.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_floor(tensor.primitive)),\n }\n }\n\n fn float_ceil(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Ceil;\n retro_unary!(RetroCeil, B::float_ceil);\n\n impl<B: Backend> Backward<B, 1> for Ceil {\n type State = (Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape, device) = ops.state;\n unary::<B, _>(ops.parents, ops.node, grads, |_grad| {\n B::float_zeros(shape, &device)\n })\n }\n }\n\n match Ceil\n 
.prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroCeil::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (tensor.primitive.shape(), B::float_device(&tensor.primitive)),\n B::float_floor(tensor.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_floor(tensor.primitive)),\n }\n }\n\n fn float_erf(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Erf;\n\n retro_unary!(RetroErf, B::float_erf);\n\n impl<B: Backend> Backward<B, 1> for Erf {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let ops = checkpointer.retrieve_node_output(ops.state);\n let exponent = B::float_neg(B::float_powf_scalar(ops, 2.0));\n let numerator = B::float_mul_scalar(B::float_exp(exponent), 2.0.elem());\n let denominator = core::f64::consts::PI.sqrt().elem();\n let value = B::float_div_scalar(numerator, denominator);\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Erf\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroErf::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_erf(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_erf(tensor.primitive)),\n }\n }\n\n fn float_cat(tensors: Vec<FloatTensor<Self>>, dim: usize) -> FloatTensor<Self> {\n #[derive(new, Debug)]\n struct CatStep<B: Backend> {\n nodes: Vec<Option<NodeRef>>,\n // The dimension of each tensor along the dim dimension.\n // This indicates the number of dimension concatenated for each tensor.\n dim_sizes: Vec<usize>,\n output: NodeRef,\n phantom: PhantomData<B>,\n dim: usize,\n }\n\n impl<B: Backend> Step for CatStep<B> {\n fn step(self: Box<Self>, grads: &mut Gradients, 
_checkpointer: &mut Checkpointer) {\n let grad = grads.consume::<B>(&self.output);\n let ranges: Vec<_> = grad.shape().dims.iter().map(|v| 0..*v).collect();\n\n let mut current_index = 0;\n\n self.nodes\n .into_iter()\n .zip(self.dim_sizes)\n .filter_map(|(node, dim_size)| node.map(|node| (node, dim_size)))\n .for_each(|(node, dim_size)| {\n let mut ranges = ranges.clone();\n ranges[self.dim] = current_index..dim_size + current_index;\n current_index += dim_size;\n grads.register::<B>(node.id, B::float_slice(grad.clone(), &ranges));\n });\n }\n\n fn node(&self) -> NodeID {\n self.output.id\n }\n\n fn parents(&self) -> Vec<NodeID> {\n self.nodes\n .iter()\n .filter_map(|node| node.clone())\n .map(|node| node.id)\n .collect()\n }\n fn depth(&self) -> usize {\n self.output.order\n }\n }\n\n let mut nodes = Vec::with_capacity(tensors.len());\n let mut primitives = Vec::with_capacity(tensors.len());\n let mut dim_sizes = Vec::with_capacity(tensors.len());\n\n tensors.into_iter().for_each(|tensor| {\n dim_sizes.push(tensor.primitive.shape().dims[dim]);\n nodes.push(tensor.node);\n primitives.push(tensor.primitive);\n });\n\n let requirement = Requirement::from_nodes(&nodes);\n\n // For simplicity, this operation does not checkpoint anything\n let cat_computing_property = ComputingProperty::Ambiguous;\n let checkpointer_builder = CheckpointerBuilder::default();\n\n let output = B::float_cat(primitives, dim);\n if requirement.is_none() {\n return AutodiffTensor::from_parents(\n output,\n &nodes,\n requirement,\n cat_computing_property,\n );\n }\n\n let output =\n AutodiffTensor::from_parents(output, &nodes, requirement, cat_computing_property);\n let nodes = nodes\n .into_iter()\n .map(|node| node.clone_if_require_grad())\n .collect::<Vec<_>>();\n\n let ops = CatStep::<B>::new(nodes, dim_sizes, output.node.clone(), dim);\n output.register_step(ops, checkpointer_builder)\n }\n\n fn float_max_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {\n match 
MaxMinDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let shape = tensor.primitive.shape();\n let (tensor, index) = B::float_max_dim_with_indices(tensor.primitive, dim);\n prep.finish((index, shape), tensor)\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_max_dim(tensor.primitive, dim)),\n }\n }\n fn float_max_dim_with_indices(\n tensor: FloatTensor<Self>,\n dim: usize,\n ) -> (FloatTensor<Self>, IntTensor<B>) {\n match MaxMinDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let shape = tensor.primitive.shape();\n let (tensor, index) = B::float_max_dim_with_indices(tensor.primitive, dim);\n let tensor = prep.finish((index.clone(), shape), tensor);\n\n (tensor, index)\n }\n OpsKind::UnTracked(prep) => {\n let (tensor, index) = B::float_max_dim_with_indices(tensor.primitive, dim);\n let tensor = prep.finish(tensor);\n\n (tensor, index)\n }\n }\n }\n fn float_min_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {\n match MaxMinDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let shape = tensor.primitive.shape();\n let (tensor, index) = B::float_min_dim_with_indices(tensor.primitive, dim);\n prep.finish((index, shape), tensor)\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_min_dim(tensor.primitive, dim)),\n }\n }\n fn float_min_dim_with_indices(\n tensor: FloatTensor<Self>,\n dim: usize,\n ) -> (FloatTensor<Self>, IntTensor<B>) {\n match MaxMinDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let shape = tensor.primitive.shape();\n let (tensor, index) = B::float_min_dim_with_indices(tensor.primitive, dim);\n let tensor = prep.finish((index.clone(), shape), tensor);\n\n (tensor, index)\n }\n OpsKind::UnTracked(prep) => {\n let (tensor, index) = B::float_min_dim_with_indices(tensor.primitive, dim);\n let tensor = 
prep.finish(tensor);\n\n (tensor, index)\n }\n }\n }\n\n fn float_into_int(tensor: FloatTensor<Self>) -> <Autodiff<B> as Backend>::IntTensorPrimitive {\n B::float_into_int(tensor.primitive)\n }\n\n fn float_powf(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct PowF;\n\n retro_binary!(RetroPowf, B::float_powf);\n\n impl<B: Backend> Backward<B, 2> for PowF {\n type State = (NodeID, NodeID, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (lhs_id, rhs_id, broadcast) = ops.state;\n let lhs: B::FloatTensorPrimitive = checkpointer.retrieve_node_output(lhs_id);\n let rhs: B::FloatTensorPrimitive = checkpointer.retrieve_node_output(rhs_id);\n\n // Both lhs and rhs are needed for both lhs and rhs gradients, but we clone them\n // the number of times required by the parents specification.\n let [rhs_4lhs, rhs_4rhs] = duplicate(&ops.parents, Some(rhs));\n let [lhs_4lhs, lhs_4rhs] = duplicate(&ops.parents, Some(lhs));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n //rhs*(lhs.val**(rhs-1))*grad\n let rhs1 = rhs_4lhs.unwrap();\n let rhs2 = rhs1.clone();\n let lhs = lhs_4lhs.unwrap();\n\n let tmp = B::float_powf(\n lhs,\n B::float_sub_scalar(rhs1, B::FloatElem::from_elem(1.0)),\n );\n let value = B::float_mul(tmp, rhs2);\n let grad = B::float_mul(grad, value);\n\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n //lhs**rhs * ln(lhs) * grad\n let rhs = rhs_4rhs.unwrap();\n let lhs1 = lhs_4rhs.unwrap();\n let lhs2 = lhs1.clone();\n let tmp = B::float_powf(lhs1, rhs);\n let value = B::float_mul(tmp, B::float_log(lhs2));\n let grad = B::float_mul(grad, value);\n\n broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match PowF\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n 
.retro_forward(RetroPowf::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = prep.checkpoint(&lhs);\n let rhs_state = prep.checkpoint(&rhs);\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n B::float_powf(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_powf(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_sign(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sign;\n\n retro_unary!(RetroSign, B::float_sign);\n\n impl<B: Backend> Backward<B, 1> for Sign {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad|\n // Always return 0 because the derivative of the sign function\n // does not contribute to gradient updates in a meaningful way.\n B::float_mul_scalar(grad, 0.elem()));\n }\n }\n\n Sign.prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSign::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateless(B::float_sign(tensor.primitive))\n }\n\n fn float_expand(tensor: FloatTensor<Self>, shape: Shape) -> FloatTensor<Self> {\n // D1: tensor, D2: shape\n #[derive(Debug)]\n struct ExpandDim;\n\n #[derive(new, Debug)]\n struct RetroExpand<B: Backend> {\n input_id: NodeID,\n shape: Shape,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroExpand<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_expand(input, self.shape.clone());\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for ExpandDim {\n type State = (Shape, Shape);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape_in, shape_out) = ops.state;\n let 
ndims_in = shape_in.num_dims();\n let ndims_out = shape_out.num_dims();\n\n let mut shape_expanded = vec![1; ndims_out];\n\n debug_assert!(ndims_out >= ndims_in);\n\n for i in 0..ndims_in {\n shape_expanded[i + (ndims_out - ndims_in)] = shape_in.dims[i];\n }\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let shape_grad = grad.shape();\n let mut grad = grad;\n\n #[allow(clippy::needless_range_loop)]\n for i in 0..ndims_out {\n if shape_expanded[i] == 1 && shape_grad.dims[i] != 1 {\n grad = B::float_sum_dim(grad, i);\n }\n }\n\n B::float_reshape(grad, shape_in)\n });\n }\n }\n\n match ExpandDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroExpand::<B>::new(tensor.node.id, shape.clone()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (tensor.primitive.shape(), shape.clone()),\n B::float_expand(tensor.primitive, shape),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_expand(tensor.primitive, shape)),\n }\n }\n\n fn float_sort(tensor: FloatTensor<Self>, dim: usize, descending: bool) -> FloatTensor<Self> {\n match super::sort::SortDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let shape = tensor.primitive.shape();\n let (tensor, indices) =\n B::float_sort_with_indices(tensor.primitive, dim, descending);\n prep.finish((indices, shape), tensor)\n }\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_sort(tensor.primitive, dim, descending))\n }\n }\n }\n\n fn float_sort_with_indices(\n tensor: FloatTensor<Self>,\n dim: usize,\n descending: bool,\n ) -> (FloatTensor<Self>, IntTensor<B>) {\n match super::sort::SortDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let shape = tensor.primitive.shape();\n let (tensor, indices) =\n B::float_sort_with_indices(tensor.primitive, dim, descending);\n let tensor = prep.finish((indices.clone(), shape), tensor);\n\n (tensor, indices)\n 
}\n OpsKind::UnTracked(prep) => {\n let (tensor, indices) =\n B::float_sort_with_indices(tensor.primitive, dim, descending);\n let tensor = prep.finish(tensor);\n\n (tensor, indices)\n }\n }\n }\n\n fn float_argsort(tensor: FloatTensor<Self>, dim: usize, descending: bool) -> IntTensor<B> {\n B::float_argsort(tensor.primitive, dim, descending)\n }\n\n fn float_repeat_dim(tensor: FloatTensor<Self>, dim: usize, times: usize) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Repeat;\n\n #[derive(new, Debug)]\n struct RetroRepeat<B: Backend> {\n tensor_id: NodeID,\n dim: usize,\n times: usize,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroRepeat<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);\n let out = B::float_repeat_dim(tensor, self.dim, self.times);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for Repeat {\n type State = (usize, usize);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, times) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let mut dims = grad.shape().dims;\n let orig_dim_size = dims[dim] / times;\n if orig_dim_size > 1 {\n dims[dim] = orig_dim_size;\n let orig_dims = dims.clone();\n dims.insert(dim + 1, times); // shape [..., orig_dim_size, times, ...]\n let grad = B::float_reshape(grad, Shape::from(dims));\n let grad = B::float_sum_dim(grad, dim + 1); // sum over repeat times\n B::float_reshape(grad, Shape::from(orig_dims))\n } else {\n B::float_sum_dim(grad, dim)\n }\n });\n }\n }\n\n match Repeat\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroRepeat::<B>::new(tensor.node.id, dim, times))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (dim, times),\n B::float_repeat_dim(tensor.primitive, dim, times),\n ),\n 
OpsKind::UnTracked(prep) => {\n prep.finish(B::float_repeat_dim(tensor.primitive, dim, times))\n }\n }\n }\n\n fn float_cast(tensor: FloatTensor<Self>, dtype: burn_tensor::FloatDType) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_cast(tensor.primitive, dtype))\n }\n\n // TODO: Implement float_prod and float_sum\n // https://github.com/tracel-ai/burn/issues/1458\n}",
"class_signature": "impl<B: Backend, C: CheckpointStrategy> FloatTensorOps<Self> for Autodiff<B, C>"
} |
float_sub | burn-main/crates/burn-autodiff/src/ops/tensor.rs | fn float_sub(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {
#[derive(Debug)]
struct Sub;
retro_binary!(RetroSub, B::float_sub);
impl<B: Backend> Backward<B, 2> for Sub {
type State = (Shape, Shape);
fn backward(
self,
ops: Ops<Self::State, 2>,
grads: &mut Gradients,
_checkpointer: &mut Checkpointer,
) {
let (shape_lhs, shape_rhs) = ops.state;
binary::<B, _, _>(
ops.parents,
ops.node,
grads,
|grad| broadcast_shape::<B>(grad, &shape_lhs),
|grad| broadcast_shape::<B>(B::float_neg(grad), &shape_rhs),
);
}
}
match Sub
.prepare::<C>([lhs.node.clone(), rhs.node.clone()])
.memory_bound()
.retro_forward(RetroSub::<B>::new(lhs.node.id, rhs.node.id))
.parents([&lhs, &rhs])
.stateful()
{
OpsKind::Tracked(preps) => preps.finish(
(lhs.primitive.shape(), rhs.primitive.shape()),
B::float_sub(lhs.primitive, rhs.primitive),
),
OpsKind::UnTracked(preps) => preps.finish(B::float_sub(lhs.primitive, rhs.primitive)),
}
} | use alloc::{boxed::Box, vec, vec::Vec};
use core::marker::PhantomData;
#[cfg(not(feature = "std"))]
#[allow(unused_imports, reason = "required on aarch64, unused on x86_64")]
use num_traits::float::Float;
use crate::{
Autodiff,
checkpoint::{
base::Checkpointer, builder::CheckpointerBuilder, retro_forward::RetroForward,
state::BackwardStates, strategy::CheckpointStrategy,
},
grads::Gradients,
graph::{ComputingProperty, NodeID, NodeRef, Requirement, Step},
ops::{Backward, Ops, OpsKind, binary, broadcast_shape, unary},
retro_binary, retro_unary, retro_unary_scalar,
tensor::AutodiffTensor,
utils::duplicate,
};
use burn_tensor::{
Device, ElementConversion, Shape, TensorData, TensorMetadata,
backend::Backend,
ops::{BoolTensor, FloatElem, FloatTensor, FloatTensorOps, IntTensor},
};
use super::maxmin::MaxMinDim;
// Unsqueeze op on primitive.
// Reshapes `tensor` so it has `shape.num_dims()` dimensions by prepending
// size-1 axes (primitive-level analogue of `Tensor::unsqueeze`).
//
// Assumes `shape.num_dims() >= tensor.shape().num_dims()`; otherwise the
// `ndims_out - ndims_in` subtraction underflows (unchanged from the original).
fn unsqueeze_like<B: Backend>(
    tensor: B::FloatTensorPrimitive,
    shape: Shape,
) -> B::FloatTensorPrimitive {
    let ndims_out = shape.num_dims();
    let shape = tensor.shape();
    let ndims_in = shape.num_dims();

    // Prepend `ndims_out - ndims_in` unit dimensions, then copy the originals.
    let mut dims = vec![1; ndims_out];
    let num_ones = ndims_out - ndims_in;
    dims[num_ones..(ndims_in + num_ones)].copy_from_slice(&shape.dims[..ndims_in]);

    B::float_reshape(tensor, Shape::from(dims))
}
impl<B: Backend, C: CheckpointStrategy> FloatTensorOps<Self> for Autodiff<B, C> {
fn float_from_data(data: TensorData, device: &Device<Self>) -> FloatTensor<Self> {
    // Creation ops have no parents: wrap the inner backend's result as a leaf.
    let primitive = B::float_from_data(data, device);
    AutodiffTensor::new(primitive)
}
fn float_random(
    shape: Shape,
    distribution: burn_tensor::Distribution,
    device: &Device<Self>,
) -> FloatTensor<Self> {
    // Random creation is not differentiable; the result is a fresh leaf node.
    let primitive = B::float_random(shape, distribution, device);
    AutodiffTensor::new(primitive)
}
fn float_zeros(shape: Shape, device: &Device<Self>) -> FloatTensor<Self> {
    // A zeros tensor starts the graph as a leaf with no parents.
    let primitive = B::float_zeros(shape, device);
    AutodiffTensor::new(primitive)
}
fn float_ones(shape: Shape, device: &Device<Self>) -> FloatTensor<Self> {
    // A ones tensor starts the graph as a leaf with no parents.
    let primitive = B::float_ones(shape, device);
    AutodiffTensor::new(primitive)
}
async fn float_into_data(tensor: FloatTensor<Self>) -> TensorData {
    // Reading data out is not differentiable; forward straight to the inner backend.
    let primitive = tensor.primitive;
    B::float_into_data(primitive).await
}
fn float_device(tensor: &FloatTensor<Self>) -> Device<Self> {
    // Device lookup only inspects the wrapped primitive.
    let primitive = &tensor.primitive;
    B::float_device(primitive)
}
// Moves the tensor to `device`, remembering the source device so the backward
// pass can move the gradient back to where the parent lives.
fn float_to_device(tensor: FloatTensor<Self>, device: &Device<Self>) -> FloatTensor<Self> {
    #[derive(Debug)]
    struct ToDevice;
    impl<B: Backend> Backward<B, 1> for ToDevice {
        // The device the input tensor lived on before the move.
        type State = B::Device;
        fn backward(
            self,
            ops: Ops<Self::State, 1>,
            grads: &mut Gradients,
            _checkpointer: &mut Checkpointer,
        ) {
            // d(to_device)/dx = identity, but materialized on the original device.
            unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                B::float_to_device(grad, &ops.state)
            });
        }
    }
    match ToDevice
        .prepare::<C>([tensor.node])
        .compute_bound()
        .stateful()
    {
        OpsKind::Tracked(prep) => {
            // Capture the source device before the primitive is consumed by the move.
            let device_old = B::float_device(&tensor.primitive);
            prep.finish(device_old, B::float_to_device(tensor.primitive, device))
        }
        OpsKind::UnTracked(prep) => prep.finish(B::float_to_device(tensor.primitive, device)),
    }
}
fn float_empty(shape: Shape, device: &Device<Self>) -> FloatTensor<Self> {
    // Uninitialized creation; the result is a fresh leaf node.
    let primitive = B::float_empty(shape, device);
    AutodiffTensor::new(primitive)
}
// y = lhs + rhs. Each gradient is the incoming gradient, reduced back to the
// operand's forward shape in case broadcasting happened.
fn float_add(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {
    #[derive(Debug)]
    struct Add;
    // Generates `RetroAdd`, which re-runs the forward from checkpointed inputs.
    retro_binary!(RetroAdd, B::float_add);
    impl<B: Backend> Backward<B, 2> for Add {
        // Forward shapes of (lhs, rhs), needed to undo broadcasting.
        type State = (Shape, Shape);
        fn backward(
            self,
            ops: Ops<Self::State, 2>,
            grads: &mut Gradients,
            _checkpointer: &mut Checkpointer,
        ) {
            let (shape_lhs, shape_rhs) = ops.state;
            binary::<B, _, _>(
                ops.parents,
                ops.node,
                grads,
                // d(lhs + rhs)/d(lhs) = 1
                |grad| broadcast_shape::<B>(grad, &shape_lhs),
                // d(lhs + rhs)/d(rhs) = 1
                |grad| broadcast_shape::<B>(grad, &shape_rhs),
            );
        }
    }
    match Add
        .prepare::<C>([lhs.node.clone(), rhs.node.clone()])
        .memory_bound()
        .retro_forward(RetroAdd::<B>::new(lhs.node.id, rhs.node.id))
        .parents([&lhs, &rhs])
        .stateful()
    {
        OpsKind::Tracked(preps) => preps.finish(
            (lhs.primitive.shape(), rhs.primitive.shape()),
            B::float_add(lhs.primitive, rhs.primitive),
        ),
        OpsKind::UnTracked(preps) => preps.finish(B::float_add(lhs.primitive, rhs.primitive)),
    }
}
// y = lhs + c. The scalar is a constant, so the gradient passes through unchanged.
fn float_add_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {
    #[derive(Debug)]
    struct AddScalar;
    retro_unary_scalar!(RetroAddScalar, B::float_add_scalar);
    impl<B: Backend> Backward<B, 1> for AddScalar {
        type State = ();
        fn backward(
            self,
            ops: Ops<Self::State, 1>,
            grads: &mut Gradients,
            _checkpointer: &mut Checkpointer,
        ) {
            // d(lhs + c)/d(lhs) = 1
            unary::<B, _>(ops.parents, ops.node, grads, |grad| grad);
        }
    }
    // The backward pass needs no state, so the op is registered stateless.
    AddScalar
        .prepare::<C>([lhs.node.clone()])
        .memory_bound()
        .retro_forward(RetroAddScalar::<B>::new(lhs.node.id, rhs))
        .parents([&lhs])
        .stateless(B::float_add_scalar(lhs.primitive, rhs))
}
// y = lhs - rhs. The rhs gradient is negated; both are reduced back to the
// operand's forward shape in case broadcasting happened.
fn float_sub(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {
    #[derive(Debug)]
    struct Sub;
    // Generates `RetroSub`, which re-runs the forward from checkpointed inputs.
    retro_binary!(RetroSub, B::float_sub);
    impl<B: Backend> Backward<B, 2> for Sub {
        // Forward shapes of (lhs, rhs), needed to undo broadcasting.
        type State = (Shape, Shape);
        fn backward(
            self,
            ops: Ops<Self::State, 2>,
            grads: &mut Gradients,
            _checkpointer: &mut Checkpointer,
        ) {
            let (shape_lhs, shape_rhs) = ops.state;
            binary::<B, _, _>(
                ops.parents,
                ops.node,
                grads,
                // d(lhs - rhs)/d(lhs) = 1
                |grad| broadcast_shape::<B>(grad, &shape_lhs),
                // d(lhs - rhs)/d(rhs) = -1
                |grad| broadcast_shape::<B>(B::float_neg(grad), &shape_rhs),
            );
        }
    }
    match Sub
        .prepare::<C>([lhs.node.clone(), rhs.node.clone()])
        .memory_bound()
        .retro_forward(RetroSub::<B>::new(lhs.node.id, rhs.node.id))
        .parents([&lhs, &rhs])
        .stateful()
    {
        OpsKind::Tracked(preps) => preps.finish(
            (lhs.primitive.shape(), rhs.primitive.shape()),
            B::float_sub(lhs.primitive, rhs.primitive),
        ),
        OpsKind::UnTracked(preps) => preps.finish(B::float_sub(lhs.primitive, rhs.primitive)),
    }
}
// y = lhs - c. The scalar is a constant, so the gradient passes through unchanged.
fn float_sub_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {
    #[derive(Debug)]
    struct SubScalar;
    retro_unary_scalar!(RetroSubScalar, B::float_sub_scalar);
    impl<B: Backend> Backward<B, 1> for SubScalar {
        type State = ();
        fn backward(
            self,
            ops: Ops<Self::State, 1>,
            grads: &mut Gradients,
            _checkpointer: &mut Checkpointer,
        ) {
            // d(lhs - c)/d(lhs) = 1
            unary::<B, _>(ops.parents, ops.node, grads, |grad| grad);
        }
    }
    // No backward state needed, so the op is registered stateless.
    SubScalar
        .prepare::<C>([lhs.node.clone()])
        .memory_bound()
        .retro_forward(RetroSubScalar::<B>::new(lhs.node.id, rhs))
        .parents([&lhs])
        .stateless(B::float_sub_scalar(lhs.primitive, rhs))
}
// y = lhs * rhs, element-wise, with broadcasting handled via `BinaryOpsBroadcast`.
fn float_mul(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {
    #[derive(Debug)]
    struct Mul;
    retro_binary!(RetroMul, B::float_mul);
    impl<B: Backend> Backward<B, 2> for Mul {
        // Checkpoint ids for (lhs, rhs) — each present only when the *other*
        // operand is tracked — plus the broadcast descriptor.
        type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);
        fn backward(
            self,
            ops: Ops<Self::State, 2>,
            grads: &mut Gradients,
            checkpointer: &mut Checkpointer,
        ) {
            let (lhs, rhs, broadcast) = ops.state;
            let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));
            let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));
            binary::<B, _, _>(
                ops.parents,
                ops.node,
                grads,
                // d(lhs * rhs)/d(lhs) = rhs
                |grad| {
                    let grad = B::float_mul(grad, rhs.unwrap());
                    broadcast.backward_lhs::<B>(grad)
                },
                // d(lhs * rhs)/d(rhs) = lhs
                |grad| {
                    let grad = B::float_mul(grad, lhs.unwrap());
                    broadcast.backward_rhs::<B>(grad)
                },
            );
        }
    }
    let lhs_tracked = lhs.is_tracked();
    let rhs_tracked = rhs.is_tracked();
    let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);
    match Mul
        .prepare::<C>([lhs.node.clone(), rhs.node.clone()])
        .memory_bound()
        .retro_forward(RetroMul::<B>::new(lhs.node.id, rhs.node.id))
        .parents([&lhs, &rhs])
        .stateful()
    {
        OpsKind::Tracked(mut prep) => {
            // lhs's value is only needed to compute rhs's gradient (and vice
            // versa), so each side is checkpointed only if the other is tracked.
            let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));
            let rhs_state = lhs_tracked.then(|| prep.checkpoint(&rhs));
            prep.finish(
                (lhs_state, rhs_state, broadcast),
                B::float_mul(lhs.primitive, rhs.primitive),
            )
        }
        OpsKind::UnTracked(prep) => prep.finish(B::float_mul(lhs.primitive, rhs.primitive)),
    }
}
// y = lhs * c; the scalar multiplier is saved as backward state.
fn float_mul_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {
    #[derive(Debug)]
    struct MulScalar;
    retro_unary_scalar!(RetroMulScalar, B::float_mul_scalar);
    impl<B: Backend> Backward<B, 1> for MulScalar {
        // The scalar factor applied in the forward pass.
        type State = FloatElem<B>;
        fn backward(
            self,
            ops: Ops<Self::State, 1>,
            grads: &mut Gradients,
            _checkpointer: &mut Checkpointer,
        ) {
            // d(lhs * c)/d(lhs) = c
            unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                B::float_mul_scalar(grad, ops.state)
            });
        }
    }
    match MulScalar
        .prepare::<C>([lhs.node.clone()])
        .memory_bound()
        .retro_forward(RetroMulScalar::<B>::new(lhs.node.id, rhs))
        .parents([&lhs])
        .stateful()
    {
        OpsKind::Tracked(prep) => prep.finish(rhs, B::float_mul_scalar(lhs.primitive, rhs)),
        OpsKind::UnTracked(prep) => prep.finish(B::float_mul_scalar(lhs.primitive, rhs)),
    }
}
// y = lhs / rhs, element-wise, with broadcasting handled via `BinaryOpsBroadcast`.
fn float_div(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {
    #[derive(Debug)]
    struct Div;
    retro_binary!(RetroDiv, B::float_div);
    impl<B: Backend> Backward<B, 2> for Div {
        // Checkpoint ids for (lhs, rhs) plus the broadcast descriptor.
        type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);
        fn backward(
            self,
            ops: Ops<Self::State, 2>,
            grads: &mut Gradients,
            checkpointer: &mut Checkpointer,
        ) {
            let (lhs, rhs, broadcast) = ops.state;
            let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));
            let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));
            // rhs is consumed by both gradient closures; duplicate it only for
            // the parents that are actually tracked.
            let [rhs_4lhs, rhs_4rhs] = duplicate(&ops.parents, rhs);
            binary::<B, _, _>(
                ops.parents,
                ops.node,
                grads,
                // d(lhs / rhs)/d(lhs) = 1 / rhs
                |grad| {
                    let rhs = rhs_4lhs.unwrap();
                    let value = B::float_powf_scalar(rhs, -1.0);
                    let grad = B::float_mul(grad, value);
                    broadcast.backward_lhs::<B>(grad)
                },
                // d(lhs / rhs)/d(rhs) = -lhs / rhs^2
                |grad| {
                    let rhs = rhs_4rhs.unwrap();
                    let lhs = lhs.unwrap();
                    let value = B::float_div(B::float_neg(lhs), B::float_powf_scalar(rhs, 2.0));
                    let grad = B::float_mul(grad, value);
                    broadcast.backward_rhs::<B>(grad)
                },
            );
        }
    }
    let lhs_tracked = lhs.is_tracked();
    let rhs_tracked = rhs.is_tracked();
    let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);
    match Div
        .prepare::<C>([lhs.node.clone(), rhs.node.clone()])
        .memory_bound()
        .retro_forward(RetroDiv::<B>::new(lhs.node.id, rhs.node.id))
        .parents([&lhs, &rhs])
        .stateful()
    {
        OpsKind::Tracked(mut prep) => {
            // lhs is only read by the rhs-gradient; rhs is read by both
            // gradients, hence the (lhs_tracked || rhs_tracked) condition.
            let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));
            let rhs_state = (lhs_tracked || rhs_tracked).then(|| prep.checkpoint(&rhs));
            prep.finish(
                (lhs_state, rhs_state, broadcast),
                B::float_div(lhs.primitive, rhs.primitive),
            )
        }
        OpsKind::UnTracked(prep) => prep.finish(B::float_div(lhs.primitive, rhs.primitive)),
    }
}
// y = lhs / c; only the scalar divisor is needed for the backward pass.
fn float_div_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {
    #[derive(Debug)]
    struct DivScalar;
    retro_unary_scalar!(RetroDivScalar, B::float_div_scalar);
    impl<B: Backend> Backward<B, 1> for DivScalar {
        // The scalar divisor from the forward pass.
        type State = FloatElem<B>;
        fn backward(
            self,
            ops: Ops<Self::State, 1>,
            grads: &mut Gradients,
            _checkpointer: &mut Checkpointer,
        ) {
            // d(lhs / c)/d(lhs) = 1 / c, computed through f32.
            unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                let tmp = 1.0 / ops.state.elem::<f32>();
                B::float_mul_scalar(grad, tmp.elem())
            });
        }
    }
    match DivScalar
        .prepare::<C>([lhs.node.clone()])
        .memory_bound()
        .retro_forward(RetroDivScalar::<B>::new(lhs.node.id, rhs))
        .parents([&lhs])
        .stateful()
    {
        OpsKind::Tracked(prep) => prep.finish(rhs, B::float_div_scalar(lhs.primitive, rhs)),
        OpsKind::UnTracked(prep) => prep.finish(B::float_div_scalar(lhs.primitive, rhs)),
    }
}
// y = remainder(lhs, rhs) = lhs - floor(lhs / rhs) * rhs, element-wise.
fn float_remainder(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {
    #[derive(Debug)]
    struct Rem;
    retro_binary!(RetroRem, B::float_remainder);
    impl<B: Backend> Backward<B, 2> for Rem {
        // Checkpoint ids for (lhs, rhs) plus the broadcast descriptor.
        type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);
        fn backward(
            self,
            ops: Ops<Self::State, 2>,
            grads: &mut Gradients,
            checkpointer: &mut Checkpointer,
        ) {
            let (lhs, rhs, broadcast) = ops.state;
            let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));
            let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));
            binary::<B, _, _>(
                ops.parents,
                ops.node,
                grads,
                |grad| {
                    // remainder(x, y) = x - floor(x / y) * y
                    // partial(x - floor(x / y) * y, x) = 1
                    broadcast.backward_lhs::<B>(grad)
                },
                |grad| {
                    // partial(x - floor(x / y) * y, y) = - floor(x / y)
                    let rhs = rhs.unwrap();
                    let lhs = lhs.unwrap();
                    let value = B::float_neg(B::float_floor(B::float_div(lhs, rhs)));
                    let grad = B::float_mul(grad, value);
                    broadcast.backward_rhs::<B>(grad)
                },
            );
        }
    }
    let lhs_tracked = lhs.is_tracked();
    let rhs_tracked = rhs.is_tracked();
    let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);
    match Rem
        .prepare::<C>([lhs.node.clone(), rhs.node.clone()])
        .memory_bound()
        .retro_forward(RetroRem::<B>::new(lhs.node.id, rhs.node.id))
        .parents([&lhs, &rhs])
        .stateful()
    {
        OpsKind::Tracked(mut prep) => {
            // lhs is only read by the rhs-gradient closure, so it is
            // checkpointed when rhs is tracked.
            let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));
            // NOTE(review): here rhs is also only read by the rhs-gradient
            // closure, so the `lhs_tracked ||` part looks superfluous (it
            // mirrors `float_div`, where rhs feeds both gradients) — confirm.
            let rhs_state = (lhs_tracked || rhs_tracked).then(|| prep.checkpoint(&rhs));
            prep.finish(
                (lhs_state, rhs_state, broadcast),
                B::float_remainder(lhs.primitive, rhs.primitive),
            )
        }
        OpsKind::UnTracked(prep) => {
            prep.finish(B::float_remainder(lhs.primitive, rhs.primitive))
        }
    }
}
// y = remainder(lhs, c). The scalar divisor is constant, so the gradient
// passes through unchanged (slope 1 almost everywhere).
fn float_remainder_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {
    #[derive(Debug)]
    struct RemainderScalar;
    retro_unary_scalar!(RetroRemainderScalar, B::float_remainder_scalar);
    impl<B: Backend> Backward<B, 1> for RemainderScalar {
        type State = ();
        fn backward(
            self,
            ops: Ops<Self::State, 1>,
            grads: &mut Gradients,
            _checkpointer: &mut Checkpointer,
        ) {
            // partial(x - floor(x / c) * c, x) = 1
            unary::<B, _>(ops.parents, ops.node, grads, |grad| grad);
        }
    }
    // No backward state needed, so the op is registered stateless.
    RemainderScalar
        .prepare::<C>([lhs.node.clone()])
        .memory_bound()
        .retro_forward(RetroRemainderScalar::<B>::new(lhs.node.id, rhs))
        .parents([&lhs])
        .stateless(B::float_remainder_scalar(lhs.primitive, rhs))
}
// y = lhs @ rhs. Marked compute-bound: inputs are checkpointed rather than
// recomputed, since redoing a matmul in the backward pass would be expensive.
fn float_matmul(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {
    #[derive(Debug)]
    struct Matmul;
    impl<B: Backend> Backward<B, 2> for Matmul {
        // Checkpoint ids for (lhs, rhs) plus the broadcast descriptor.
        type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);
        fn backward(
            self,
            ops: Ops<Self::State, 2>,
            grads: &mut Gradients,
            checkpointer: &mut Checkpointer,
        ) {
            let (lhs, rhs, broadcast) = ops.state;
            let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));
            let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));
            binary::<B, _, _>(
                ops.parents,
                ops.node,
                grads,
                // grad_lhs = grad @ rhs^T
                |grad| {
                    let rhs = B::float_transpose(rhs.unwrap());
                    let grad = B::float_matmul(grad, rhs);
                    broadcast.backward_lhs::<B>(grad)
                },
                // grad_rhs = lhs^T @ grad
                |grad| {
                    let lhs = B::float_transpose(lhs.unwrap());
                    let grad = B::float_matmul(lhs, grad);
                    broadcast.backward_rhs::<B>(grad)
                },
            );
        }
    }
    let lhs_tracked = lhs.is_tracked();
    let rhs_tracked = rhs.is_tracked();
    let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);
    match Matmul
        .prepare::<C>([lhs.node.clone(), rhs.node.clone()])
        .compute_bound()
        .stateful()
    {
        OpsKind::Tracked(mut prep) => {
            // Each operand is only needed for the *other* operand's gradient.
            let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));
            let rhs_state = lhs_tracked.then(|| prep.checkpoint(&rhs));
            prep.finish(
                (lhs_state, rhs_state, broadcast),
                B::float_matmul(lhs.primitive, rhs.primitive),
            )
        }
        OpsKind::UnTracked(prep) => prep.finish(B::float_matmul(lhs.primitive, rhs.primitive)),
    }
}
// y = -x; the gradient is simply negated, so no backward state is required.
fn float_neg(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
    #[derive(Debug)]
    struct Neg;
    retro_unary!(RetroNeg, B::float_neg);
    impl<B: Backend> Backward<B, 1> for Neg {
        type State = ();
        fn backward(
            self,
            ops: Ops<Self::State, 1>,
            grads: &mut Gradients,
            _checkpointer: &mut Checkpointer,
        ) {
            // d(-x)/dx = -1
            unary::<B, _>(ops.parents, ops.node, grads, |grad| B::float_neg(grad));
        }
    }
    Neg.prepare::<C>([tensor.node.clone()])
        .memory_bound()
        .retro_forward(RetroNeg::<B>::new(tensor.node.id))
        .parents([&tensor])
        .stateless(B::float_neg(tensor.primitive))
}
// y = 1 / x; the input is checkpointed since the backward pass needs its value.
fn float_recip(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
    #[derive(Debug)]
    struct Recip;
    retro_unary!(RetroRecip, B::float_recip);
    impl<B: Backend> Backward<B, 1> for Recip {
        // Checkpoint id of the input tensor.
        type State = NodeID;
        fn backward(
            self,
            ops: Ops<Self::State, 1>,
            grads: &mut Gradients,
            checkpointer: &mut Checkpointer,
        ) {
            let tensor = checkpointer.retrieve_node_output(ops.state);
            // d(1/x)/dx = -x^(-2)
            unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                let tmp = B::float_powf_scalar(tensor, -2.0);
                let value = B::float_neg(tmp);
                B::float_mul(grad, value)
            });
        }
    }
    match Recip
        .prepare::<C>([tensor.node.clone()])
        .memory_bound()
        .retro_forward(RetroRecip::<B>::new(tensor.node.id))
        .parents([&tensor])
        .stateful()
    {
        OpsKind::Tracked(mut prep) => {
            let state = prep.checkpoint(&tensor);
            prep.finish(state, B::float_recip(tensor.primitive))
        }
        OpsKind::UnTracked(prep) => prep.finish(B::float_recip(tensor.primitive)),
    }
}
// Swaps two dimensions. Swapping is its own inverse, so the backward pass
// applies the same swap to the gradient.
fn float_swap_dims(tensor: FloatTensor<Self>, dim1: usize, dim2: usize) -> FloatTensor<Self> {
    #[derive(Debug)]
    struct SwapDim;
    // Replays the forward swap from the checkpointed input.
    #[derive(new, Debug)]
    struct RetroSwapDims<B: Backend> {
        input_id: NodeID,
        dim1: usize,
        dim2: usize,
        _backend: PhantomData<B>,
    }
    impl<B: Backend> RetroForward for RetroSwapDims<B> {
        fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
            let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);
            let out = B::float_swap_dims(input, self.dim1, self.dim2);
            states.save(out_node, out)
        }
    }
    impl<B: Backend> Backward<B, 1> for SwapDim {
        // The swapped dimension pair.
        type State = (usize, usize);
        fn backward(
            self,
            ops: Ops<Self::State, 1>,
            grads: &mut Gradients,
            _checkpointer: &mut Checkpointer,
        ) {
            let (dim1, dim2) = ops.state;
            // Swap back (same operation, since a swap is an involution).
            unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                B::float_swap_dims(grad, dim2, dim1)
            });
        }
    }
    match SwapDim
        .prepare::<C>([tensor.node.clone()])
        .memory_bound()
        .retro_forward(RetroSwapDims::<B>::new(tensor.node.id, dim1, dim2))
        .parents([&tensor])
        .stateful()
    {
        OpsKind::Tracked(prep) => prep.finish(
            (dim1, dim2),
            B::float_swap_dims(tensor.primitive, dim1, dim2),
        ),
        OpsKind::UnTracked(prep) => {
            prep.finish(B::float_swap_dims(tensor.primitive, dim1, dim2))
        }
    }
}
// Permutes the tensor's axes; the backward pass applies the inverse permutation.
fn float_permute(tensor: FloatTensor<Self>, axes: &[usize]) -> FloatTensor<Self> {
    #[derive(Debug)]
    struct PermuteDim;
    // Replays the forward permutation from the checkpointed input.
    #[derive(new, Debug)]
    struct RetroPermuteDims<B: Backend> {
        input_id: NodeID,
        axes: Vec<usize>,
        _backend: PhantomData<B>,
    }
    impl<B: Backend> RetroForward for RetroPermuteDims<B> {
        fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
            let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);
            let out = B::float_permute(input, &self.axes);
            states.save(out_node, out)
        }
    }
    impl<B: Backend> Backward<B, 1> for PermuteDim {
        // The forward permutation.
        type State = Vec<usize>;
        fn backward(
            self,
            ops: Ops<Self::State, 1>,
            grads: &mut Gradients,
            _checkpointer: &mut Checkpointer,
        ) {
            let axes = ops.state;
            // Build the inverse permutation: inverse[axes[i]] = i.
            let mut inverse = vec![0usize; axes.len()];
            axes.iter()
                .enumerate()
                .for_each(|(i, &axis)| inverse[axis] = i);
            unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                B::float_permute(grad, &inverse)
            });
        }
    }
    match PermuteDim
        .prepare::<C>([tensor.node.clone()])
        .memory_bound()
        .retro_forward(RetroPermuteDims::<B>::new(tensor.node.id, axes.to_vec()))
        .parents([&tensor])
        .stateful()
    {
        OpsKind::Tracked(prep) => {
            prep.finish(axes.to_vec(), B::float_permute(tensor.primitive, axes))
        }
        OpsKind::UnTracked(prep) => prep.finish(B::float_permute(tensor.primitive, axes)),
    }
}
// Flips the given axes. Flipping is its own inverse, so the backward pass
// flips the gradient along the same axes.
fn float_flip(tensor: FloatTensor<Self>, axes: &[usize]) -> FloatTensor<Self> {
    #[derive(Debug)]
    struct FlipDim;
    // Replays the forward flip from the checkpointed input.
    #[derive(new, Debug)]
    struct RetroFlipDims<B: Backend> {
        input_id: NodeID,
        axes: Vec<usize>,
        _backend: PhantomData<B>,
    }
    impl<B: Backend> RetroForward for RetroFlipDims<B> {
        fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
            let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);
            let out = B::float_flip(input, &self.axes);
            states.save(out_node, out)
        }
    }
    impl<B: Backend> Backward<B, 1> for FlipDim {
        // The flipped axes.
        type State = Vec<usize>;
        fn backward(
            self,
            ops: Ops<Self::State, 1>,
            grads: &mut Gradients,
            _checkpointer: &mut Checkpointer,
        ) {
            let axes = ops.state;
            unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                B::float_flip(grad, &axes)
            });
        }
    }
    match FlipDim
        .prepare::<C>([tensor.node.clone()])
        .memory_bound()
        .retro_forward(RetroFlipDims::<B>::new(tensor.node.id, axes.to_vec()))
        .parents([&tensor])
        .stateful()
    {
        OpsKind::Tracked(prep) => {
            prep.finish(axes.to_vec(), B::float_flip(tensor.primitive, axes))
        }
        OpsKind::UnTracked(prep) => prep.finish(B::float_flip(tensor.primitive, axes)),
    }
}
// Reshapes the tensor; the backward pass reshapes the gradient back to the
// original shape.
fn float_reshape(tensor: FloatTensor<Self>, shape: Shape) -> FloatTensor<Self> {
    #[derive(Debug)]
    struct ReshapeDim;
    // Replays the forward reshape from the checkpointed input.
    #[derive(new, Debug)]
    struct RetroReshape<B: Backend> {
        input_id: NodeID,
        shape: Shape,
        _backend: PhantomData<B>,
    }
    impl<B: Backend> RetroForward for RetroReshape<B> {
        fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
            let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);
            let out = B::float_reshape(input, self.shape.clone());
            states.save(out_node, out)
        }
    }
    impl<B: Backend> Backward<B, 1> for ReshapeDim {
        // (original input shape, requested output shape).
        type State = (Shape, Shape);
        fn backward(
            self,
            ops: Ops<Self::State, 1>,
            grads: &mut Gradients,
            _checkpointer: &mut Checkpointer,
        ) {
            let (shape_original, shape) = ops.state;
            let ndims_out = shape.num_dims();
            unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                let shape_grad = grad.shape();
                let mut grad = grad;
                // Where the output shape had a unit axis but the incoming
                // gradient does not, sum that axis away first — presumably to
                // absorb gradients broadened along those axes downstream.
                // NOTE(review): confirm against how grads may be broadcast.
                for i in 0..ndims_out {
                    if shape.dims[i] == 1 && shape_grad.dims[i] != 1 {
                        grad = B::float_sum_dim(grad, i);
                    }
                }
                B::float_reshape(grad, shape_original)
            });
        }
    }
    match ReshapeDim
        .prepare::<C>([tensor.node.clone()])
        .memory_bound()
        .retro_forward(RetroReshape::<B>::new(tensor.node.id, shape.clone()))
        .parents([&tensor])
        .stateful()
    {
        OpsKind::Tracked(prep) => prep.finish(
            (tensor.primitive.shape(), shape.clone()),
            B::float_reshape(tensor.primitive, shape),
        ),
        OpsKind::UnTracked(prep) => prep.finish(B::float_reshape(tensor.primitive, shape)),
    }
}
// Gathers elements along `dim` at `indices`. The backward pass scatters the
// gradient back into a zero tensor of the input's shape at the same indices.
fn float_gather(
    dim: usize,
    tensor: FloatTensor<Self>,
    indices: IntTensor<B>,
) -> FloatTensor<Self> {
    #[derive(Debug)]
    struct Gather;
    impl<B: Backend> Backward<B, 1> for Gather {
        // (dim, gather indices, input shape, input device).
        type State = (usize, IntTensor<B>, Shape, B::Device);
        fn backward(
            self,
            ops: Ops<Self::State, 1>,
            grads: &mut Gradients,
            _checkpointer: &mut Checkpointer,
        ) {
            let (dim, indices, shape, device) = ops.state;
            unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                let zeros = B::float_zeros(shape, &device);
                B::float_scatter(dim, zeros, indices, grad)
            });
        }
    }
    match Gather
        .prepare::<C>([tensor.node])
        .compute_bound()
        .stateful()
    {
        OpsKind::Tracked(prep) => prep.finish(
            (
                dim,
                indices.clone(),
                tensor.primitive.shape(),
                B::float_device(&tensor.primitive),
            ),
            B::float_gather(dim, tensor.primitive, indices),
        ),
        OpsKind::UnTracked(prep) => {
            prep.finish(B::float_gather(dim, tensor.primitive, indices))
        }
    }
}
// Scatters `value` into `tensor` along `dim` at `indices`, tracking gradients
// for both the destination tensor and the scattered values.
fn float_scatter(
    dim: usize,
    tensor: FloatTensor<Self>,
    indices: IntTensor<B>,
    value: FloatTensor<Self>,
) -> FloatTensor<Self> {
    #[derive(Debug)]
    struct Scatter;
    impl<B: Backend> Backward<B, 2> for Scatter {
        // (dim, indices, destination shape, value shape, device).
        type State = (usize, IntTensor<B>, Shape, Shape, B::Device);
        fn backward(
            self,
            ops: Ops<Self::State, 2>,
            grads: &mut Gradients,
            _checkpointer: &mut Checkpointer,
        ) {
            let (dim, indices, shape_lhs, shape_rhs, device) = ops.state;
            // Indices are consumed by both gradient closures; duplicate only
            // for the parents that are actually tracked.
            let [indices_4lhs, indices_4rhs] = duplicate(&ops.parents, Some(indices));
            binary::<B, _, _>(
                ops.parents,
                ops.node,
                grads,
                // NOTE(review): both closures express the gradients through the
                // backend's scatter — the exact shape/accumulation contract of
                // B::float_scatter determines their correctness; verify there.
                |grad| {
                    let zeros = B::float_zeros(shape_lhs, &device);
                    B::float_scatter(dim, grad, indices_4lhs.unwrap(), zeros)
                },
                |grad| {
                    let zeros = B::float_zeros(shape_rhs, &device);
                    B::float_scatter(dim, zeros, indices_4rhs.unwrap(), grad)
                },
            );
        }
    }
    match Scatter
        .prepare::<C>([tensor.node, value.node])
        .compute_bound()
        .stateful()
    {
        OpsKind::Tracked(prep) => prep.finish(
            (
                dim,
                indices.clone(),
                tensor.primitive.shape(),
                value.primitive.shape(),
                B::float_device(&value.primitive),
            ),
            B::float_scatter(dim, tensor.primitive, indices, value.primitive),
        ),
        OpsKind::UnTracked(prep) => prep.finish(B::float_scatter(
            dim,
            tensor.primitive,
            indices,
            value.primitive,
        )),
    }
}
// Selects slices along `dim` at `indices`. The backward pass assigns the
// gradient back into a zero tensor of the input's shape at those indices.
fn float_select(
    tensor: FloatTensor<Self>,
    dim: usize,
    indices: IntTensor<B>,
) -> FloatTensor<Self> {
    #[derive(Debug)]
    struct Select;
    // Replays the forward select from the checkpointed input.
    #[derive(new, Debug)]
    struct RetroSelect<B: Backend> {
        input_id: NodeID,
        dim: usize,
        indices: IntTensor<B>,
    }
    impl<B: Backend> RetroForward for RetroSelect<B> {
        fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
            let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);
            let out = B::float_select(input, self.dim, self.indices.clone());
            states.save(out_node, out)
        }
    }
    impl<B: Backend> Backward<B, 1> for Select {
        // (dim, select indices, input shape, input device).
        type State = (usize, IntTensor<B>, Shape, B::Device);
        fn backward(
            self,
            ops: Ops<Self::State, 1>,
            grads: &mut Gradients,
            _checkpointer: &mut Checkpointer,
        ) {
            let (dim, indices, shape, device) = ops.state;
            unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                let zeros = B::float_zeros(shape, &device);
                B::float_select_assign(zeros, dim, indices, grad)
            });
        }
    }
    match Select
        .prepare::<C>([tensor.node.clone()])
        .memory_bound()
        .retro_forward(RetroSelect::<B>::new(tensor.node.id, dim, indices.clone()))
        .parents([&tensor])
        .stateful()
    {
        OpsKind::Tracked(prep) => prep.finish(
            (
                dim,
                indices.clone(),
                tensor.primitive.shape(),
                B::float_device(&tensor.primitive),
            ),
            B::float_select(tensor.primitive, dim, indices),
        ),
        OpsKind::UnTracked(prep) => {
            prep.finish(B::float_select(tensor.primitive, dim, indices))
        }
    }
}
    // Differentiable `select_assign`: writes `value` into `tensor` along `dim`
    // at `indices`. Backward: the gradient w.r.t. `tensor` passes through
    // unchanged, while the gradient w.r.t. `value` is gathered from the output
    // gradient at the same indices.
    fn float_select_assign(
        tensor: FloatTensor<Self>,
        dim: usize,
        indices: IntTensor<B>,
        value: FloatTensor<Self>,
    ) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct IndexSelectDimAssign;
        // Replays the forward assign from the checkpointed operands when
        // memory-bound.
        #[derive(new, Debug)]
        struct RetroSelectAssign<B: Backend> {
            tensor_id: NodeID,
            dim: usize,
            indices: IntTensor<B>,
            value_id: NodeID,
        }
        impl<B: Backend> RetroForward for RetroSelectAssign<B> {
            fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
                let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);
                let value = states.get_state::<B::FloatTensorPrimitive>(&self.value_id);
                let out = B::float_select_assign(tensor, self.dim, self.indices.clone(), value);
                states.save(out_node, out)
            }
        }
        impl<B: Backend> Backward<B, 2> for IndexSelectDimAssign {
            type State = (usize, IntTensor<B>);
            fn backward(
                self,
                ops: Ops<Self::State, 2>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (dim, indices) = ops.state;
                binary::<B, _, _>(
                    ops.parents,
                    ops.node,
                    grads,
                    // lhs (tensor): identity — assignment adds, it does not overwrite.
                    |grad| grad,
                    // rhs (value): gather the output gradient at the assigned rows.
                    |grad| B::float_select(grad, dim, indices),
                );
            }
        }
        match IndexSelectDimAssign
            .prepare::<C>([tensor.node.clone(), value.node.clone()])
            .memory_bound()
            .retro_forward(RetroSelectAssign::<B>::new(
                tensor.node.id,
                dim,
                indices.clone(),
                value.node.id,
            ))
            .parents([&tensor, &value])
            .stateful()
        {
            OpsKind::Tracked(prep) => prep.finish(
                (dim, indices.clone()),
                B::float_select_assign(tensor.primitive, dim, indices, value.primitive),
            ),
            OpsKind::UnTracked(prep) => prep.finish(B::float_select_assign(
                tensor.primitive,
                dim,
                indices,
                value.primitive,
            )),
        }
    }
    // Differentiable `slice`. Backward writes the incoming gradient into a
    // zero tensor of the input's shape at the sliced ranges (slice_assign),
    // since only the sliced region contributed to the output.
    fn float_slice(
        tensor: FloatTensor<Self>,
        ranges: &[core::ops::Range<usize>],
    ) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Index;
        // Replays the forward slice from the checkpointed input when memory-bound.
        #[derive(new, Debug)]
        struct RetroSlice<B: Backend> {
            tensor_id: NodeID,
            ranges: Vec<core::ops::Range<usize>>,
            _backend: PhantomData<B>,
        }
        impl<B: Backend> RetroForward for RetroSlice<B> {
            fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
                let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);
                let out = B::float_slice(tensor, &self.ranges);
                states.save(out_node, out)
            }
        }
        impl<B: Backend> Backward<B, 1> for Index {
            // (ranges, input shape, input device) for rebuilding the gradient.
            type State = (Vec<core::ops::Range<usize>>, Shape, B::Device);
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (ranges, shape, device) = ops.state;
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    let zeros = B::float_zeros(shape, &device);
                    B::float_slice_assign(zeros, &ranges, grad)
                });
            }
        }
        match Index
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroSlice::<B>::new(tensor.node.id, ranges.to_vec()))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(prep) => prep.finish(
                (
                    ranges.to_vec(),
                    tensor.primitive.shape(),
                    B::float_device(&tensor.primitive),
                ),
                B::float_slice(tensor.primitive, ranges),
            ),
            OpsKind::UnTracked(prep) => prep.finish(B::float_slice(tensor.primitive, ranges)),
        }
    }
    // Differentiable `slice_assign`: overwrites the region `ranges` of `tensor`
    // with `value`. Backward: the gradient w.r.t. `tensor` is the output
    // gradient with the overwritten region zeroed (those inputs were replaced);
    // the gradient w.r.t. `value` is the output gradient sliced to that region.
    fn float_slice_assign(
        tensor: FloatTensor<Self>,
        ranges: &[core::ops::Range<usize>],
        value: FloatTensor<Self>,
    ) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct SliceAssign;
        // Replays the forward assign from the checkpointed operands when
        // memory-bound.
        #[derive(new, Debug)]
        struct RetroSliceAssign<B: Backend> {
            tensor_id: NodeID,
            ranges: Vec<core::ops::Range<usize>>,
            value_id: NodeID,
            _backend: PhantomData<B>,
        }
        impl<B: Backend> RetroForward for RetroSliceAssign<B> {
            fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
                let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);
                let value = states.get_state::<B::FloatTensorPrimitive>(&self.value_id);
                let out = B::float_slice_assign(tensor, &self.ranges, value);
                states.save(out_node, out)
            }
        }
        impl<B: Backend> Backward<B, 2> for SliceAssign {
            // (ranges, value shape, value device).
            type State = (Vec<core::ops::Range<usize>>, Shape, B::Device);
            fn backward(
                self,
                ops: Ops<Self::State, 2>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (ranges, shape_rhs, device) = ops.state;
                // `ranges` is needed by both closures; duplicate only clones for
                // parents that are actually tracked.
                let [ranges_4lhs, ranges_4rhs] = duplicate(&ops.parents, Some(ranges));
                binary::<B, _, _>(
                    ops.parents,
                    ops.node,
                    grads,
                    |grad| {
                        // lhs: zero out the overwritten region of the gradient.
                        let zeros = B::float_zeros(shape_rhs, &device);
                        B::float_slice_assign(grad, &ranges_4lhs.unwrap(), zeros)
                    },
                    // rhs: the gradient of the assigned region.
                    |grad| B::float_slice(grad, &ranges_4rhs.unwrap()),
                );
            }
        }
        match SliceAssign
            .prepare::<C>([tensor.node.clone(), value.node.clone()])
            .memory_bound()
            .retro_forward(RetroSliceAssign::<B>::new(
                tensor.node.id,
                ranges.to_vec(),
                value.node.id,
            ))
            .parents([&tensor, &value])
            .stateful()
        {
            OpsKind::Tracked(prep) => prep.finish(
                (
                    ranges.to_vec(),
                    value.primitive.shape(),
                    B::float_device(&value.primitive),
                ),
                B::float_slice_assign(tensor.primitive, ranges, value.primitive),
            ),
            OpsKind::UnTracked(prep) => prep.finish(B::float_slice_assign(
                tensor.primitive,
                ranges,
                value.primitive,
            )),
        }
    }
    // Differentiable `mask_where`: where `mask` is true take `source`, else
    // `tensor`. Backward zeroes the gradient in the positions each operand did
    // NOT supply, then reduces over broadcast dimensions so shapes match.
    fn float_mask_where(
        tensor: FloatTensor<Self>,
        mask: BoolTensor<Self>,
        source: FloatTensor<Self>,
    ) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct MaskWhere;
        impl<B: Backend> Backward<B, 2> for MaskWhere {
            // (mask, lhs shape, rhs shape, device).
            type State = (BoolTensor<B>, Shape, Shape, B::Device);
            fn backward(
                self,
                ops: Ops<Self::State, 2>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (mask, shape_lhs, shape_rhs, device) = ops.state;
                // The mask is needed by both closures; clone only as required
                // by the tracked parents.
                let [mask_4lhs, mask_4rhs] = duplicate(&ops.parents, Some(mask));
                binary::<B, _, _>(
                    ops.parents,
                    ops.node,
                    grads,
                    |grad| {
                        // lhs (tensor): zero where the mask selected `source`.
                        let zeros = B::float_zeros(shape_lhs.clone(), &device);
                        let grad = B::float_mask_where(grad, mask_4lhs.unwrap(), zeros);
                        broadcast_shape::<B>(grad, &shape_lhs)
                    },
                    |grad| {
                        // rhs (source): keep only where the mask selected it.
                        let zeros = B::float_zeros(shape_rhs.clone(), &device);
                        let grad = B::float_mask_where(zeros, mask_4rhs.unwrap(), grad);
                        broadcast_shape::<B>(grad, &shape_rhs)
                    },
                );
            }
        }
        match MaskWhere
            .prepare::<C>([tensor.node, source.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => prep.finish(
                (
                    mask.clone(),
                    tensor.primitive.shape(),
                    source.primitive.shape(),
                    B::float_device(&source.primitive),
                ),
                B::float_mask_where(tensor.primitive, mask, source.primitive),
            ),
            OpsKind::UnTracked(prep) => prep.finish(B::float_mask_where(
                tensor.primitive,
                mask,
                source.primitive,
            )),
        }
    }
    // Differentiable `mask_fill`: where `mask` is true replace with the scalar
    // `value`. Backward zeroes the gradient at the filled positions (the input
    // had no influence there) and passes it through elsewhere.
    fn float_mask_fill(
        tensor: FloatTensor<Self>,
        mask: BoolTensor<B>,
        value: FloatElem<B>,
    ) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct MaskFill;
        impl<B: Backend> Backward<B, 1> for MaskFill {
            type State = BoolTensor<B>;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    // Zero the gradient wherever the mask filled a constant.
                    B::float_mask_fill(grad, ops.state, 0.elem())
                });
            }
        }
        match MaskFill
            .prepare::<C>([tensor.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => prep.finish(
                mask.clone(),
                B::float_mask_fill(tensor.primitive, mask, value),
            ),
            OpsKind::UnTracked(prep) => {
                prep.finish(B::float_mask_fill(tensor.primitive, mask, value))
            }
        }
    }
    // Comparison operators return boolean tensors and are not differentiable,
    // so they bypass the autodiff graph entirely and delegate straight to the
    // inner backend on the raw primitives.
    fn float_equal(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {
        B::float_equal(lhs.primitive, rhs.primitive)
    }
    fn float_equal_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {
        B::float_equal_elem(lhs.primitive, rhs)
    }
    fn float_greater(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {
        B::float_greater(lhs.primitive, rhs.primitive)
    }
    fn float_greater_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {
        B::float_greater_elem(lhs.primitive, rhs)
    }
    fn float_greater_equal(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {
        B::float_greater_equal(lhs.primitive, rhs.primitive)
    }
    fn float_greater_equal_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {
        B::float_greater_equal_elem(lhs.primitive, rhs)
    }
    fn float_lower(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {
        B::float_lower(lhs.primitive, rhs.primitive)
    }
    fn float_lower_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {
        B::float_lower_elem(lhs.primitive, rhs)
    }
    fn float_lower_equal(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {
        B::float_lower_equal(lhs.primitive, rhs.primitive)
    }
    fn float_lower_equal_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {
        B::float_lower_equal_elem(lhs.primitive, rhs)
    }
fn float_detach(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
// When we detach a tensor, we remove it from the graph, but we still want to keep the
// `require_grad` setting.
let is_require_grad = Self::float_is_require_grad(&tensor);
let tensor = AutodiffTensor::new(tensor.primitive);
match is_require_grad {
true => tensor.require_grad(),
false => tensor,
}
}
fn float_set_require_grad(tensor: FloatTensor<Self>, require_grad: bool) -> FloatTensor<Self> {
if require_grad {
return tensor.require_grad();
}
AutodiffTensor::new(tensor.primitive)
}
fn float_is_require_grad(tensor: &FloatTensor<Self>) -> bool {
matches!(tensor.node.requirement, Requirement::Grad)
}
    // Differentiable full-tensor mean. Backward spreads the (scalar) output
    // gradient uniformly over the input: each element receives grad / numel.
    fn float_mean(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Mean;
        impl<B: Backend> Backward<B, 1> for Mean {
            // Input shape, needed to rebuild the broadcast gradient.
            type State = Shape;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    let shape = ops.state;
                    // 1 / numel — the weight of each element in the mean.
                    let val = 1_f64 / shape.num_elements() as f64;
                    let ones = B::float_ones(shape, &B::float_device(&grad));
                    let val = B::float_mul_scalar(ones, val.elem());
                    let grad = unsqueeze_like::<B>(grad, val.shape());
                    B::float_mul(val, grad)
                });
            }
        }
        match Mean.prepare::<C>([tensor.node]).compute_bound().stateful() {
            OpsKind::Tracked(prep) => {
                prep.finish(tensor.primitive.shape(), B::float_mean(tensor.primitive))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_mean(tensor.primitive)),
        }
    }
    // Differentiable full-tensor sum. Backward broadcasts the (scalar) output
    // gradient to every input element unchanged.
    fn float_sum(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Sum;
        impl<B: Backend> Backward<B, 1> for Sum {
            // Input shape, needed to rebuild the broadcast gradient.
            type State = Shape;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    let val = B::float_ones(ops.state, &B::float_device(&grad));
                    let grad = unsqueeze_like::<B>(grad, val.shape());
                    B::float_mul(val, grad)
                });
            }
        }
        match Sum.prepare::<C>([tensor.node]).compute_bound().stateful() {
            OpsKind::Tracked(prep) => {
                prep.finish(tensor.primitive.shape(), B::float_sum(tensor.primitive))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_sum(tensor.primitive)),
        }
    }
    // Differentiable mean along one dimension. Backward broadcasts the output
    // gradient along `dim`, scaled by 1 / dims[dim].
    fn float_mean_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct MeanDim;
        impl<B: Backend> Backward<B, 1> for MeanDim {
            // (input shape, reduced dim).
            type State = (Shape, usize);
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (shape, dim) = ops.state;
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    // Each element's weight in the mean along `dim`.
                    let val = 1_f64 / shape.dims[dim] as f64;
                    let ones = B::float_ones(shape, &B::float_device(&grad));
                    let val = B::float_mul_scalar(ones, B::FloatElem::from_elem(val));
                    // Collapse then broadcast-multiply to spread the gradient.
                    let grad = B::float_sum_dim(grad, dim);
                    B::float_mul(val, grad)
                });
            }
        }
        match MeanDim
            .prepare::<C>([tensor.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => prep.finish(
                (tensor.primitive.shape(), dim),
                B::float_mean_dim(tensor.primitive, dim),
            ),
            OpsKind::UnTracked(prep) => prep.finish(B::float_mean_dim(tensor.primitive, dim)),
        }
    }
    // Differentiable sum along one dimension. Backward broadcasts the output
    // gradient along `dim` (every summed element receives it unchanged).
    fn float_sum_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct SumDim;
        impl<B: Backend> Backward<B, 1> for SumDim {
            // (input shape, reduced dim).
            type State = (Shape, usize);
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (shape, dim) = ops.state;
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    let ones = B::float_ones(shape, &B::float_device(&grad));
                    let grad = B::float_sum_dim(grad, dim);
                    // Broadcast-multiply by ones to expand back to input shape.
                    B::float_mul(ones, grad)
                });
            }
        }
        match SumDim
            .prepare::<C>([tensor.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => prep.finish(
                (tensor.primitive.shape(), dim),
                B::float_sum_dim(tensor.primitive, dim),
            ),
            OpsKind::UnTracked(prep) => prep.finish(B::float_sum_dim(tensor.primitive, dim)),
        }
    }
    // Argmax/argmin produce integer index tensors and are not differentiable,
    // so they bypass the graph and delegate to the inner backend.
    fn float_argmax(tensor: FloatTensor<Self>, dim: usize) -> IntTensor<B> {
        B::float_argmax(tensor.primitive, dim)
    }
    fn float_argmin(tensor: FloatTensor<Self>, dim: usize) -> IntTensor<B> {
        B::float_argmin(tensor.primitive, dim)
    }
    // Differentiable exp. d/dx exp(x) = exp(x); the backward recomputes the
    // output from the checkpointed input and multiplies it with the gradient.
    fn float_exp(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Exp;
        retro_unary!(RetroExp, B::float_exp);
        impl<B: Backend> Backward<B, 1> for Exp {
            // Checkpointed input node.
            type State = NodeID;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                let input = checkpointer.retrieve_node_output(ops.state);
                let output = B::float_exp(input);
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    B::float_mul(grad, output)
                });
            }
        }
        match Exp
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroExp::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                let state = prep.checkpoint(&tensor);
                prep.finish(state, B::float_exp(tensor.primitive))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_exp(tensor.primitive)),
        }
    }
    // Differentiable natural log. d/dx ln(x) = 1/x, computed as x^(-1).
    fn float_log(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Log;
        retro_unary!(RetroLog, B::float_log);
        impl<B: Backend> Backward<B, 1> for Log {
            // Checkpointed input node.
            type State = NodeID;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                let input = checkpointer.retrieve_node_output(ops.state);
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    let value = B::float_powf_scalar(input, -1.0);
                    B::float_mul(grad, value)
                });
            }
        }
        match Log
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroLog::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                let state = prep.checkpoint(&tensor);
                prep.finish(state, B::float_log(tensor.primitive))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_log(tensor.primitive)),
        }
    }
    // Differentiable log(1 + x). d/dx log1p(x) = 1/(x + 1).
    fn float_log1p(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Log1P;
        retro_unary!(RetroLog1P, B::float_log1p);
        impl<B: Backend> Backward<B, 1> for Log1P {
            // Checkpointed input node.
            type State = NodeID;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                let input = checkpointer.retrieve_node_output(ops.state);
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    // (x + 1)^(-1)
                    let value = B::float_add_scalar(input, 1.elem());
                    let value = B::float_powf_scalar(value, -1.0);
                    B::float_mul(grad, value)
                });
            }
        }
        match Log1P
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroLog1P::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                let state = prep.checkpoint(&tensor);
                prep.finish(state, B::float_log1p(tensor.primitive))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_log1p(tensor.primitive)),
        }
    }
    // Differentiable x^v with scalar exponent. d/dx x^v = v * x^(v - 1).
    fn float_powf_scalar(tensor: FloatTensor<Self>, value: f32) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct PowfScalar;
        // Replays the forward pow from the checkpointed base when memory-bound.
        #[derive(new, Debug)]
        struct RetroPowfScalar<B: Backend> {
            lhs_id: NodeID,
            rhs: f32,
            _backend: PhantomData<B>,
        }
        impl<B: Backend> RetroForward for RetroPowfScalar<B> {
            fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
                let lhs = states.get_state::<B::FloatTensorPrimitive>(&self.lhs_id);
                let out = B::float_powf_scalar(lhs, self.rhs);
                states.save(out_node, out)
            }
        }
        impl<B: Backend> Backward<B, 1> for PowfScalar {
            // (checkpointed input node, exponent).
            type State = (NodeID, f32);
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                let (tensor_id, value) = ops.state;
                let tensor = checkpointer.retrieve_node_output(tensor_id);
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    // v * x^(v - 1) * grad
                    let tmp = B::float_powf_scalar(tensor, value - 1.0);
                    let value = B::float_mul_scalar(tmp, value.elem());
                    B::float_mul(grad, value)
                });
            }
        }
        match PowfScalar
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroPowfScalar::<B>::new(tensor.node.id, value))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                let state = (prep.checkpoint(&tensor), value);
                prep.finish(state, B::float_powf_scalar(tensor.primitive, value))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_powf_scalar(tensor.primitive, value)),
        }
    }
    // Differentiable square root. d/dx sqrt(x) = x^(-1/2) / 2.
    fn float_sqrt(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Sqrt;
        retro_unary!(RetroSqrt, B::float_sqrt);
        impl<B: Backend> Backward<B, 1> for Sqrt {
            // Checkpointed input node.
            type State = NodeID;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                let input = checkpointer.retrieve_node_output(ops.state);
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    let value = B::float_div_scalar(B::float_powf_scalar(input, -0.5), 2.elem());
                    B::float_mul(grad, value)
                });
            }
        }
        match Sqrt
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroSqrt::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                let state = prep.checkpoint(&tensor);
                prep.finish(state, B::float_sqrt(tensor.primitive))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_sqrt(tensor.primitive)),
        }
    }
    // Differentiable absolute value. d/dx |x| = sign(x) (0 at the origin,
    // per the backend's `float_sign`).
    fn float_abs(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Abs;
        retro_unary!(RetroAbs, B::float_abs);
        impl<B: Backend> Backward<B, 1> for Abs {
            // Checkpointed input node.
            type State = NodeID;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                let tensor: B::FloatTensorPrimitive = checkpointer.retrieve_node_output(ops.state);
                let state = B::float_sign(tensor);
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    B::float_mul(grad, state)
                });
            }
        }
        match Abs
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroAbs::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                let state = prep.checkpoint(&tensor);
                prep.finish(state, B::float_abs(tensor.primitive))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_abs(tensor.primitive)),
        }
    }
    // Differentiable cosine. d/dx cos(x) = -sin(x).
    fn float_cos(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Cos;
        retro_unary!(RetroCos, B::float_cos);
        impl<B: Backend> Backward<B, 1> for Cos {
            // Checkpointed input node.
            type State = NodeID;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                let input = checkpointer.retrieve_node_output(ops.state);
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    let value = B::float_neg(B::float_sin(input));
                    B::float_mul(grad, value)
                });
            }
        }
        match Cos
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroCos::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                let state = prep.checkpoint(&tensor);
                prep.finish(state, B::float_cos(tensor.primitive))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_cos(tensor.primitive)),
        }
    }
    // Differentiable sine. d/dx sin(x) = cos(x).
    fn float_sin(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Sin;
        retro_unary!(RetroSin, B::float_sin);
        impl<B: Backend> Backward<B, 1> for Sin {
            // Checkpointed input node.
            type State = NodeID;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                let state = checkpointer.retrieve_node_output(ops.state);
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    let value = B::float_cos(state);
                    B::float_mul(grad, value)
                });
            }
        }
        match Sin
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroSin::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                let state = prep.checkpoint(&tensor);
                prep.finish(state, B::float_sin(tensor.primitive))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_sin(tensor.primitive)),
        }
    }
    // Differentiable tanh. d/dx tanh(x) = 1 - tanh(x)^2; the output is
    // recomputed from the checkpointed input.
    fn float_tanh(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Tanh;
        retro_unary!(RetroTanh, B::float_tanh);
        impl<B: Backend> Backward<B, 1> for Tanh {
            // Checkpointed input node.
            type State = NodeID;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                let input = checkpointer.retrieve_node_output(ops.state);
                let state = B::float_tanh(input);
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    // 1 - tanh(x)^2
                    let value = B::float_add_scalar(
                        B::float_neg(B::float_powf_scalar(state, 2.0)),
                        1.elem(),
                    );
                    B::float_mul(grad, value)
                });
            }
        }
        match Tanh
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroTanh::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                let state = prep.checkpoint(&tensor);
                prep.finish(state, B::float_tanh(tensor.primitive))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_tanh(tensor.primitive)),
        }
    }
    // Differentiable round. The derivative of rounding is zero almost
    // everywhere, so the backward returns a zero tensor of the input's shape.
    fn float_round(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Round;
        retro_unary!(RetroRound, B::float_round);
        impl<B: Backend> Backward<B, 1> for Round {
            // (input shape, input device) for building the zero gradient.
            type State = (Shape, B::Device);
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (shape, device) = ops.state;
                unary::<B, _>(ops.parents, ops.node, grads, |_grad| {
                    B::float_zeros(shape, &device)
                })
            }
        }
        match Round
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroRound::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(preps) => preps.finish(
                (tensor.primitive.shape(), B::float_device(&tensor.primitive)),
                B::float_round(tensor.primitive),
            ),
            OpsKind::UnTracked(preps) => preps.finish(B::float_round(tensor.primitive)),
        }
    }
    // Differentiable floor. The derivative of floor is zero almost everywhere,
    // so the backward returns a zero tensor of the input's shape.
    fn float_floor(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Floor;
        retro_unary!(RetroFloor, B::float_floor);
        impl<B: Backend> Backward<B, 1> for Floor {
            // (input shape, input device) for building the zero gradient.
            type State = (Shape, B::Device);
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (shape, device) = ops.state;
                unary::<B, _>(ops.parents, ops.node, grads, |_grad| {
                    B::float_zeros(shape, &device)
                })
            }
        }
        match Floor
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroFloor::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(preps) => preps.finish(
                (tensor.primitive.shape(), B::float_device(&tensor.primitive)),
                B::float_floor(tensor.primitive),
            ),
            OpsKind::UnTracked(preps) => preps.finish(B::float_floor(tensor.primitive)),
        }
    }
fn float_ceil(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
#[derive(Debug)]
struct Ceil;
retro_unary!(RetroCeil, B::float_ceil);
impl<B: Backend> Backward<B, 1> for Ceil {
type State = (Shape, B::Device);
fn backward(
self,
ops: Ops<Self::State, 1>,
grads: &mut Gradients,
_checkpointer: &mut Checkpointer,
) {
let (shape, device) = ops.state;
unary::<B, _>(ops.parents, ops.node, grads, |_grad| {
B::float_zeros(shape, &device)
})
}
}
match Ceil
.prepare::<C>([tensor.node.clone()])
.memory_bound()
.retro_forward(RetroCeil::<B>::new(tensor.node.id))
.parents([&tensor])
.stateful()
{
OpsKind::Tracked(preps) => preps.finish(
(tensor.primitive.shape(), B::float_device(&tensor.primitive)),
B::float_floor(tensor.primitive),
),
OpsKind::UnTracked(preps) => preps.finish(B::float_floor(tensor.primitive)),
}
}
    // Differentiable error function. d/dx erf(x) = (2 / sqrt(pi)) * exp(-x^2).
    fn float_erf(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Erf;
        retro_unary!(RetroErf, B::float_erf);
        impl<B: Backend> Backward<B, 1> for Erf {
            // Checkpointed input node.
            type State = NodeID;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    let ops = checkpointer.retrieve_node_output(ops.state);
                    // 2 * exp(-x^2) / sqrt(pi)
                    let exponent = B::float_neg(B::float_powf_scalar(ops, 2.0));
                    let numerator = B::float_mul_scalar(B::float_exp(exponent), 2.0.elem());
                    let denominator = core::f64::consts::PI.sqrt().elem();
                    let value = B::float_div_scalar(numerator, denominator);
                    B::float_mul(grad, value)
                });
            }
        }
        match Erf
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroErf::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                let state = prep.checkpoint(&tensor);
                prep.finish(state, B::float_erf(tensor.primitive))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_erf(tensor.primitive)),
        }
    }
    // Differentiable concatenation along `dim`. Uses a hand-written `Step`
    // (rather than the unary/binary helpers) because the number of parents is
    // dynamic: the backward slices the output gradient back into one piece per
    // input tensor, in order, along `dim`.
    fn float_cat(tensors: Vec<FloatTensor<Self>>, dim: usize) -> FloatTensor<Self> {
        #[derive(new, Debug)]
        struct CatStep<B: Backend> {
            nodes: Vec<Option<NodeRef>>,
            // The dimension of each tensor along the dim dimension.
            // This indicates the number of dimension concatenated for each tensor.
            dim_sizes: Vec<usize>,
            output: NodeRef,
            phantom: PhantomData<B>,
            dim: usize,
        }
        impl<B: Backend> Step for CatStep<B> {
            fn step(self: Box<Self>, grads: &mut Gradients, _checkpointer: &mut Checkpointer) {
                let grad = grads.consume::<B>(&self.output);
                // Full ranges over every dimension; only `self.dim` is narrowed
                // per input below.
                let ranges: Vec<_> = grad.shape().dims.iter().map(|v| 0..*v).collect();
                let mut current_index = 0;
                self.nodes
                    .into_iter()
                    .zip(self.dim_sizes)
                    // Untracked parents (None) are skipped but still advance no
                    // index here — only tracked nodes carry their dim_size through.
                    .filter_map(|(node, dim_size)| node.map(|node| (node, dim_size)))
                    .for_each(|(node, dim_size)| {
                        let mut ranges = ranges.clone();
                        ranges[self.dim] = current_index..dim_size + current_index;
                        current_index += dim_size;
                        grads.register::<B>(node.id, B::float_slice(grad.clone(), &ranges));
                    });
            }
            fn node(&self) -> NodeID {
                self.output.id
            }
            fn parents(&self) -> Vec<NodeID> {
                self.nodes
                    .iter()
                    .filter_map(|node| node.clone())
                    .map(|node| node.id)
                    .collect()
            }
            fn depth(&self) -> usize {
                self.output.order
            }
        }
        let mut nodes = Vec::with_capacity(tensors.len());
        let mut primitives = Vec::with_capacity(tensors.len());
        let mut dim_sizes = Vec::with_capacity(tensors.len());
        tensors.into_iter().for_each(|tensor| {
            dim_sizes.push(tensor.primitive.shape().dims[dim]);
            nodes.push(tensor.node);
            primitives.push(tensor.primitive);
        });
        let requirement = Requirement::from_nodes(&nodes);
        // For simplicity, this operation does not checkpoint anything
        let cat_computing_property = ComputingProperty::Ambiguous;
        let checkpointer_builder = CheckpointerBuilder::default();
        let output = B::float_cat(primitives, dim);
        // No parent requires gradients: return without registering a step.
        if requirement.is_none() {
            return AutodiffTensor::from_parents(
                output,
                &nodes,
                requirement,
                cat_computing_property,
            );
        }
        let output =
            AutodiffTensor::from_parents(output, &nodes, requirement, cat_computing_property);
        let nodes = nodes
            .into_iter()
            .map(|node| node.clone_if_require_grad())
            .collect::<Vec<_>>();
        let ops = CatStep::<B>::new(nodes, dim_sizes, output.node.clone(), dim);
        output.register_step(ops, checkpointer_builder)
    }
    // Differentiable max along `dim`. When tracked, the forward also computes
    // the argmax indices so the shared `MaxMinDim` backward can scatter the
    // gradient to the winning elements.
    fn float_max_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {
        match MaxMinDim
            .prepare::<C>([tensor.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => {
                let shape = tensor.primitive.shape();
                let (tensor, index) = B::float_max_dim_with_indices(tensor.primitive, dim);
                prep.finish((index, shape), tensor)
            }
            // Untracked: skip the index computation entirely.
            OpsKind::UnTracked(prep) => prep.finish(B::float_max_dim(tensor.primitive, dim)),
        }
    }
    // Like `float_max_dim`, but also returns the argmax indices to the caller.
    // Only the value tensor participates in the autodiff graph.
    fn float_max_dim_with_indices(
        tensor: FloatTensor<Self>,
        dim: usize,
    ) -> (FloatTensor<Self>, IntTensor<B>) {
        match MaxMinDim
            .prepare::<C>([tensor.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => {
                let shape = tensor.primitive.shape();
                let (tensor, index) = B::float_max_dim_with_indices(tensor.primitive, dim);
                let tensor = prep.finish((index.clone(), shape), tensor);
                (tensor, index)
            }
            OpsKind::UnTracked(prep) => {
                let (tensor, index) = B::float_max_dim_with_indices(tensor.primitive, dim);
                let tensor = prep.finish(tensor);
                (tensor, index)
            }
        }
    }
    // Differentiable min along `dim`; mirror of `float_max_dim`, sharing the
    // same `MaxMinDim` backward (gradient routed to the argmin elements).
    fn float_min_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {
        match MaxMinDim
            .prepare::<C>([tensor.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => {
                let shape = tensor.primitive.shape();
                let (tensor, index) = B::float_min_dim_with_indices(tensor.primitive, dim);
                prep.finish((index, shape), tensor)
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_min_dim(tensor.primitive, dim)),
        }
    }
    // Like `float_min_dim`, but also returns the argmin indices to the caller.
    // Only the value tensor participates in the autodiff graph.
    fn float_min_dim_with_indices(
        tensor: FloatTensor<Self>,
        dim: usize,
    ) -> (FloatTensor<Self>, IntTensor<B>) {
        match MaxMinDim
            .prepare::<C>([tensor.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => {
                let shape = tensor.primitive.shape();
                let (tensor, index) = B::float_min_dim_with_indices(tensor.primitive, dim);
                let tensor = prep.finish((index.clone(), shape), tensor);
                (tensor, index)
            }
            OpsKind::UnTracked(prep) => {
                let (tensor, index) = B::float_min_dim_with_indices(tensor.primitive, dim);
                let tensor = prep.finish(tensor);
                (tensor, index)
            }
        }
    }
    // Float-to-int cast is not differentiable; the result leaves the autodiff
    // graph and is produced directly by the inner backend.
    fn float_into_int(tensor: FloatTensor<Self>) -> <Autodiff<B> as Backend>::IntTensorPrimitive {
        B::float_into_int(tensor.primitive)
    }
    // Differentiable elementwise power lhs^rhs with tensor exponent.
    // d/dlhs = rhs * lhs^(rhs-1); d/drhs = lhs^rhs * ln(lhs). Broadcasting is
    // undone per operand via the stored `BinaryOpsBroadcast`.
    fn float_powf(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct PowF;
        retro_binary!(RetroPowf, B::float_powf);
        impl<B: Backend> Backward<B, 2> for PowF {
            // (checkpointed lhs node, checkpointed rhs node, broadcast info).
            type State = (NodeID, NodeID, BinaryOpsBroadcast);
            fn backward(
                self,
                ops: Ops<Self::State, 2>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                let (lhs_id, rhs_id, broadcast) = ops.state;
                let lhs: B::FloatTensorPrimitive = checkpointer.retrieve_node_output(lhs_id);
                let rhs: B::FloatTensorPrimitive = checkpointer.retrieve_node_output(rhs_id);
                // Both lhs and rhs are needed for both lhs and rhs gradients, but we clone them
                // the number of times required by the parents specification.
                let [rhs_4lhs, rhs_4rhs] = duplicate(&ops.parents, Some(rhs));
                let [lhs_4lhs, lhs_4rhs] = duplicate(&ops.parents, Some(lhs));
                binary::<B, _, _>(
                    ops.parents,
                    ops.node,
                    grads,
                    |grad| {
                        //rhs*(lhs.val**(rhs-1))*grad
                        let rhs1 = rhs_4lhs.unwrap();
                        let rhs2 = rhs1.clone();
                        let lhs = lhs_4lhs.unwrap();
                        let tmp = B::float_powf(
                            lhs,
                            B::float_sub_scalar(rhs1, B::FloatElem::from_elem(1.0)),
                        );
                        let value = B::float_mul(tmp, rhs2);
                        let grad = B::float_mul(grad, value);
                        broadcast.backward_lhs::<B>(grad)
                    },
                    |grad| {
                        //lhs**rhs * ln(lhs) * grad
                        let rhs = rhs_4rhs.unwrap();
                        let lhs1 = lhs_4rhs.unwrap();
                        let lhs2 = lhs1.clone();
                        let tmp = B::float_powf(lhs1, rhs);
                        let value = B::float_mul(tmp, B::float_log(lhs2));
                        let grad = B::float_mul(grad, value);
                        broadcast.backward_rhs::<B>(grad)
                    },
                );
            }
        }
        // Capture broadcast info before the primitives are consumed below.
        let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);
        match PowF
            .prepare::<C>([lhs.node.clone(), rhs.node.clone()])
            .memory_bound()
            .retro_forward(RetroPowf::<B>::new(lhs.node.id, rhs.node.id))
            .parents([&lhs, &rhs])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                let lhs_state = prep.checkpoint(&lhs);
                let rhs_state = prep.checkpoint(&rhs);
                prep.finish(
                    (lhs_state, rhs_state, broadcast),
                    B::float_powf(lhs.primitive, rhs.primitive),
                )
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_powf(lhs.primitive, rhs.primitive)),
        }
    }
    // Differentiable sign. The derivative of sign is zero almost everywhere,
    // so the backward contributes a zero gradient (via grad * 0).
    fn float_sign(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Sign;
        retro_unary!(RetroSign, B::float_sign);
        impl<B: Backend> Backward<B, 1> for Sign {
            type State = ();
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                unary::<B, _>(ops.parents, ops.node, grads, |grad|
                    // Always return 0 because the derivative of the sign function
                    // does not contribute to gradient updates in a meaningful way.
                    B::float_mul_scalar(grad, 0.elem()));
            }
        }
        // Stateless: the backward needs nothing from the forward pass.
        Sign.prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroSign::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateless(B::float_sign(tensor.primitive))
    }
    // Differentiable broadcast-expand to `shape`. Backward sums the gradient
    // over every dimension that was broadcast (size-1 in the input, larger in
    // the output, or newly prepended), then reshapes back to the input shape.
    fn float_expand(tensor: FloatTensor<Self>, shape: Shape) -> FloatTensor<Self> {
        // D1: tensor, D2: shape
        #[derive(Debug)]
        struct ExpandDim;
        // Replays the forward expand from the checkpointed input when
        // memory-bound.
        #[derive(new, Debug)]
        struct RetroExpand<B: Backend> {
            input_id: NodeID,
            shape: Shape,
            _backend: PhantomData<B>,
        }
        impl<B: Backend> RetroForward for RetroExpand<B> {
            fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
                let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);
                let out = B::float_expand(input, self.shape.clone());
                states.save(out_node, out)
            }
        }
        impl<B: Backend> Backward<B, 1> for ExpandDim {
            // (input shape, output shape).
            type State = (Shape, Shape);
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (shape_in, shape_out) = ops.state;
                let ndims_in = shape_in.num_dims();
                let ndims_out = shape_out.num_dims();
                // Input shape right-aligned against the output rank, with
                // prepended dims treated as size 1 (standard broadcasting).
                let mut shape_expanded = vec![1; ndims_out];
                debug_assert!(ndims_out >= ndims_in);
                for i in 0..ndims_in {
                    shape_expanded[i + (ndims_out - ndims_in)] = shape_in.dims[i];
                }
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    let shape_grad = grad.shape();
                    let mut grad = grad;
                    #[allow(clippy::needless_range_loop)]
                    for i in 0..ndims_out {
                        // Sum over every dimension that was broadcast.
                        if shape_expanded[i] == 1 && shape_grad.dims[i] != 1 {
                            grad = B::float_sum_dim(grad, i);
                        }
                    }
                    B::float_reshape(grad, shape_in)
                });
            }
        }
        match ExpandDim
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroExpand::<B>::new(tensor.node.id, shape.clone()))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(prep) => prep.finish(
                (tensor.primitive.shape(), shape.clone()),
                B::float_expand(tensor.primitive, shape),
            ),
            OpsKind::UnTracked(prep) => prep.finish(B::float_expand(tensor.primitive, shape)),
        }
    }
fn float_sort(tensor: FloatTensor<Self>, dim: usize, descending: bool) -> FloatTensor<Self> {
match super::sort::SortDim
.prepare::<C>([tensor.node])
.compute_bound()
.stateful()
{
OpsKind::Tracked(prep) => {
let shape = tensor.primitive.shape();
let (tensor, indices) =
B::float_sort_with_indices(tensor.primitive, dim, descending);
prep.finish((indices, shape), tensor)
}
OpsKind::UnTracked(prep) => {
prep.finish(B::float_sort(tensor.primitive, dim, descending))
}
}
}
fn float_sort_with_indices(
tensor: FloatTensor<Self>,
dim: usize,
descending: bool,
) -> (FloatTensor<Self>, IntTensor<B>) {
match super::sort::SortDim
.prepare::<C>([tensor.node])
.compute_bound()
.stateful()
{
OpsKind::Tracked(prep) => {
let shape = tensor.primitive.shape();
let (tensor, indices) =
B::float_sort_with_indices(tensor.primitive, dim, descending);
let tensor = prep.finish((indices.clone(), shape), tensor);
(tensor, indices)
}
OpsKind::UnTracked(prep) => {
let (tensor, indices) =
B::float_sort_with_indices(tensor.primitive, dim, descending);
let tensor = prep.finish(tensor);
(tensor, indices)
}
}
}
    fn float_argsort(tensor: FloatTensor<Self>, dim: usize, descending: bool) -> IntTensor<B> {
        // Argsort produces integer indices, which are not differentiable, so
        // the computation is delegated to the inner backend without
        // registering anything on the autodiff graph.
        B::float_argsort(tensor.primitive, dim, descending)
    }
fn float_repeat_dim(tensor: FloatTensor<Self>, dim: usize, times: usize) -> FloatTensor<Self> {
#[derive(Debug)]
struct Repeat;
#[derive(new, Debug)]
struct RetroRepeat<B: Backend> {
tensor_id: NodeID,
dim: usize,
times: usize,
_backend: PhantomData<B>,
}
impl<B: Backend> RetroForward for RetroRepeat<B> {
fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);
let out = B::float_repeat_dim(tensor, self.dim, self.times);
states.save(out_node, out)
}
}
impl<B: Backend> Backward<B, 1> for Repeat {
type State = (usize, usize);
fn backward(
self,
ops: Ops<Self::State, 1>,
grads: &mut Gradients,
_checkpointer: &mut Checkpointer,
) {
let (dim, times) = ops.state;
unary::<B, _>(ops.parents, ops.node, grads, |grad| {
let mut dims = grad.shape().dims;
let orig_dim_size = dims[dim] / times;
if orig_dim_size > 1 {
dims[dim] = orig_dim_size;
let orig_dims = dims.clone();
dims.insert(dim + 1, times); // shape [..., orig_dim_size, times, ...]
let grad = B::float_reshape(grad, Shape::from(dims));
let grad = B::float_sum_dim(grad, dim + 1); // sum over repeat times
B::float_reshape(grad, Shape::from(orig_dims))
} else {
B::float_sum_dim(grad, dim)
}
});
}
}
match Repeat
.prepare::<C>([tensor.node.clone()])
.memory_bound()
.retro_forward(RetroRepeat::<B>::new(tensor.node.id, dim, times))
.parents([&tensor])
.stateful()
{
OpsKind::Tracked(prep) => prep.finish(
(dim, times),
B::float_repeat_dim(tensor.primitive, dim, times),
),
OpsKind::UnTracked(prep) => {
prep.finish(B::float_repeat_dim(tensor.primitive, dim, times))
}
}
}
    fn float_cast(tensor: FloatTensor<Self>, dtype: burn_tensor::FloatDType) -> FloatTensor<Self> {
        // NOTE(review): wrapping the cast result in a fresh `AutodiffTensor`
        // detaches it from the autodiff graph — no gradient flows back through
        // a dtype cast. Confirm this is intentional rather than an oversight.
        AutodiffTensor::new(B::float_cast(tensor.primitive, dtype))
    }
// TODO: Implement float_prod and float_sum
// https://github.com/tracel-ai/burn/issues/1458
}
/// Records whether a binary operation was applied to operands of different
/// shapes, so the backward pass can reduce each gradient back to its
/// operand's original shape.
#[derive(Debug, Clone)]
enum BinaryOpsBroadcast {
    /// Operand shapes differed: (lhs shape, rhs shape).
    Broadcasted(Shape, Shape),
    /// Operand shapes were identical; gradients pass through unchanged.
    None,
}
impl BinaryOpsBroadcast {
    /// Compares the operand shapes axis by axis and records them when any
    /// axis differs. Axes are compared positionally over the lhs rank, same
    /// as the original loop (so a lower-rank rhs still panics on indexing).
    fn new<B: Backend>(lhs: &B::FloatTensorPrimitive, rhs: &B::FloatTensorPrimitive) -> Self {
        let shape_lhs = lhs.shape();
        let shape_rhs = rhs.shape();

        let any_axis_differs = (0..shape_lhs.num_dims())
            .any(|axis| shape_lhs.dims[axis] != shape_rhs.dims[axis]);

        if any_axis_differs {
            Self::Broadcasted(shape_lhs, shape_rhs)
        } else {
            Self::None
        }
    }

    /// Reduces `grad` back to the left-hand operand's original shape.
    fn backward_lhs<B: Backend>(&self, grad: B::FloatTensorPrimitive) -> B::FloatTensorPrimitive {
        if let Self::Broadcasted(shape_lhs, _) = self {
            broadcast_shape::<B>(grad, shape_lhs)
        } else {
            grad
        }
    }

    /// Reduces `grad` back to the right-hand operand's original shape.
    fn backward_rhs<B: Backend>(&self, grad: B::FloatTensorPrimitive) -> B::FloatTensorPrimitive {
        if let Self::Broadcasted(_, shape_rhs) = self {
            broadcast_shape::<B>(grad, shape_rhs)
        } else {
            grad
        }
    }
}
| rust | {
"argument_definitions": [
{
"definitions": [
"impl<B: Backend, C: CheckpointStrategy> FloatTensorOps<Self> for Autodiff<B, C> {\n fn float_from_data(data: TensorData, device: &Device<Self>) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_from_data(data, device))\n }\n\n fn float_random(\n shape: Shape,\n distribution: burn_tensor::Distribution,\n device: &Device<Self>,\n ) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_random(shape, distribution, device))\n }\n\n fn float_zeros(shape: Shape, device: &Device<Self>) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_zeros(shape, device))\n }\n\n fn float_ones(shape: Shape, device: &Device<Self>) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_ones(shape, device))\n }\n\n async fn float_into_data(tensor: FloatTensor<Self>) -> TensorData {\n B::float_into_data(tensor.primitive).await\n }\n\n fn float_device(tensor: &FloatTensor<Self>) -> Device<Self> {\n B::float_device(&tensor.primitive)\n }\n\n fn float_to_device(tensor: FloatTensor<Self>, device: &Device<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct ToDevice;\n\n impl<B: Backend> Backward<B, 1> for ToDevice {\n type State = B::Device;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_to_device(grad, &ops.state)\n });\n }\n }\n\n match ToDevice\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let device_old = B::float_device(&tensor.primitive);\n prep.finish(device_old, B::float_to_device(tensor.primitive, device))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_to_device(tensor.primitive, device)),\n }\n }\n\n fn float_empty(shape: Shape, device: &Device<Self>) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_empty(shape, device))\n }\n\n fn float_add(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Add;\n\n retro_binary!(RetroAdd, 
B::float_add);\n\n impl<B: Backend> Backward<B, 2> for Add {\n type State = (Shape, Shape);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape_lhs, shape_rhs) = ops.state;\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| broadcast_shape::<B>(grad, &shape_lhs),\n |grad| broadcast_shape::<B>(grad, &shape_rhs),\n );\n }\n }\n\n match Add\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroAdd::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (lhs.primitive.shape(), rhs.primitive.shape()),\n B::float_add(lhs.primitive, rhs.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_add(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_add_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct AddScalar;\n\n retro_unary_scalar!(RetroAddScalar, B::float_add_scalar);\n\n impl<B: Backend> Backward<B, 1> for AddScalar {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| grad);\n }\n }\n\n AddScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroAddScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateless(B::float_add_scalar(lhs.primitive, rhs))\n }\n\n fn float_sub(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sub;\n\n retro_binary!(RetroSub, B::float_sub);\n\n impl<B: Backend> Backward<B, 2> for Sub {\n type State = (Shape, Shape);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape_lhs, shape_rhs) = ops.state;\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| 
broadcast_shape::<B>(grad, &shape_lhs),\n |grad| broadcast_shape::<B>(B::float_neg(grad), &shape_rhs),\n );\n }\n }\n\n match Sub\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroSub::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (lhs.primitive.shape(), rhs.primitive.shape()),\n B::float_sub(lhs.primitive, rhs.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_sub(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_sub_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct SubScalar;\n\n retro_unary_scalar!(RetroSubScalar, B::float_sub_scalar);\n\n impl<B: Backend> Backward<B, 1> for SubScalar {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| grad);\n }\n }\n\n SubScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroSubScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateless(B::float_sub_scalar(lhs.primitive, rhs))\n }\n\n fn float_mul(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Mul;\n\n retro_binary!(RetroMul, B::float_mul);\n\n impl<B: Backend> Backward<B, 2> for Mul {\n type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (lhs, rhs, broadcast) = ops.state;\n let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));\n let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let grad = B::float_mul(grad, rhs.unwrap());\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n let grad = B::float_mul(grad, lhs.unwrap());\n 
broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let lhs_tracked = lhs.is_tracked();\n let rhs_tracked = rhs.is_tracked();\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match Mul\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroMul::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));\n let rhs_state = lhs_tracked.then(|| prep.checkpoint(&rhs));\n\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n B::float_mul(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_mul(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_mul_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct MulScalar;\n\n retro_unary_scalar!(RetroMulScalar, B::float_mul_scalar);\n\n impl<B: Backend> Backward<B, 1> for MulScalar {\n type State = FloatElem<B>;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_mul_scalar(grad, ops.state)\n });\n }\n }\n\n match MulScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroMulScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(rhs, B::float_mul_scalar(lhs.primitive, rhs)),\n OpsKind::UnTracked(prep) => prep.finish(B::float_mul_scalar(lhs.primitive, rhs)),\n }\n }\n\n fn float_div(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Div;\n\n retro_binary!(RetroDiv, B::float_div);\n\n impl<B: Backend> Backward<B, 2> for Div {\n type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut 
Checkpointer,\n ) {\n let (lhs, rhs, broadcast) = ops.state;\n let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));\n let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));\n let [rhs_4lhs, rhs_4rhs] = duplicate(&ops.parents, rhs);\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let rhs = rhs_4lhs.unwrap();\n let value = B::float_powf_scalar(rhs, -1.0);\n let grad = B::float_mul(grad, value);\n\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n let rhs = rhs_4rhs.unwrap();\n let lhs = lhs.unwrap();\n let value = B::float_div(B::float_neg(lhs), B::float_powf_scalar(rhs, 2.0));\n let grad = B::float_mul(grad, value);\n\n broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let lhs_tracked = lhs.is_tracked();\n let rhs_tracked = rhs.is_tracked();\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match Div\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroDiv::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));\n let rhs_state = (lhs_tracked || rhs_tracked).then(|| prep.checkpoint(&rhs));\n\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n B::float_div(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_div(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_div_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct DivScalar;\n\n retro_unary_scalar!(RetroDivScalar, B::float_div_scalar);\n\n impl<B: Backend> Backward<B, 1> for DivScalar {\n type State = FloatElem<B>;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let tmp = 1.0 / ops.state.elem::<f32>();\n B::float_mul_scalar(grad, tmp.elem())\n });\n }\n 
}\n\n match DivScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroDivScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(rhs, B::float_div_scalar(lhs.primitive, rhs)),\n OpsKind::UnTracked(prep) => prep.finish(B::float_div_scalar(lhs.primitive, rhs)),\n }\n }\n\n fn float_remainder(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Rem;\n\n retro_binary!(RetroRem, B::float_remainder);\n\n impl<B: Backend> Backward<B, 2> for Rem {\n type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (lhs, rhs, broadcast) = ops.state;\n let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));\n let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n // remainder(x, y) = x - floor(x / y) * y\n // partial(x - floor(x / y) * y, x) = 1\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n // partial(x - floor(x / y) * y, y) = - floor(x / y)\n let rhs = rhs.unwrap();\n let lhs = lhs.unwrap();\n let value = B::float_neg(B::float_floor(B::float_div(lhs, rhs)));\n let grad = B::float_mul(grad, value);\n broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let lhs_tracked = lhs.is_tracked();\n let rhs_tracked = rhs.is_tracked();\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match Rem\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroRem::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));\n let rhs_state = (lhs_tracked || rhs_tracked).then(|| prep.checkpoint(&rhs));\n\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n 
B::float_remainder(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_remainder(lhs.primitive, rhs.primitive))\n }\n }\n }\n\n fn float_remainder_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct RemainderScalar;\n\n retro_unary_scalar!(RetroRemainderScalar, B::float_remainder_scalar);\n\n impl<B: Backend> Backward<B, 1> for RemainderScalar {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| grad);\n }\n }\n\n RemainderScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroRemainderScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateless(B::float_remainder_scalar(lhs.primitive, rhs))\n }\n\n fn float_matmul(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Matmul;\n\n impl<B: Backend> Backward<B, 2> for Matmul {\n type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (lhs, rhs, broadcast) = ops.state;\n let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));\n let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let rhs = B::float_transpose(rhs.unwrap());\n let grad = B::float_matmul(grad, rhs);\n\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n let lhs = B::float_transpose(lhs.unwrap());\n let grad = B::float_matmul(lhs, grad);\n\n broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let lhs_tracked = lhs.is_tracked();\n let rhs_tracked = rhs.is_tracked();\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match Matmul\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n 
.compute_bound()\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));\n let rhs_state = lhs_tracked.then(|| prep.checkpoint(&rhs));\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n B::float_matmul(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_matmul(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_neg(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Neg;\n\n retro_unary!(RetroNeg, B::float_neg);\n\n impl<B: Backend> Backward<B, 1> for Neg {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| B::float_neg(grad));\n }\n }\n\n Neg.prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroNeg::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateless(B::float_neg(tensor.primitive))\n }\n\n fn float_recip(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Recip;\n\n retro_unary!(RetroRecip, B::float_recip);\n\n impl<B: Backend> Backward<B, 1> for Recip {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let tensor = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let tmp = B::float_powf_scalar(tensor, -2.0);\n let value = B::float_neg(tmp);\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Recip\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroRecip::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_recip(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_recip(tensor.primitive)),\n }\n }\n\n fn float_swap_dims(tensor: 
FloatTensor<Self>, dim1: usize, dim2: usize) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct SwapDim;\n\n #[derive(new, Debug)]\n struct RetroSwapDims<B: Backend> {\n input_id: NodeID,\n dim1: usize,\n dim2: usize,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroSwapDims<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_swap_dims(input, self.dim1, self.dim2);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for SwapDim {\n type State = (usize, usize);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim1, dim2) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_swap_dims(grad, dim2, dim1)\n });\n }\n }\n\n match SwapDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSwapDims::<B>::new(tensor.node.id, dim1, dim2))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (dim1, dim2),\n B::float_swap_dims(tensor.primitive, dim1, dim2),\n ),\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_swap_dims(tensor.primitive, dim1, dim2))\n }\n }\n }\n\n fn float_permute(tensor: FloatTensor<Self>, axes: &[usize]) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct PermuteDim;\n\n #[derive(new, Debug)]\n struct RetroPermuteDims<B: Backend> {\n input_id: NodeID,\n axes: Vec<usize>,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroPermuteDims<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_permute(input, &self.axes);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for PermuteDim {\n type State = Vec<usize>;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: 
&mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let axes = ops.state;\n\n let mut inverse = vec![0usize; axes.len()];\n axes.iter()\n .enumerate()\n .for_each(|(i, &axis)| inverse[axis] = i);\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_permute(grad, &inverse)\n });\n }\n }\n\n match PermuteDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroPermuteDims::<B>::new(tensor.node.id, axes.to_vec()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n prep.finish(axes.to_vec(), B::float_permute(tensor.primitive, axes))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_permute(tensor.primitive, axes)),\n }\n }\n\n fn float_flip(tensor: FloatTensor<Self>, axes: &[usize]) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct FlipDim;\n\n #[derive(new, Debug)]\n struct RetroFlipDims<B: Backend> {\n input_id: NodeID,\n axes: Vec<usize>,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroFlipDims<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_flip(input, &self.axes);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for FlipDim {\n type State = Vec<usize>;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let axes = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_flip(grad, &axes)\n });\n }\n }\n\n match FlipDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroFlipDims::<B>::new(tensor.node.id, axes.to_vec()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n prep.finish(axes.to_vec(), B::float_flip(tensor.primitive, axes))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_flip(tensor.primitive, axes)),\n }\n }\n\n fn float_reshape(tensor: FloatTensor<Self>, shape: 
Shape) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct ReshapeDim;\n\n #[derive(new, Debug)]\n struct RetroReshape<B: Backend> {\n input_id: NodeID,\n shape: Shape,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroReshape<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_reshape(input, self.shape.clone());\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for ReshapeDim {\n type State = (Shape, Shape);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape_original, shape) = ops.state;\n let ndims_out = shape.num_dims();\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let shape_grad = grad.shape();\n let mut grad = grad;\n\n for i in 0..ndims_out {\n if shape.dims[i] == 1 && shape_grad.dims[i] != 1 {\n grad = B::float_sum_dim(grad, i);\n }\n }\n\n B::float_reshape(grad, shape_original)\n });\n }\n }\n\n match ReshapeDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroReshape::<B>::new(tensor.node.id, shape.clone()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (tensor.primitive.shape(), shape.clone()),\n B::float_reshape(tensor.primitive, shape),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_reshape(tensor.primitive, shape)),\n }\n }\n\n fn float_gather(\n dim: usize,\n tensor: FloatTensor<Self>,\n indices: IntTensor<B>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Gather;\n\n impl<B: Backend> Backward<B, 1> for Gather {\n type State = (usize, IntTensor<B>, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, indices, shape, device) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let zeros = 
B::float_zeros(shape, &device);\n B::float_scatter(dim, zeros, indices, grad)\n });\n }\n }\n\n match Gather\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n dim,\n indices.clone(),\n tensor.primitive.shape(),\n B::float_device(&tensor.primitive),\n ),\n B::float_gather(dim, tensor.primitive, indices),\n ),\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_gather(dim, tensor.primitive, indices))\n }\n }\n }\n\n fn float_scatter(\n dim: usize,\n tensor: FloatTensor<Self>,\n indices: IntTensor<B>,\n value: FloatTensor<Self>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Scatter;\n\n impl<B: Backend> Backward<B, 2> for Scatter {\n type State = (usize, IntTensor<B>, Shape, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, indices, shape_lhs, shape_rhs, device) = ops.state;\n let [indices_4lhs, indices_4rhs] = duplicate(&ops.parents, Some(indices));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let zeros = B::float_zeros(shape_lhs, &device);\n B::float_scatter(dim, grad, indices_4lhs.unwrap(), zeros)\n },\n |grad| {\n let zeros = B::float_zeros(shape_rhs, &device);\n B::float_scatter(dim, zeros, indices_4rhs.unwrap(), grad)\n },\n );\n }\n }\n\n match Scatter\n .prepare::<C>([tensor.node, value.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n dim,\n indices.clone(),\n tensor.primitive.shape(),\n value.primitive.shape(),\n B::float_device(&value.primitive),\n ),\n B::float_scatter(dim, tensor.primitive, indices, value.primitive),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_scatter(\n dim,\n tensor.primitive,\n indices,\n value.primitive,\n )),\n }\n }\n\n fn float_select(\n tensor: FloatTensor<Self>,\n dim: usize,\n indices: IntTensor<B>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Select;\n\n 
#[derive(new, Debug)]\n struct RetroSelect<B: Backend> {\n input_id: NodeID,\n dim: usize,\n indices: IntTensor<B>,\n }\n\n impl<B: Backend> RetroForward for RetroSelect<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_select(input, self.dim, self.indices.clone());\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for Select {\n type State = (usize, IntTensor<B>, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, indices, shape, device) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let zeros = B::float_zeros(shape, &device);\n B::float_select_assign(zeros, dim, indices, grad)\n });\n }\n }\n\n match Select\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSelect::<B>::new(tensor.node.id, dim, indices.clone()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n dim,\n indices.clone(),\n tensor.primitive.shape(),\n B::float_device(&tensor.primitive),\n ),\n B::float_select(tensor.primitive, dim, indices),\n ),\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_select(tensor.primitive, dim, indices))\n }\n }\n }\n\n fn float_select_assign(\n tensor: FloatTensor<Self>,\n dim: usize,\n indices: IntTensor<B>,\n value: FloatTensor<Self>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct IndexSelectDimAssign;\n\n #[derive(new, Debug)]\n struct RetroSelectAssign<B: Backend> {\n tensor_id: NodeID,\n dim: usize,\n indices: IntTensor<B>,\n value_id: NodeID,\n }\n\n impl<B: Backend> RetroForward for RetroSelectAssign<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);\n let value = states.get_state::<B::FloatTensorPrimitive>(&self.value_id);\n 
let out = B::float_select_assign(tensor, self.dim, self.indices.clone(), value);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 2> for IndexSelectDimAssign {\n type State = (usize, IntTensor<B>);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, indices) = ops.state;\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| grad,\n |grad| B::float_select(grad, dim, indices),\n );\n }\n }\n\n match IndexSelectDimAssign\n .prepare::<C>([tensor.node.clone(), value.node.clone()])\n .memory_bound()\n .retro_forward(RetroSelectAssign::<B>::new(\n tensor.node.id,\n dim,\n indices.clone(),\n value.node.id,\n ))\n .parents([&tensor, &value])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (dim, indices.clone()),\n B::float_select_assign(tensor.primitive, dim, indices, value.primitive),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_select_assign(\n tensor.primitive,\n dim,\n indices,\n value.primitive,\n )),\n }\n }\n\n fn float_slice(\n tensor: FloatTensor<Self>,\n ranges: &[core::ops::Range<usize>],\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Index;\n\n #[derive(new, Debug)]\n struct RetroSlice<B: Backend> {\n tensor_id: NodeID,\n ranges: Vec<core::ops::Range<usize>>,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroSlice<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);\n let out = B::float_slice(tensor, &self.ranges);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for Index {\n type State = (Vec<core::ops::Range<usize>>, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (ranges, shape, device) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let zeros = 
B::float_zeros(shape, &device);\n B::float_slice_assign(zeros, &ranges, grad)\n });\n }\n }\n\n match Index\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSlice::<B>::new(tensor.node.id, ranges.to_vec()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n ranges.to_vec(),\n tensor.primitive.shape(),\n B::float_device(&tensor.primitive),\n ),\n B::float_slice(tensor.primitive, ranges),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_slice(tensor.primitive, ranges)),\n }\n }\n\n fn float_slice_assign(\n tensor: FloatTensor<Self>,\n ranges: &[core::ops::Range<usize>],\n value: FloatTensor<Self>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct SliceAssign;\n\n #[derive(new, Debug)]\n struct RetroSliceAssign<B: Backend> {\n tensor_id: NodeID,\n ranges: Vec<core::ops::Range<usize>>,\n value_id: NodeID,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroSliceAssign<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);\n let value = states.get_state::<B::FloatTensorPrimitive>(&self.value_id);\n let out = B::float_slice_assign(tensor, &self.ranges, value);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 2> for SliceAssign {\n type State = (Vec<core::ops::Range<usize>>, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (ranges, shape_rhs, device) = ops.state;\n let [ranges_4lhs, ranges_4rhs] = duplicate(&ops.parents, Some(ranges));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let zeros = B::float_zeros(shape_rhs, &device);\n B::float_slice_assign(grad, &ranges_4lhs.unwrap(), zeros)\n },\n |grad| B::float_slice(grad, &ranges_4rhs.unwrap()),\n );\n }\n }\n\n match SliceAssign\n .prepare::<C>([tensor.node.clone(), value.node.clone()])\n 
.memory_bound()\n .retro_forward(RetroSliceAssign::<B>::new(\n tensor.node.id,\n ranges.to_vec(),\n value.node.id,\n ))\n .parents([&tensor, &value])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n ranges.to_vec(),\n value.primitive.shape(),\n B::float_device(&value.primitive),\n ),\n B::float_slice_assign(tensor.primitive, ranges, value.primitive),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_slice_assign(\n tensor.primitive,\n ranges,\n value.primitive,\n )),\n }\n }\n\n fn float_mask_where(\n tensor: FloatTensor<Self>,\n mask: BoolTensor<Self>,\n source: FloatTensor<Self>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct MaskWhere;\n\n impl<B: Backend> Backward<B, 2> for MaskWhere {\n type State = (BoolTensor<B>, Shape, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (mask, shape_lhs, shape_rhs, device) = ops.state;\n let [mask_4lhs, mask_4rhs] = duplicate(&ops.parents, Some(mask));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let zeros = B::float_zeros(shape_lhs.clone(), &device);\n let grad = B::float_mask_where(grad, mask_4lhs.unwrap(), zeros);\n\n broadcast_shape::<B>(grad, &shape_lhs)\n },\n |grad| {\n let zeros = B::float_zeros(shape_rhs.clone(), &device);\n let grad = B::float_mask_where(zeros, mask_4rhs.unwrap(), grad);\n\n broadcast_shape::<B>(grad, &shape_rhs)\n },\n );\n }\n }\n\n match MaskWhere\n .prepare::<C>([tensor.node, source.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n mask.clone(),\n tensor.primitive.shape(),\n source.primitive.shape(),\n B::float_device(&source.primitive),\n ),\n B::float_mask_where(tensor.primitive, mask, source.primitive),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_mask_where(\n tensor.primitive,\n mask,\n source.primitive,\n )),\n }\n }\n\n fn float_mask_fill(\n tensor: FloatTensor<Self>,\n mask: 
BoolTensor<B>,\n value: FloatElem<B>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct MaskFill;\n\n impl<B: Backend> Backward<B, 1> for MaskFill {\n type State = BoolTensor<B>;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_mask_fill(grad, ops.state, 0.elem())\n });\n }\n }\n\n match MaskFill\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n mask.clone(),\n B::float_mask_fill(tensor.primitive, mask, value),\n ),\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_mask_fill(tensor.primitive, mask, value))\n }\n }\n }\n\n fn float_equal(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {\n B::float_equal(lhs.primitive, rhs.primitive)\n }\n\n fn float_equal_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {\n B::float_equal_elem(lhs.primitive, rhs)\n }\n\n fn float_greater(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {\n B::float_greater(lhs.primitive, rhs.primitive)\n }\n\n fn float_greater_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {\n B::float_greater_elem(lhs.primitive, rhs)\n }\n\n fn float_greater_equal(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {\n B::float_greater_equal(lhs.primitive, rhs.primitive)\n }\n\n fn float_greater_equal_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {\n B::float_greater_equal_elem(lhs.primitive, rhs)\n }\n\n fn float_lower(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {\n B::float_lower(lhs.primitive, rhs.primitive)\n }\n\n fn float_lower_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {\n B::float_lower_elem(lhs.primitive, rhs)\n }\n\n fn float_lower_equal(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {\n B::float_lower_equal(lhs.primitive, rhs.primitive)\n 
    }

    // Element-wise `lhs <= rhs` against a scalar. Comparisons produce bool
    // tensors and carry no gradient, so this forwards straight to the inner backend.
    fn float_lower_equal_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {
        B::float_lower_equal_elem(lhs.primitive, rhs)
    }

    fn float_detach(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        // When we detach a tensor, we remove it from the graph, but we still want to keep the
        // `require_grad` setting.
        let is_require_grad = Self::float_is_require_grad(&tensor);
        // A fresh `AutodiffTensor` starts without graph parents, which is what
        // severs the connection to the previous computation.
        let tensor = AutodiffTensor::new(tensor.primitive);

        match is_require_grad {
            true => tensor.require_grad(),
            false => tensor,
        }
    }

    // Enables or disables gradient tracking. Disabling wraps the primitive in a
    // fresh node, dropping the existing graph linkage.
    fn float_set_require_grad(tensor: FloatTensor<Self>, require_grad: bool) -> FloatTensor<Self> {
        if require_grad {
            return tensor.require_grad();
        }

        AutodiffTensor::new(tensor.primitive)
    }

    // A tensor requires grad iff its node requirement is exactly `Grad`.
    fn float_is_require_grad(tensor: &FloatTensor<Self>) -> bool {
        matches!(tensor.node.requirement, Requirement::Grad)
    }

    fn float_mean(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Mean;

        impl<B: Backend> Backward<B, 1> for Mean {
            // Input shape, needed to scale and broadcast the incoming gradient.
            type State = Shape;

            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    let shape = ops.state;
                    // d(mean)/dx_i = 1/N for every input element.
                    let val = 1_f64 / shape.num_elements() as f64;
                    let ones = B::float_ones(shape, &B::float_device(&grad));
                    let val = B::float_mul_scalar(ones, val.elem());

                    // Expand the reduced gradient back up to the input rank
                    // before multiplying.
                    let grad = unsqueeze_like::<B>(grad, val.shape());
                    B::float_mul(val, grad)
                });
            }
        }

        match Mean.prepare::<C>([tensor.node]).compute_bound().stateful() {
            OpsKind::Tracked(prep) => {
                prep.finish(tensor.primitive.shape(), B::float_mean(tensor.primitive))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_mean(tensor.primitive)),
        }
    }

    fn float_sum(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Sum;

        impl<B: Backend> Backward<B, 1> for Sum {
            // Input shape: the gradient of a full sum is 1 broadcast over it.
            type State = Shape;

            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut 
Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let val = B::float_ones(ops.state, &B::float_device(&grad));\n\n let grad = unsqueeze_like::<B>(grad, val.shape());\n B::float_mul(val, grad)\n });\n }\n }\n\n match Sum.prepare::<C>([tensor.node]).compute_bound().stateful() {\n OpsKind::Tracked(prep) => {\n prep.finish(tensor.primitive.shape(), B::float_sum(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_sum(tensor.primitive)),\n }\n }\n\n fn float_mean_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct MeanDim;\n\n impl<B: Backend> Backward<B, 1> for MeanDim {\n type State = (Shape, usize);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape, dim) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let val = 1_f64 / shape.dims[dim] as f64;\n let ones = B::float_ones(shape, &B::float_device(&grad));\n let val = B::float_mul_scalar(ones, B::FloatElem::from_elem(val));\n\n let grad = B::float_sum_dim(grad, dim);\n B::float_mul(val, grad)\n });\n }\n }\n\n match MeanDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (tensor.primitive.shape(), dim),\n B::float_mean_dim(tensor.primitive, dim),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_mean_dim(tensor.primitive, dim)),\n }\n }\n\n fn float_sum_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct SumDim;\n\n impl<B: Backend> Backward<B, 1> for SumDim {\n type State = (Shape, usize);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape, dim) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let ones = B::float_ones(shape, &B::float_device(&grad));\n let grad = 
B::float_sum_dim(grad, dim);\n\n B::float_mul(ones, grad)\n });\n }\n }\n\n match SumDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (tensor.primitive.shape(), dim),\n B::float_sum_dim(tensor.primitive, dim),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_sum_dim(tensor.primitive, dim)),\n }\n }\n\n fn float_argmax(tensor: FloatTensor<Self>, dim: usize) -> IntTensor<B> {\n B::float_argmax(tensor.primitive, dim)\n }\n\n fn float_argmin(tensor: FloatTensor<Self>, dim: usize) -> IntTensor<B> {\n B::float_argmin(tensor.primitive, dim)\n }\n\n fn float_exp(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Exp;\n\n retro_unary!(RetroExp, B::float_exp);\n\n impl<B: Backend> Backward<B, 1> for Exp {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n let output = B::float_exp(input);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_mul(grad, output)\n });\n }\n }\n\n match Exp\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroExp::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_exp(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_exp(tensor.primitive)),\n }\n }\n\n fn float_log(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Log;\n\n retro_unary!(RetroLog, B::float_log);\n\n impl<B: Backend> Backward<B, 1> for Log {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = 
B::float_powf_scalar(input, -1.0);\n B::float_mul(grad, value)\n });\n }\n }\n\n match Log\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroLog::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_log(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_log(tensor.primitive)),\n }\n }\n\n fn float_log1p(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Log1P;\n\n retro_unary!(RetroLog1P, B::float_log1p);\n\n impl<B: Backend> Backward<B, 1> for Log1P {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_add_scalar(input, 1.elem());\n let value = B::float_powf_scalar(value, -1.0);\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Log1P\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroLog1P::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_log1p(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_log1p(tensor.primitive)),\n }\n }\n\n fn float_powf_scalar(tensor: FloatTensor<Self>, value: f32) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct PowfScalar;\n\n #[derive(new, Debug)]\n struct RetroPowfScalar<B: Backend> {\n lhs_id: NodeID,\n rhs: f32,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroPowfScalar<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let lhs = states.get_state::<B::FloatTensorPrimitive>(&self.lhs_id);\n let out = B::float_powf_scalar(lhs, self.rhs);\n states.save(out_node, out)\n }\n }\n\n impl<B: 
Backend> Backward<B, 1> for PowfScalar {\n type State = (NodeID, f32);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (tensor_id, value) = ops.state;\n let tensor = checkpointer.retrieve_node_output(tensor_id);\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let tmp = B::float_powf_scalar(tensor, value - 1.0);\n let value = B::float_mul_scalar(tmp, value.elem());\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match PowfScalar\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroPowfScalar::<B>::new(tensor.node.id, value))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = (prep.checkpoint(&tensor), value);\n prep.finish(state, B::float_powf_scalar(tensor.primitive, value))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_powf_scalar(tensor.primitive, value)),\n }\n }\n\n fn float_sqrt(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sqrt;\n\n retro_unary!(RetroSqrt, B::float_sqrt);\n\n impl<B: Backend> Backward<B, 1> for Sqrt {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_div_scalar(B::float_powf_scalar(input, -0.5), 2.elem());\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Sqrt\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSqrt::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_sqrt(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_sqrt(tensor.primitive)),\n }\n }\n\n fn float_abs(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Abs;\n\n 
retro_unary!(RetroAbs, B::float_abs);\n\n impl<B: Backend> Backward<B, 1> for Abs {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let tensor: B::FloatTensorPrimitive = checkpointer.retrieve_node_output(ops.state);\n let state = B::float_sign(tensor);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_mul(grad, state)\n });\n }\n }\n\n match Abs\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroAbs::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_abs(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_abs(tensor.primitive)),\n }\n }\n\n fn float_cos(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Cos;\n\n retro_unary!(RetroCos, B::float_cos);\n\n impl<B: Backend> Backward<B, 1> for Cos {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_neg(B::float_sin(input));\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Cos\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroCos::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_cos(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_cos(tensor.primitive)),\n }\n }\n\n fn float_sin(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sin;\n\n retro_unary!(RetroSin, B::float_sin);\n\n impl<B: Backend> Backward<B, 1> for Sin {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 
1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let state = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_cos(state);\n B::float_mul(grad, value)\n });\n }\n }\n\n match Sin\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSin::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_sin(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_sin(tensor.primitive)),\n }\n }\n\n fn float_tanh(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Tanh;\n\n retro_unary!(RetroTanh, B::float_tanh);\n\n impl<B: Backend> Backward<B, 1> for Tanh {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n let state = B::float_tanh(input);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_add_scalar(\n B::float_neg(B::float_powf_scalar(state, 2.0)),\n 1.elem(),\n );\n B::float_mul(grad, value)\n });\n }\n }\n\n match Tanh\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroTanh::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_tanh(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_tanh(tensor.primitive)),\n }\n }\n\n fn float_round(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Round;\n retro_unary!(RetroRound, B::float_round);\n\n impl<B: Backend> Backward<B, 1> for Round {\n type State = (Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n 
) {\n let (shape, device) = ops.state;\n unary::<B, _>(ops.parents, ops.node, grads, |_grad| {\n B::float_zeros(shape, &device)\n })\n }\n }\n\n match Round\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroRound::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (tensor.primitive.shape(), B::float_device(&tensor.primitive)),\n B::float_round(tensor.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_round(tensor.primitive)),\n }\n }\n\n fn float_floor(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Floor;\n retro_unary!(RetroFloor, B::float_floor);\n\n impl<B: Backend> Backward<B, 1> for Floor {\n type State = (Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape, device) = ops.state;\n unary::<B, _>(ops.parents, ops.node, grads, |_grad| {\n B::float_zeros(shape, &device)\n })\n }\n }\n\n match Floor\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroFloor::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (tensor.primitive.shape(), B::float_device(&tensor.primitive)),\n B::float_floor(tensor.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_floor(tensor.primitive)),\n }\n }\n\n fn float_ceil(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Ceil;\n retro_unary!(RetroCeil, B::float_ceil);\n\n impl<B: Backend> Backward<B, 1> for Ceil {\n type State = (Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape, device) = ops.state;\n unary::<B, _>(ops.parents, ops.node, grads, |_grad| {\n B::float_zeros(shape, &device)\n })\n }\n }\n\n match Ceil\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n 
.retro_forward(RetroCeil::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (tensor.primitive.shape(), B::float_device(&tensor.primitive)),\n B::float_floor(tensor.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_floor(tensor.primitive)),\n }\n }\n\n fn float_erf(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Erf;\n\n retro_unary!(RetroErf, B::float_erf);\n\n impl<B: Backend> Backward<B, 1> for Erf {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let ops = checkpointer.retrieve_node_output(ops.state);\n let exponent = B::float_neg(B::float_powf_scalar(ops, 2.0));\n let numerator = B::float_mul_scalar(B::float_exp(exponent), 2.0.elem());\n let denominator = core::f64::consts::PI.sqrt().elem();\n let value = B::float_div_scalar(numerator, denominator);\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Erf\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroErf::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_erf(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_erf(tensor.primitive)),\n }\n }\n\n fn float_cat(tensors: Vec<FloatTensor<Self>>, dim: usize) -> FloatTensor<Self> {\n #[derive(new, Debug)]\n struct CatStep<B: Backend> {\n nodes: Vec<Option<NodeRef>>,\n // The dimension of each tensor along the dim dimension.\n // This indicates the number of dimension concatenated for each tensor.\n dim_sizes: Vec<usize>,\n output: NodeRef,\n phantom: PhantomData<B>,\n dim: usize,\n }\n\n impl<B: Backend> Step for CatStep<B> {\n fn step(self: Box<Self>, grads: &mut Gradients, _checkpointer: &mut Checkpointer) {\n let grad = 
grads.consume::<B>(&self.output);\n let ranges: Vec<_> = grad.shape().dims.iter().map(|v| 0..*v).collect();\n\n let mut current_index = 0;\n\n self.nodes\n .into_iter()\n .zip(self.dim_sizes)\n .filter_map(|(node, dim_size)| node.map(|node| (node, dim_size)))\n .for_each(|(node, dim_size)| {\n let mut ranges = ranges.clone();\n ranges[self.dim] = current_index..dim_size + current_index;\n current_index += dim_size;\n grads.register::<B>(node.id, B::float_slice(grad.clone(), &ranges));\n });\n }\n\n fn node(&self) -> NodeID {\n self.output.id\n }\n\n fn parents(&self) -> Vec<NodeID> {\n self.nodes\n .iter()\n .filter_map(|node| node.clone())\n .map(|node| node.id)\n .collect()\n }\n fn depth(&self) -> usize {\n self.output.order\n }\n }\n\n let mut nodes = Vec::with_capacity(tensors.len());\n let mut primitives = Vec::with_capacity(tensors.len());\n let mut dim_sizes = Vec::with_capacity(tensors.len());\n\n tensors.into_iter().for_each(|tensor| {\n dim_sizes.push(tensor.primitive.shape().dims[dim]);\n nodes.push(tensor.node);\n primitives.push(tensor.primitive);\n });\n\n let requirement = Requirement::from_nodes(&nodes);\n\n // For simplicity, this operation does not checkpoint anything\n let cat_computing_property = ComputingProperty::Ambiguous;\n let checkpointer_builder = CheckpointerBuilder::default();\n\n let output = B::float_cat(primitives, dim);\n if requirement.is_none() {\n return AutodiffTensor::from_parents(\n output,\n &nodes,\n requirement,\n cat_computing_property,\n );\n }\n\n let output =\n AutodiffTensor::from_parents(output, &nodes, requirement, cat_computing_property);\n let nodes = nodes\n .into_iter()\n .map(|node| node.clone_if_require_grad())\n .collect::<Vec<_>>();\n\n let ops = CatStep::<B>::new(nodes, dim_sizes, output.node.clone(), dim);\n output.register_step(ops, checkpointer_builder)\n }\n\n fn float_max_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {\n match MaxMinDim\n .prepare::<C>([tensor.node])\n 
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => {
                let shape = tensor.primitive.shape();
                // The argmax indices and input shape become the backward state;
                // `MaxMinDim`'s backward (defined elsewhere in this file) presumably
                // scatters the gradient to those indices — confirm against its impl.
                let (tensor, index) = B::float_max_dim_with_indices(tensor.primitive, dim);
                prep.finish((index, shape), tensor)
            }
            // Untracked: no state needed, use the cheaper index-free primitive.
            OpsKind::UnTracked(prep) => prep.finish(B::float_max_dim(tensor.primitive, dim)),
        }
    }
    // Same as `float_max_dim`, but also returns the argmax indices to the caller.
    fn float_max_dim_with_indices(
        tensor: FloatTensor<Self>,
        dim: usize,
    ) -> (FloatTensor<Self>, IntTensor<B>) {
        match MaxMinDim
            .prepare::<C>([tensor.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => {
                let shape = tensor.primitive.shape();
                let (tensor, index) = B::float_max_dim_with_indices(tensor.primitive, dim);
                // Indices are cloned: one copy feeds the backward state, the
                // other is returned to the caller.
                let tensor = prep.finish((index.clone(), shape), tensor);

                (tensor, index)
            }
            OpsKind::UnTracked(prep) => {
                let (tensor, index) = B::float_max_dim_with_indices(tensor.primitive, dim);
                let tensor = prep.finish(tensor);

                (tensor, index)
            }
        }
    }
    fn float_min_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {
        // Min reuses the same `MaxMinDim` backward as max: the saved indices
        // identify the selected elements either way.
        match MaxMinDim
            .prepare::<C>([tensor.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => {
                let shape = tensor.primitive.shape();
                let (tensor, index) = B::float_min_dim_with_indices(tensor.primitive, dim);
                prep.finish((index, shape), tensor)
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_min_dim(tensor.primitive, dim)),
        }
    }
    // Same as `float_min_dim`, but also returns the argmin indices to the caller.
    fn float_min_dim_with_indices(
        tensor: FloatTensor<Self>,
        dim: usize,
    ) -> (FloatTensor<Self>, IntTensor<B>) {
        match MaxMinDim
            .prepare::<C>([tensor.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => {
                let shape = tensor.primitive.shape();
                let (tensor, index) = B::float_min_dim_with_indices(tensor.primitive, dim);
                // Clone mirrors `float_max_dim_with_indices`: state + caller copy.
                let tensor = prep.finish((index.clone(), shape), tensor);

                (tensor, index)
            }
            OpsKind::UnTracked(prep) => {
                let (tensor, index) = B::float_min_dim_with_indices(tensor.primitive, dim);
                let tensor = prep.finish(tensor);

                (tensor, index)
            }
        }
    }

    
fn float_into_int(tensor: FloatTensor<Self>) -> <Autodiff<B> as Backend>::IntTensorPrimitive {\n B::float_into_int(tensor.primitive)\n }\n\n fn float_powf(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct PowF;\n\n retro_binary!(RetroPowf, B::float_powf);\n\n impl<B: Backend> Backward<B, 2> for PowF {\n type State = (NodeID, NodeID, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (lhs_id, rhs_id, broadcast) = ops.state;\n let lhs: B::FloatTensorPrimitive = checkpointer.retrieve_node_output(lhs_id);\n let rhs: B::FloatTensorPrimitive = checkpointer.retrieve_node_output(rhs_id);\n\n // Both lhs and rhs are needed for both lhs and rhs gradients, but we clone them\n // the number of times required by the parents specification.\n let [rhs_4lhs, rhs_4rhs] = duplicate(&ops.parents, Some(rhs));\n let [lhs_4lhs, lhs_4rhs] = duplicate(&ops.parents, Some(lhs));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n //rhs*(lhs.val**(rhs-1))*grad\n let rhs1 = rhs_4lhs.unwrap();\n let rhs2 = rhs1.clone();\n let lhs = lhs_4lhs.unwrap();\n\n let tmp = B::float_powf(\n lhs,\n B::float_sub_scalar(rhs1, B::FloatElem::from_elem(1.0)),\n );\n let value = B::float_mul(tmp, rhs2);\n let grad = B::float_mul(grad, value);\n\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n //lhs**rhs * ln(lhs) * grad\n let rhs = rhs_4rhs.unwrap();\n let lhs1 = lhs_4rhs.unwrap();\n let lhs2 = lhs1.clone();\n let tmp = B::float_powf(lhs1, rhs);\n let value = B::float_mul(tmp, B::float_log(lhs2));\n let grad = B::float_mul(grad, value);\n\n broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match PowF\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroPowf::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, 
&rhs])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = prep.checkpoint(&lhs);\n let rhs_state = prep.checkpoint(&rhs);\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n B::float_powf(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_powf(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_sign(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sign;\n\n retro_unary!(RetroSign, B::float_sign);\n\n impl<B: Backend> Backward<B, 1> for Sign {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad|\n // Always return 0 because the derivative of the sign function\n // does not contribute to gradient updates in a meaningful way.\n B::float_mul_scalar(grad, 0.elem()));\n }\n }\n\n Sign.prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSign::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateless(B::float_sign(tensor.primitive))\n }\n\n fn float_expand(tensor: FloatTensor<Self>, shape: Shape) -> FloatTensor<Self> {\n // D1: tensor, D2: shape\n #[derive(Debug)]\n struct ExpandDim;\n\n #[derive(new, Debug)]\n struct RetroExpand<B: Backend> {\n input_id: NodeID,\n shape: Shape,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroExpand<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_expand(input, self.shape.clone());\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for ExpandDim {\n type State = (Shape, Shape);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape_in, shape_out) = ops.state;\n let ndims_in = shape_in.num_dims();\n let ndims_out = shape_out.num_dims();\n\n let 
mut shape_expanded = vec![1; ndims_out];\n\n debug_assert!(ndims_out >= ndims_in);\n\n for i in 0..ndims_in {\n shape_expanded[i + (ndims_out - ndims_in)] = shape_in.dims[i];\n }\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let shape_grad = grad.shape();\n let mut grad = grad;\n\n #[allow(clippy::needless_range_loop)]\n for i in 0..ndims_out {\n if shape_expanded[i] == 1 && shape_grad.dims[i] != 1 {\n grad = B::float_sum_dim(grad, i);\n }\n }\n\n B::float_reshape(grad, shape_in)\n });\n }\n }\n\n match ExpandDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroExpand::<B>::new(tensor.node.id, shape.clone()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (tensor.primitive.shape(), shape.clone()),\n B::float_expand(tensor.primitive, shape),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_expand(tensor.primitive, shape)),\n }\n }\n\n fn float_sort(tensor: FloatTensor<Self>, dim: usize, descending: bool) -> FloatTensor<Self> {\n match super::sort::SortDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let shape = tensor.primitive.shape();\n let (tensor, indices) =\n B::float_sort_with_indices(tensor.primitive, dim, descending);\n prep.finish((indices, shape), tensor)\n }\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_sort(tensor.primitive, dim, descending))\n }\n }\n }\n\n fn float_sort_with_indices(\n tensor: FloatTensor<Self>,\n dim: usize,\n descending: bool,\n ) -> (FloatTensor<Self>, IntTensor<B>) {\n match super::sort::SortDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let shape = tensor.primitive.shape();\n let (tensor, indices) =\n B::float_sort_with_indices(tensor.primitive, dim, descending);\n let tensor = prep.finish((indices.clone(), shape), tensor);\n\n (tensor, indices)\n }\n OpsKind::UnTracked(prep) => {\n let (tensor, indices) =\n 
B::float_sort_with_indices(tensor.primitive, dim, descending);\n let tensor = prep.finish(tensor);\n\n (tensor, indices)\n }\n }\n }\n\n fn float_argsort(tensor: FloatTensor<Self>, dim: usize, descending: bool) -> IntTensor<B> {\n B::float_argsort(tensor.primitive, dim, descending)\n }\n\n fn float_repeat_dim(tensor: FloatTensor<Self>, dim: usize, times: usize) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Repeat;\n\n #[derive(new, Debug)]\n struct RetroRepeat<B: Backend> {\n tensor_id: NodeID,\n dim: usize,\n times: usize,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroRepeat<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);\n let out = B::float_repeat_dim(tensor, self.dim, self.times);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for Repeat {\n type State = (usize, usize);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, times) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let mut dims = grad.shape().dims;\n let orig_dim_size = dims[dim] / times;\n if orig_dim_size > 1 {\n dims[dim] = orig_dim_size;\n let orig_dims = dims.clone();\n dims.insert(dim + 1, times); // shape [..., orig_dim_size, times, ...]\n let grad = B::float_reshape(grad, Shape::from(dims));\n let grad = B::float_sum_dim(grad, dim + 1); // sum over repeat times\n B::float_reshape(grad, Shape::from(orig_dims))\n } else {\n B::float_sum_dim(grad, dim)\n }\n });\n }\n }\n\n match Repeat\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroRepeat::<B>::new(tensor.node.id, dim, times))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (dim, times),\n B::float_repeat_dim(tensor.primitive, dim, times),\n ),\n OpsKind::UnTracked(prep) => {\n 
prep.finish(B::float_repeat_dim(tensor.primitive, dim, times))\n }\n }\n }\n\n fn float_cast(tensor: FloatTensor<Self>, dtype: burn_tensor::FloatDType) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_cast(tensor.primitive, dtype))\n }\n\n // TODO: Implement float_prod and float_sum\n // https://github.com/tracel-ai/burn/issues/1458\n}"
],
"name": "lhs",
"type": "FloatTensor<Self>"
},
{
"definitions": [
"impl<B: Backend, C: CheckpointStrategy> FloatTensorOps<Self> for Autodiff<B, C> {\n fn float_from_data(data: TensorData, device: &Device<Self>) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_from_data(data, device))\n }\n\n fn float_random(\n shape: Shape,\n distribution: burn_tensor::Distribution,\n device: &Device<Self>,\n ) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_random(shape, distribution, device))\n }\n\n fn float_zeros(shape: Shape, device: &Device<Self>) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_zeros(shape, device))\n }\n\n fn float_ones(shape: Shape, device: &Device<Self>) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_ones(shape, device))\n }\n\n async fn float_into_data(tensor: FloatTensor<Self>) -> TensorData {\n B::float_into_data(tensor.primitive).await\n }\n\n fn float_device(tensor: &FloatTensor<Self>) -> Device<Self> {\n B::float_device(&tensor.primitive)\n }\n\n fn float_to_device(tensor: FloatTensor<Self>, device: &Device<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct ToDevice;\n\n impl<B: Backend> Backward<B, 1> for ToDevice {\n type State = B::Device;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_to_device(grad, &ops.state)\n });\n }\n }\n\n match ToDevice\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let device_old = B::float_device(&tensor.primitive);\n prep.finish(device_old, B::float_to_device(tensor.primitive, device))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_to_device(tensor.primitive, device)),\n }\n }\n\n fn float_empty(shape: Shape, device: &Device<Self>) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_empty(shape, device))\n }\n\n fn float_add(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Add;\n\n retro_binary!(RetroAdd, 
B::float_add);\n\n impl<B: Backend> Backward<B, 2> for Add {\n type State = (Shape, Shape);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape_lhs, shape_rhs) = ops.state;\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| broadcast_shape::<B>(grad, &shape_lhs),\n |grad| broadcast_shape::<B>(grad, &shape_rhs),\n );\n }\n }\n\n match Add\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroAdd::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (lhs.primitive.shape(), rhs.primitive.shape()),\n B::float_add(lhs.primitive, rhs.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_add(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_add_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct AddScalar;\n\n retro_unary_scalar!(RetroAddScalar, B::float_add_scalar);\n\n impl<B: Backend> Backward<B, 1> for AddScalar {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| grad);\n }\n }\n\n AddScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroAddScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateless(B::float_add_scalar(lhs.primitive, rhs))\n }\n\n fn float_sub(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sub;\n\n retro_binary!(RetroSub, B::float_sub);\n\n impl<B: Backend> Backward<B, 2> for Sub {\n type State = (Shape, Shape);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape_lhs, shape_rhs) = ops.state;\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| 
broadcast_shape::<B>(grad, &shape_lhs),\n |grad| broadcast_shape::<B>(B::float_neg(grad), &shape_rhs),\n );\n }\n }\n\n match Sub\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroSub::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (lhs.primitive.shape(), rhs.primitive.shape()),\n B::float_sub(lhs.primitive, rhs.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_sub(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_sub_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct SubScalar;\n\n retro_unary_scalar!(RetroSubScalar, B::float_sub_scalar);\n\n impl<B: Backend> Backward<B, 1> for SubScalar {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| grad);\n }\n }\n\n SubScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroSubScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateless(B::float_sub_scalar(lhs.primitive, rhs))\n }\n\n fn float_mul(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Mul;\n\n retro_binary!(RetroMul, B::float_mul);\n\n impl<B: Backend> Backward<B, 2> for Mul {\n type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (lhs, rhs, broadcast) = ops.state;\n let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));\n let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let grad = B::float_mul(grad, rhs.unwrap());\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n let grad = B::float_mul(grad, lhs.unwrap());\n 
broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let lhs_tracked = lhs.is_tracked();\n let rhs_tracked = rhs.is_tracked();\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match Mul\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroMul::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));\n let rhs_state = lhs_tracked.then(|| prep.checkpoint(&rhs));\n\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n B::float_mul(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_mul(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_mul_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct MulScalar;\n\n retro_unary_scalar!(RetroMulScalar, B::float_mul_scalar);\n\n impl<B: Backend> Backward<B, 1> for MulScalar {\n type State = FloatElem<B>;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_mul_scalar(grad, ops.state)\n });\n }\n }\n\n match MulScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroMulScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(rhs, B::float_mul_scalar(lhs.primitive, rhs)),\n OpsKind::UnTracked(prep) => prep.finish(B::float_mul_scalar(lhs.primitive, rhs)),\n }\n }\n\n fn float_div(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Div;\n\n retro_binary!(RetroDiv, B::float_div);\n\n impl<B: Backend> Backward<B, 2> for Div {\n type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut 
Checkpointer,\n ) {\n let (lhs, rhs, broadcast) = ops.state;\n let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));\n let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));\n let [rhs_4lhs, rhs_4rhs] = duplicate(&ops.parents, rhs);\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let rhs = rhs_4lhs.unwrap();\n let value = B::float_powf_scalar(rhs, -1.0);\n let grad = B::float_mul(grad, value);\n\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n let rhs = rhs_4rhs.unwrap();\n let lhs = lhs.unwrap();\n let value = B::float_div(B::float_neg(lhs), B::float_powf_scalar(rhs, 2.0));\n let grad = B::float_mul(grad, value);\n\n broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let lhs_tracked = lhs.is_tracked();\n let rhs_tracked = rhs.is_tracked();\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match Div\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroDiv::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));\n let rhs_state = (lhs_tracked || rhs_tracked).then(|| prep.checkpoint(&rhs));\n\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n B::float_div(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_div(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_div_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct DivScalar;\n\n retro_unary_scalar!(RetroDivScalar, B::float_div_scalar);\n\n impl<B: Backend> Backward<B, 1> for DivScalar {\n type State = FloatElem<B>;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let tmp = 1.0 / ops.state.elem::<f32>();\n B::float_mul_scalar(grad, tmp.elem())\n });\n }\n 
}\n\n match DivScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroDivScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(rhs, B::float_div_scalar(lhs.primitive, rhs)),\n OpsKind::UnTracked(prep) => prep.finish(B::float_div_scalar(lhs.primitive, rhs)),\n }\n }\n\n fn float_remainder(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Rem;\n\n retro_binary!(RetroRem, B::float_remainder);\n\n impl<B: Backend> Backward<B, 2> for Rem {\n type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (lhs, rhs, broadcast) = ops.state;\n let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));\n let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n // remainder(x, y) = x - floor(x / y) * y\n // partial(x - floor(x / y) * y, x) = 1\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n // partial(x - floor(x / y) * y, y) = - floor(x / y)\n let rhs = rhs.unwrap();\n let lhs = lhs.unwrap();\n let value = B::float_neg(B::float_floor(B::float_div(lhs, rhs)));\n let grad = B::float_mul(grad, value);\n broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let lhs_tracked = lhs.is_tracked();\n let rhs_tracked = rhs.is_tracked();\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match Rem\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroRem::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));\n let rhs_state = (lhs_tracked || rhs_tracked).then(|| prep.checkpoint(&rhs));\n\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n 
B::float_remainder(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_remainder(lhs.primitive, rhs.primitive))\n }\n }\n }\n\n fn float_remainder_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct RemainderScalar;\n\n retro_unary_scalar!(RetroRemainderScalar, B::float_remainder_scalar);\n\n impl<B: Backend> Backward<B, 1> for RemainderScalar {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| grad);\n }\n }\n\n RemainderScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroRemainderScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateless(B::float_remainder_scalar(lhs.primitive, rhs))\n }\n\n fn float_matmul(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Matmul;\n\n impl<B: Backend> Backward<B, 2> for Matmul {\n type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (lhs, rhs, broadcast) = ops.state;\n let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));\n let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let rhs = B::float_transpose(rhs.unwrap());\n let grad = B::float_matmul(grad, rhs);\n\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n let lhs = B::float_transpose(lhs.unwrap());\n let grad = B::float_matmul(lhs, grad);\n\n broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let lhs_tracked = lhs.is_tracked();\n let rhs_tracked = rhs.is_tracked();\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match Matmul\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n 
.compute_bound()\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));\n let rhs_state = lhs_tracked.then(|| prep.checkpoint(&rhs));\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n B::float_matmul(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_matmul(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_neg(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Neg;\n\n retro_unary!(RetroNeg, B::float_neg);\n\n impl<B: Backend> Backward<B, 1> for Neg {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| B::float_neg(grad));\n }\n }\n\n Neg.prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroNeg::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateless(B::float_neg(tensor.primitive))\n }\n\n fn float_recip(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Recip;\n\n retro_unary!(RetroRecip, B::float_recip);\n\n impl<B: Backend> Backward<B, 1> for Recip {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let tensor = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let tmp = B::float_powf_scalar(tensor, -2.0);\n let value = B::float_neg(tmp);\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Recip\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroRecip::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_recip(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_recip(tensor.primitive)),\n }\n }\n\n fn float_swap_dims(tensor: 
FloatTensor<Self>, dim1: usize, dim2: usize) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct SwapDim;\n\n #[derive(new, Debug)]\n struct RetroSwapDims<B: Backend> {\n input_id: NodeID,\n dim1: usize,\n dim2: usize,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroSwapDims<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_swap_dims(input, self.dim1, self.dim2);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for SwapDim {\n type State = (usize, usize);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim1, dim2) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_swap_dims(grad, dim2, dim1)\n });\n }\n }\n\n match SwapDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSwapDims::<B>::new(tensor.node.id, dim1, dim2))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (dim1, dim2),\n B::float_swap_dims(tensor.primitive, dim1, dim2),\n ),\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_swap_dims(tensor.primitive, dim1, dim2))\n }\n }\n }\n\n fn float_permute(tensor: FloatTensor<Self>, axes: &[usize]) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct PermuteDim;\n\n #[derive(new, Debug)]\n struct RetroPermuteDims<B: Backend> {\n input_id: NodeID,\n axes: Vec<usize>,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroPermuteDims<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_permute(input, &self.axes);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for PermuteDim {\n type State = Vec<usize>;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: 
&mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let axes = ops.state;\n\n let mut inverse = vec![0usize; axes.len()];\n axes.iter()\n .enumerate()\n .for_each(|(i, &axis)| inverse[axis] = i);\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_permute(grad, &inverse)\n });\n }\n }\n\n match PermuteDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroPermuteDims::<B>::new(tensor.node.id, axes.to_vec()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n prep.finish(axes.to_vec(), B::float_permute(tensor.primitive, axes))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_permute(tensor.primitive, axes)),\n }\n }\n\n fn float_flip(tensor: FloatTensor<Self>, axes: &[usize]) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct FlipDim;\n\n #[derive(new, Debug)]\n struct RetroFlipDims<B: Backend> {\n input_id: NodeID,\n axes: Vec<usize>,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroFlipDims<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_flip(input, &self.axes);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for FlipDim {\n type State = Vec<usize>;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let axes = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_flip(grad, &axes)\n });\n }\n }\n\n match FlipDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroFlipDims::<B>::new(tensor.node.id, axes.to_vec()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n prep.finish(axes.to_vec(), B::float_flip(tensor.primitive, axes))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_flip(tensor.primitive, axes)),\n }\n }\n\n fn float_reshape(tensor: FloatTensor<Self>, shape: 
Shape) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct ReshapeDim;\n\n #[derive(new, Debug)]\n struct RetroReshape<B: Backend> {\n input_id: NodeID,\n shape: Shape,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroReshape<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_reshape(input, self.shape.clone());\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for ReshapeDim {\n type State = (Shape, Shape);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape_original, shape) = ops.state;\n let ndims_out = shape.num_dims();\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let shape_grad = grad.shape();\n let mut grad = grad;\n\n for i in 0..ndims_out {\n if shape.dims[i] == 1 && shape_grad.dims[i] != 1 {\n grad = B::float_sum_dim(grad, i);\n }\n }\n\n B::float_reshape(grad, shape_original)\n });\n }\n }\n\n match ReshapeDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroReshape::<B>::new(tensor.node.id, shape.clone()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (tensor.primitive.shape(), shape.clone()),\n B::float_reshape(tensor.primitive, shape),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_reshape(tensor.primitive, shape)),\n }\n }\n\n fn float_gather(\n dim: usize,\n tensor: FloatTensor<Self>,\n indices: IntTensor<B>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Gather;\n\n impl<B: Backend> Backward<B, 1> for Gather {\n type State = (usize, IntTensor<B>, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, indices, shape, device) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let zeros = 
B::float_zeros(shape, &device);\n B::float_scatter(dim, zeros, indices, grad)\n });\n }\n }\n\n match Gather\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n dim,\n indices.clone(),\n tensor.primitive.shape(),\n B::float_device(&tensor.primitive),\n ),\n B::float_gather(dim, tensor.primitive, indices),\n ),\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_gather(dim, tensor.primitive, indices))\n }\n }\n }\n\n fn float_scatter(\n dim: usize,\n tensor: FloatTensor<Self>,\n indices: IntTensor<B>,\n value: FloatTensor<Self>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Scatter;\n\n impl<B: Backend> Backward<B, 2> for Scatter {\n type State = (usize, IntTensor<B>, Shape, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, indices, shape_lhs, shape_rhs, device) = ops.state;\n let [indices_4lhs, indices_4rhs] = duplicate(&ops.parents, Some(indices));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let zeros = B::float_zeros(shape_lhs, &device);\n B::float_scatter(dim, grad, indices_4lhs.unwrap(), zeros)\n },\n |grad| {\n let zeros = B::float_zeros(shape_rhs, &device);\n B::float_scatter(dim, zeros, indices_4rhs.unwrap(), grad)\n },\n );\n }\n }\n\n match Scatter\n .prepare::<C>([tensor.node, value.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n dim,\n indices.clone(),\n tensor.primitive.shape(),\n value.primitive.shape(),\n B::float_device(&value.primitive),\n ),\n B::float_scatter(dim, tensor.primitive, indices, value.primitive),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_scatter(\n dim,\n tensor.primitive,\n indices,\n value.primitive,\n )),\n }\n }\n\n fn float_select(\n tensor: FloatTensor<Self>,\n dim: usize,\n indices: IntTensor<B>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Select;\n\n 
#[derive(new, Debug)]\n struct RetroSelect<B: Backend> {\n input_id: NodeID,\n dim: usize,\n indices: IntTensor<B>,\n }\n\n impl<B: Backend> RetroForward for RetroSelect<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_select(input, self.dim, self.indices.clone());\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for Select {\n type State = (usize, IntTensor<B>, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, indices, shape, device) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let zeros = B::float_zeros(shape, &device);\n B::float_select_assign(zeros, dim, indices, grad)\n });\n }\n }\n\n match Select\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSelect::<B>::new(tensor.node.id, dim, indices.clone()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n dim,\n indices.clone(),\n tensor.primitive.shape(),\n B::float_device(&tensor.primitive),\n ),\n B::float_select(tensor.primitive, dim, indices),\n ),\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_select(tensor.primitive, dim, indices))\n }\n }\n }\n\n fn float_select_assign(\n tensor: FloatTensor<Self>,\n dim: usize,\n indices: IntTensor<B>,\n value: FloatTensor<Self>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct IndexSelectDimAssign;\n\n #[derive(new, Debug)]\n struct RetroSelectAssign<B: Backend> {\n tensor_id: NodeID,\n dim: usize,\n indices: IntTensor<B>,\n value_id: NodeID,\n }\n\n impl<B: Backend> RetroForward for RetroSelectAssign<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);\n let value = states.get_state::<B::FloatTensorPrimitive>(&self.value_id);\n 
let out = B::float_select_assign(tensor, self.dim, self.indices.clone(), value);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 2> for IndexSelectDimAssign {\n type State = (usize, IntTensor<B>);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, indices) = ops.state;\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| grad,\n |grad| B::float_select(grad, dim, indices),\n );\n }\n }\n\n match IndexSelectDimAssign\n .prepare::<C>([tensor.node.clone(), value.node.clone()])\n .memory_bound()\n .retro_forward(RetroSelectAssign::<B>::new(\n tensor.node.id,\n dim,\n indices.clone(),\n value.node.id,\n ))\n .parents([&tensor, &value])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (dim, indices.clone()),\n B::float_select_assign(tensor.primitive, dim, indices, value.primitive),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_select_assign(\n tensor.primitive,\n dim,\n indices,\n value.primitive,\n )),\n }\n }\n\n fn float_slice(\n tensor: FloatTensor<Self>,\n ranges: &[core::ops::Range<usize>],\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Index;\n\n #[derive(new, Debug)]\n struct RetroSlice<B: Backend> {\n tensor_id: NodeID,\n ranges: Vec<core::ops::Range<usize>>,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroSlice<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);\n let out = B::float_slice(tensor, &self.ranges);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for Index {\n type State = (Vec<core::ops::Range<usize>>, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (ranges, shape, device) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let zeros = 
B::float_zeros(shape, &device);\n B::float_slice_assign(zeros, &ranges, grad)\n });\n }\n }\n\n match Index\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSlice::<B>::new(tensor.node.id, ranges.to_vec()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n ranges.to_vec(),\n tensor.primitive.shape(),\n B::float_device(&tensor.primitive),\n ),\n B::float_slice(tensor.primitive, ranges),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_slice(tensor.primitive, ranges)),\n }\n }\n\n fn float_slice_assign(\n tensor: FloatTensor<Self>,\n ranges: &[core::ops::Range<usize>],\n value: FloatTensor<Self>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct SliceAssign;\n\n #[derive(new, Debug)]\n struct RetroSliceAssign<B: Backend> {\n tensor_id: NodeID,\n ranges: Vec<core::ops::Range<usize>>,\n value_id: NodeID,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroSliceAssign<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);\n let value = states.get_state::<B::FloatTensorPrimitive>(&self.value_id);\n let out = B::float_slice_assign(tensor, &self.ranges, value);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 2> for SliceAssign {\n type State = (Vec<core::ops::Range<usize>>, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (ranges, shape_rhs, device) = ops.state;\n let [ranges_4lhs, ranges_4rhs] = duplicate(&ops.parents, Some(ranges));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let zeros = B::float_zeros(shape_rhs, &device);\n B::float_slice_assign(grad, &ranges_4lhs.unwrap(), zeros)\n },\n |grad| B::float_slice(grad, &ranges_4rhs.unwrap()),\n );\n }\n }\n\n match SliceAssign\n .prepare::<C>([tensor.node.clone(), value.node.clone()])\n 
.memory_bound()\n .retro_forward(RetroSliceAssign::<B>::new(\n tensor.node.id,\n ranges.to_vec(),\n value.node.id,\n ))\n .parents([&tensor, &value])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n ranges.to_vec(),\n value.primitive.shape(),\n B::float_device(&value.primitive),\n ),\n B::float_slice_assign(tensor.primitive, ranges, value.primitive),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_slice_assign(\n tensor.primitive,\n ranges,\n value.primitive,\n )),\n }\n }\n\n fn float_mask_where(\n tensor: FloatTensor<Self>,\n mask: BoolTensor<Self>,\n source: FloatTensor<Self>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct MaskWhere;\n\n impl<B: Backend> Backward<B, 2> for MaskWhere {\n type State = (BoolTensor<B>, Shape, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (mask, shape_lhs, shape_rhs, device) = ops.state;\n let [mask_4lhs, mask_4rhs] = duplicate(&ops.parents, Some(mask));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let zeros = B::float_zeros(shape_lhs.clone(), &device);\n let grad = B::float_mask_where(grad, mask_4lhs.unwrap(), zeros);\n\n broadcast_shape::<B>(grad, &shape_lhs)\n },\n |grad| {\n let zeros = B::float_zeros(shape_rhs.clone(), &device);\n let grad = B::float_mask_where(zeros, mask_4rhs.unwrap(), grad);\n\n broadcast_shape::<B>(grad, &shape_rhs)\n },\n );\n }\n }\n\n match MaskWhere\n .prepare::<C>([tensor.node, source.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n mask.clone(),\n tensor.primitive.shape(),\n source.primitive.shape(),\n B::float_device(&source.primitive),\n ),\n B::float_mask_where(tensor.primitive, mask, source.primitive),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_mask_where(\n tensor.primitive,\n mask,\n source.primitive,\n )),\n }\n }\n\n fn float_mask_fill(\n tensor: FloatTensor<Self>,\n mask: 
BoolTensor<B>,\n value: FloatElem<B>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct MaskFill;\n\n impl<B: Backend> Backward<B, 1> for MaskFill {\n type State = BoolTensor<B>;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_mask_fill(grad, ops.state, 0.elem())\n });\n }\n }\n\n match MaskFill\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n mask.clone(),\n B::float_mask_fill(tensor.primitive, mask, value),\n ),\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_mask_fill(tensor.primitive, mask, value))\n }\n }\n }\n\n fn float_equal(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {\n B::float_equal(lhs.primitive, rhs.primitive)\n }\n\n fn float_equal_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {\n B::float_equal_elem(lhs.primitive, rhs)\n }\n\n fn float_greater(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {\n B::float_greater(lhs.primitive, rhs.primitive)\n }\n\n fn float_greater_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {\n B::float_greater_elem(lhs.primitive, rhs)\n }\n\n fn float_greater_equal(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {\n B::float_greater_equal(lhs.primitive, rhs.primitive)\n }\n\n fn float_greater_equal_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {\n B::float_greater_equal_elem(lhs.primitive, rhs)\n }\n\n fn float_lower(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {\n B::float_lower(lhs.primitive, rhs.primitive)\n }\n\n fn float_lower_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {\n B::float_lower_elem(lhs.primitive, rhs)\n }\n\n fn float_lower_equal(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {\n B::float_lower_equal(lhs.primitive, rhs.primitive)\n 
}\n\n fn float_lower_equal_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {\n B::float_lower_equal_elem(lhs.primitive, rhs)\n }\n\n fn float_detach(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n // When we detach a tensor, we remove it from the graph, but we still want to keep the\n // `require_grad` setting.\n let is_require_grad = Self::float_is_require_grad(&tensor);\n let tensor = AutodiffTensor::new(tensor.primitive);\n\n match is_require_grad {\n true => tensor.require_grad(),\n false => tensor,\n }\n }\n\n fn float_set_require_grad(tensor: FloatTensor<Self>, require_grad: bool) -> FloatTensor<Self> {\n if require_grad {\n return tensor.require_grad();\n }\n\n AutodiffTensor::new(tensor.primitive)\n }\n\n fn float_is_require_grad(tensor: &FloatTensor<Self>) -> bool {\n matches!(tensor.node.requirement, Requirement::Grad)\n }\n\n fn float_mean(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Mean;\n\n impl<B: Backend> Backward<B, 1> for Mean {\n type State = Shape;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let shape = ops.state;\n let val = 1_f64 / shape.num_elements() as f64;\n let ones = B::float_ones(shape, &B::float_device(&grad));\n let val = B::float_mul_scalar(ones, val.elem());\n\n let grad = unsqueeze_like::<B>(grad, val.shape());\n B::float_mul(val, grad)\n });\n }\n }\n\n match Mean.prepare::<C>([tensor.node]).compute_bound().stateful() {\n OpsKind::Tracked(prep) => {\n prep.finish(tensor.primitive.shape(), B::float_mean(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_mean(tensor.primitive)),\n }\n }\n\n fn float_sum(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sum;\n\n impl<B: Backend> Backward<B, 1> for Sum {\n type State = Shape;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut 
Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let val = B::float_ones(ops.state, &B::float_device(&grad));\n\n let grad = unsqueeze_like::<B>(grad, val.shape());\n B::float_mul(val, grad)\n });\n }\n }\n\n match Sum.prepare::<C>([tensor.node]).compute_bound().stateful() {\n OpsKind::Tracked(prep) => {\n prep.finish(tensor.primitive.shape(), B::float_sum(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_sum(tensor.primitive)),\n }\n }\n\n fn float_mean_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct MeanDim;\n\n impl<B: Backend> Backward<B, 1> for MeanDim {\n type State = (Shape, usize);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape, dim) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let val = 1_f64 / shape.dims[dim] as f64;\n let ones = B::float_ones(shape, &B::float_device(&grad));\n let val = B::float_mul_scalar(ones, B::FloatElem::from_elem(val));\n\n let grad = B::float_sum_dim(grad, dim);\n B::float_mul(val, grad)\n });\n }\n }\n\n match MeanDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (tensor.primitive.shape(), dim),\n B::float_mean_dim(tensor.primitive, dim),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_mean_dim(tensor.primitive, dim)),\n }\n }\n\n fn float_sum_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct SumDim;\n\n impl<B: Backend> Backward<B, 1> for SumDim {\n type State = (Shape, usize);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape, dim) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let ones = B::float_ones(shape, &B::float_device(&grad));\n let grad = 
B::float_sum_dim(grad, dim);\n\n B::float_mul(ones, grad)\n });\n }\n }\n\n match SumDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (tensor.primitive.shape(), dim),\n B::float_sum_dim(tensor.primitive, dim),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_sum_dim(tensor.primitive, dim)),\n }\n }\n\n fn float_argmax(tensor: FloatTensor<Self>, dim: usize) -> IntTensor<B> {\n B::float_argmax(tensor.primitive, dim)\n }\n\n fn float_argmin(tensor: FloatTensor<Self>, dim: usize) -> IntTensor<B> {\n B::float_argmin(tensor.primitive, dim)\n }\n\n fn float_exp(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Exp;\n\n retro_unary!(RetroExp, B::float_exp);\n\n impl<B: Backend> Backward<B, 1> for Exp {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n let output = B::float_exp(input);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_mul(grad, output)\n });\n }\n }\n\n match Exp\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroExp::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_exp(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_exp(tensor.primitive)),\n }\n }\n\n fn float_log(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Log;\n\n retro_unary!(RetroLog, B::float_log);\n\n impl<B: Backend> Backward<B, 1> for Log {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = 
B::float_powf_scalar(input, -1.0);\n B::float_mul(grad, value)\n });\n }\n }\n\n match Log\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroLog::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_log(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_log(tensor.primitive)),\n }\n }\n\n fn float_log1p(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Log1P;\n\n retro_unary!(RetroLog1P, B::float_log1p);\n\n impl<B: Backend> Backward<B, 1> for Log1P {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_add_scalar(input, 1.elem());\n let value = B::float_powf_scalar(value, -1.0);\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Log1P\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroLog1P::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_log1p(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_log1p(tensor.primitive)),\n }\n }\n\n fn float_powf_scalar(tensor: FloatTensor<Self>, value: f32) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct PowfScalar;\n\n #[derive(new, Debug)]\n struct RetroPowfScalar<B: Backend> {\n lhs_id: NodeID,\n rhs: f32,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroPowfScalar<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let lhs = states.get_state::<B::FloatTensorPrimitive>(&self.lhs_id);\n let out = B::float_powf_scalar(lhs, self.rhs);\n states.save(out_node, out)\n }\n }\n\n impl<B: 
Backend> Backward<B, 1> for PowfScalar {\n type State = (NodeID, f32);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (tensor_id, value) = ops.state;\n let tensor = checkpointer.retrieve_node_output(tensor_id);\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let tmp = B::float_powf_scalar(tensor, value - 1.0);\n let value = B::float_mul_scalar(tmp, value.elem());\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match PowfScalar\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroPowfScalar::<B>::new(tensor.node.id, value))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = (prep.checkpoint(&tensor), value);\n prep.finish(state, B::float_powf_scalar(tensor.primitive, value))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_powf_scalar(tensor.primitive, value)),\n }\n }\n\n fn float_sqrt(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sqrt;\n\n retro_unary!(RetroSqrt, B::float_sqrt);\n\n impl<B: Backend> Backward<B, 1> for Sqrt {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_div_scalar(B::float_powf_scalar(input, -0.5), 2.elem());\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Sqrt\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSqrt::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_sqrt(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_sqrt(tensor.primitive)),\n }\n }\n\n fn float_abs(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Abs;\n\n 
retro_unary!(RetroAbs, B::float_abs);\n\n impl<B: Backend> Backward<B, 1> for Abs {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let tensor: B::FloatTensorPrimitive = checkpointer.retrieve_node_output(ops.state);\n let state = B::float_sign(tensor);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_mul(grad, state)\n });\n }\n }\n\n match Abs\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroAbs::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_abs(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_abs(tensor.primitive)),\n }\n }\n\n fn float_cos(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Cos;\n\n retro_unary!(RetroCos, B::float_cos);\n\n impl<B: Backend> Backward<B, 1> for Cos {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_neg(B::float_sin(input));\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Cos\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroCos::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_cos(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_cos(tensor.primitive)),\n }\n }\n\n fn float_sin(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sin;\n\n retro_unary!(RetroSin, B::float_sin);\n\n impl<B: Backend> Backward<B, 1> for Sin {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 
1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let state = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_cos(state);\n B::float_mul(grad, value)\n });\n }\n }\n\n match Sin\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSin::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_sin(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_sin(tensor.primitive)),\n }\n }\n\n fn float_tanh(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Tanh;\n\n retro_unary!(RetroTanh, B::float_tanh);\n\n impl<B: Backend> Backward<B, 1> for Tanh {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n let state = B::float_tanh(input);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_add_scalar(\n B::float_neg(B::float_powf_scalar(state, 2.0)),\n 1.elem(),\n );\n B::float_mul(grad, value)\n });\n }\n }\n\n match Tanh\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroTanh::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_tanh(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_tanh(tensor.primitive)),\n }\n }\n\n fn float_round(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Round;\n retro_unary!(RetroRound, B::float_round);\n\n impl<B: Backend> Backward<B, 1> for Round {\n type State = (Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n 
            ) {
                // round() is piecewise-constant, so its derivative is zero almost
                // everywhere: the incoming gradient is discarded and a zero tensor
                // of the input's shape/device is propagated instead.
                let (shape, device) = ops.state;
                unary::<B, _>(ops.parents, ops.node, grads, |_grad| {
                    B::float_zeros(shape, &device)
                })
            }
        }

        match Round
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroRound::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            // Tracked: only (shape, device) are saved — enough to build the zero
            // gradient without retaining the input tensor itself.
            OpsKind::Tracked(preps) => preps.finish(
                (tensor.primitive.shape(), B::float_device(&tensor.primitive)),
                B::float_round(tensor.primitive),
            ),
            OpsKind::UnTracked(preps) => preps.finish(B::float_round(tensor.primitive)),
        }
    }

    // Element-wise floor with autodiff support. Like round, the gradient is
    // zero almost everywhere, so backward emits a zero tensor.
    fn float_floor(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Floor;
        retro_unary!(RetroFloor, B::float_floor);

        impl<B: Backend> Backward<B, 1> for Floor {
            // Shape and device of the input, needed to construct the zero gradient.
            type State = (Shape, B::Device);

            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (shape, device) = ops.state;
                unary::<B, _>(ops.parents, ops.node, grads, |_grad| {
                    B::float_zeros(shape, &device)
                })
            }
        }

        match Floor
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroFloor::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(preps) => preps.finish(
                (tensor.primitive.shape(), B::float_device(&tensor.primitive)),
                B::float_floor(tensor.primitive),
            ),
            OpsKind::UnTracked(preps) => preps.finish(B::float_floor(tensor.primitive)),
        }
    }

    // Element-wise ceil with autodiff support; zero gradient, same pattern as
    // float_round/float_floor above.
    fn float_ceil(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Ceil;
        retro_unary!(RetroCeil, B::float_ceil);

        impl<B: Backend> Backward<B, 1> for Ceil {
            type State = (Shape, B::Device);

            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (shape, device) = ops.state;
                unary::<B, _>(ops.parents, ops.node, grads, |_grad| {
                    B::float_zeros(shape, &device)
                })
            }
        }

        match Ceil
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
.retro_forward(RetroCeil::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (tensor.primitive.shape(), B::float_device(&tensor.primitive)),\n B::float_floor(tensor.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_floor(tensor.primitive)),\n }\n }\n\n fn float_erf(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Erf;\n\n retro_unary!(RetroErf, B::float_erf);\n\n impl<B: Backend> Backward<B, 1> for Erf {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let ops = checkpointer.retrieve_node_output(ops.state);\n let exponent = B::float_neg(B::float_powf_scalar(ops, 2.0));\n let numerator = B::float_mul_scalar(B::float_exp(exponent), 2.0.elem());\n let denominator = core::f64::consts::PI.sqrt().elem();\n let value = B::float_div_scalar(numerator, denominator);\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Erf\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroErf::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_erf(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_erf(tensor.primitive)),\n }\n }\n\n fn float_cat(tensors: Vec<FloatTensor<Self>>, dim: usize) -> FloatTensor<Self> {\n #[derive(new, Debug)]\n struct CatStep<B: Backend> {\n nodes: Vec<Option<NodeRef>>,\n // The dimension of each tensor along the dim dimension.\n // This indicates the number of dimension concatenated for each tensor.\n dim_sizes: Vec<usize>,\n output: NodeRef,\n phantom: PhantomData<B>,\n dim: usize,\n }\n\n impl<B: Backend> Step for CatStep<B> {\n fn step(self: Box<Self>, grads: &mut Gradients, _checkpointer: &mut Checkpointer) {\n let grad = 
grads.consume::<B>(&self.output);\n let ranges: Vec<_> = grad.shape().dims.iter().map(|v| 0..*v).collect();\n\n let mut current_index = 0;\n\n self.nodes\n .into_iter()\n .zip(self.dim_sizes)\n .filter_map(|(node, dim_size)| node.map(|node| (node, dim_size)))\n .for_each(|(node, dim_size)| {\n let mut ranges = ranges.clone();\n ranges[self.dim] = current_index..dim_size + current_index;\n current_index += dim_size;\n grads.register::<B>(node.id, B::float_slice(grad.clone(), &ranges));\n });\n }\n\n fn node(&self) -> NodeID {\n self.output.id\n }\n\n fn parents(&self) -> Vec<NodeID> {\n self.nodes\n .iter()\n .filter_map(|node| node.clone())\n .map(|node| node.id)\n .collect()\n }\n fn depth(&self) -> usize {\n self.output.order\n }\n }\n\n let mut nodes = Vec::with_capacity(tensors.len());\n let mut primitives = Vec::with_capacity(tensors.len());\n let mut dim_sizes = Vec::with_capacity(tensors.len());\n\n tensors.into_iter().for_each(|tensor| {\n dim_sizes.push(tensor.primitive.shape().dims[dim]);\n nodes.push(tensor.node);\n primitives.push(tensor.primitive);\n });\n\n let requirement = Requirement::from_nodes(&nodes);\n\n // For simplicity, this operation does not checkpoint anything\n let cat_computing_property = ComputingProperty::Ambiguous;\n let checkpointer_builder = CheckpointerBuilder::default();\n\n let output = B::float_cat(primitives, dim);\n if requirement.is_none() {\n return AutodiffTensor::from_parents(\n output,\n &nodes,\n requirement,\n cat_computing_property,\n );\n }\n\n let output =\n AutodiffTensor::from_parents(output, &nodes, requirement, cat_computing_property);\n let nodes = nodes\n .into_iter()\n .map(|node| node.clone_if_require_grad())\n .collect::<Vec<_>>();\n\n let ops = CatStep::<B>::new(nodes, dim_sizes, output.node.clone(), dim);\n output.register_step(ops, checkpointer_builder)\n }\n\n fn float_max_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {\n match MaxMinDim\n .prepare::<C>([tensor.node])\n 
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => {
                // Even for the value-only variant we compute the indices: they
                // are saved as state, presumably so the shared MaxMinDim
                // backward (defined elsewhere) can route the gradient to the
                // selected positions — confirm against MaxMinDim's backward.
                let shape = tensor.primitive.shape();
                let (tensor, index) = B::float_max_dim_with_indices(tensor.primitive, dim);
                prep.finish((index, shape), tensor)
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_max_dim(tensor.primitive, dim)),
        }
    }
    // Max along `dim`, also returning the index tensor. Only the float output
    // participates in the autodiff graph; the indices are returned as-is.
    fn float_max_dim_with_indices(
        tensor: FloatTensor<Self>,
        dim: usize,
    ) -> (FloatTensor<Self>, IntTensor<B>) {
        match MaxMinDim
            .prepare::<C>([tensor.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => {
                let shape = tensor.primitive.shape();
                let (tensor, index) = B::float_max_dim_with_indices(tensor.primitive, dim);
                // The index tensor is cloned into the saved backward state and
                // also handed back to the caller.
                let tensor = prep.finish((index.clone(), shape), tensor);

                (tensor, index)
            }
            OpsKind::UnTracked(prep) => {
                let (tensor, index) = B::float_max_dim_with_indices(tensor.primitive, dim);
                let tensor = prep.finish(tensor);

                (tensor, index)
            }
        }
    }
    // Min along `dim`; mirrors float_max_dim and reuses the same MaxMinDim
    // backward (the saved (index, shape) state has the same meaning).
    fn float_min_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {
        match MaxMinDim
            .prepare::<C>([tensor.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => {
                let shape = tensor.primitive.shape();
                let (tensor, index) = B::float_min_dim_with_indices(tensor.primitive, dim);
                prep.finish((index, shape), tensor)
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_min_dim(tensor.primitive, dim)),
        }
    }
    // Min along `dim`, also returning the index tensor; mirrors
    // float_max_dim_with_indices.
    fn float_min_dim_with_indices(
        tensor: FloatTensor<Self>,
        dim: usize,
    ) -> (FloatTensor<Self>, IntTensor<B>) {
        match MaxMinDim
            .prepare::<C>([tensor.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => {
                let shape = tensor.primitive.shape();
                let (tensor, index) = B::float_min_dim_with_indices(tensor.primitive, dim);
                let tensor = prep.finish((index.clone(), shape), tensor);

                (tensor, index)
            }
            OpsKind::UnTracked(prep) => {
                let (tensor, index) = B::float_min_dim_with_indices(tensor.primitive, dim);
                let tensor = prep.finish(tensor);

                (tensor, index)
            }
        }
    }

fn float_into_int(tensor: FloatTensor<Self>) -> <Autodiff<B> as Backend>::IntTensorPrimitive {\n B::float_into_int(tensor.primitive)\n }\n\n fn float_powf(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct PowF;\n\n retro_binary!(RetroPowf, B::float_powf);\n\n impl<B: Backend> Backward<B, 2> for PowF {\n type State = (NodeID, NodeID, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (lhs_id, rhs_id, broadcast) = ops.state;\n let lhs: B::FloatTensorPrimitive = checkpointer.retrieve_node_output(lhs_id);\n let rhs: B::FloatTensorPrimitive = checkpointer.retrieve_node_output(rhs_id);\n\n // Both lhs and rhs are needed for both lhs and rhs gradients, but we clone them\n // the number of times required by the parents specification.\n let [rhs_4lhs, rhs_4rhs] = duplicate(&ops.parents, Some(rhs));\n let [lhs_4lhs, lhs_4rhs] = duplicate(&ops.parents, Some(lhs));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n //rhs*(lhs.val**(rhs-1))*grad\n let rhs1 = rhs_4lhs.unwrap();\n let rhs2 = rhs1.clone();\n let lhs = lhs_4lhs.unwrap();\n\n let tmp = B::float_powf(\n lhs,\n B::float_sub_scalar(rhs1, B::FloatElem::from_elem(1.0)),\n );\n let value = B::float_mul(tmp, rhs2);\n let grad = B::float_mul(grad, value);\n\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n //lhs**rhs * ln(lhs) * grad\n let rhs = rhs_4rhs.unwrap();\n let lhs1 = lhs_4rhs.unwrap();\n let lhs2 = lhs1.clone();\n let tmp = B::float_powf(lhs1, rhs);\n let value = B::float_mul(tmp, B::float_log(lhs2));\n let grad = B::float_mul(grad, value);\n\n broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match PowF\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroPowf::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, 
&rhs])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = prep.checkpoint(&lhs);\n let rhs_state = prep.checkpoint(&rhs);\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n B::float_powf(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_powf(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_sign(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sign;\n\n retro_unary!(RetroSign, B::float_sign);\n\n impl<B: Backend> Backward<B, 1> for Sign {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad|\n // Always return 0 because the derivative of the sign function\n // does not contribute to gradient updates in a meaningful way.\n B::float_mul_scalar(grad, 0.elem()));\n }\n }\n\n Sign.prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSign::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateless(B::float_sign(tensor.primitive))\n }\n\n fn float_expand(tensor: FloatTensor<Self>, shape: Shape) -> FloatTensor<Self> {\n // D1: tensor, D2: shape\n #[derive(Debug)]\n struct ExpandDim;\n\n #[derive(new, Debug)]\n struct RetroExpand<B: Backend> {\n input_id: NodeID,\n shape: Shape,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroExpand<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_expand(input, self.shape.clone());\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for ExpandDim {\n type State = (Shape, Shape);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape_in, shape_out) = ops.state;\n let ndims_in = shape_in.num_dims();\n let ndims_out = shape_out.num_dims();\n\n let 
mut shape_expanded = vec![1; ndims_out];\n\n debug_assert!(ndims_out >= ndims_in);\n\n for i in 0..ndims_in {\n shape_expanded[i + (ndims_out - ndims_in)] = shape_in.dims[i];\n }\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let shape_grad = grad.shape();\n let mut grad = grad;\n\n #[allow(clippy::needless_range_loop)]\n for i in 0..ndims_out {\n if shape_expanded[i] == 1 && shape_grad.dims[i] != 1 {\n grad = B::float_sum_dim(grad, i);\n }\n }\n\n B::float_reshape(grad, shape_in)\n });\n }\n }\n\n match ExpandDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroExpand::<B>::new(tensor.node.id, shape.clone()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (tensor.primitive.shape(), shape.clone()),\n B::float_expand(tensor.primitive, shape),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_expand(tensor.primitive, shape)),\n }\n }\n\n fn float_sort(tensor: FloatTensor<Self>, dim: usize, descending: bool) -> FloatTensor<Self> {\n match super::sort::SortDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let shape = tensor.primitive.shape();\n let (tensor, indices) =\n B::float_sort_with_indices(tensor.primitive, dim, descending);\n prep.finish((indices, shape), tensor)\n }\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_sort(tensor.primitive, dim, descending))\n }\n }\n }\n\n fn float_sort_with_indices(\n tensor: FloatTensor<Self>,\n dim: usize,\n descending: bool,\n ) -> (FloatTensor<Self>, IntTensor<B>) {\n match super::sort::SortDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let shape = tensor.primitive.shape();\n let (tensor, indices) =\n B::float_sort_with_indices(tensor.primitive, dim, descending);\n let tensor = prep.finish((indices.clone(), shape), tensor);\n\n (tensor, indices)\n }\n OpsKind::UnTracked(prep) => {\n let (tensor, indices) =\n 
B::float_sort_with_indices(tensor.primitive, dim, descending);\n let tensor = prep.finish(tensor);\n\n (tensor, indices)\n }\n }\n }\n\n fn float_argsort(tensor: FloatTensor<Self>, dim: usize, descending: bool) -> IntTensor<B> {\n B::float_argsort(tensor.primitive, dim, descending)\n }\n\n fn float_repeat_dim(tensor: FloatTensor<Self>, dim: usize, times: usize) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Repeat;\n\n #[derive(new, Debug)]\n struct RetroRepeat<B: Backend> {\n tensor_id: NodeID,\n dim: usize,\n times: usize,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroRepeat<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);\n let out = B::float_repeat_dim(tensor, self.dim, self.times);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for Repeat {\n type State = (usize, usize);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, times) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let mut dims = grad.shape().dims;\n let orig_dim_size = dims[dim] / times;\n if orig_dim_size > 1 {\n dims[dim] = orig_dim_size;\n let orig_dims = dims.clone();\n dims.insert(dim + 1, times); // shape [..., orig_dim_size, times, ...]\n let grad = B::float_reshape(grad, Shape::from(dims));\n let grad = B::float_sum_dim(grad, dim + 1); // sum over repeat times\n B::float_reshape(grad, Shape::from(orig_dims))\n } else {\n B::float_sum_dim(grad, dim)\n }\n });\n }\n }\n\n match Repeat\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroRepeat::<B>::new(tensor.node.id, dim, times))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (dim, times),\n B::float_repeat_dim(tensor.primitive, dim, times),\n ),\n OpsKind::UnTracked(prep) => {\n 
prep.finish(B::float_repeat_dim(tensor.primitive, dim, times))\n }\n }\n }\n\n fn float_cast(tensor: FloatTensor<Self>, dtype: burn_tensor::FloatDType) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_cast(tensor.primitive, dtype))\n }\n\n // TODO: Implement float_prod and float_sum\n // https://github.com/tracel-ai/burn/issues/1458\n}"
],
"name": "rhs",
"type": "FloatTensor<Self>"
}
],
"end_line": 231,
"name": "float_sub",
"signature": "fn float_sub(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self>",
"start_line": 191
} | {
"class_name": "impl<B: Backend, C: CheckpointStrategy> FloatTensorOps<Self> for Autodiff<B, C> {\n fn float_from_data(data: TensorData, device: &Device<Self>) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_from_data(data, device))\n }\n\n fn float_random(\n shape: Shape,\n distribution: burn_tensor::Distribution,\n device: &Device<Self>,\n ) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_random(shape, distribution, device))\n }\n\n fn float_zeros(shape: Shape, device: &Device<Self>) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_zeros(shape, device))\n }\n\n fn float_ones(shape: Shape, device: &Device<Self>) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_ones(shape, device))\n }\n\n async fn float_into_data(tensor: FloatTensor<Self>) -> TensorData {\n B::float_into_data(tensor.primitive).await\n }\n\n fn float_device(tensor: &FloatTensor<Self>) -> Device<Self> {\n B::float_device(&tensor.primitive)\n }\n\n fn float_to_device(tensor: FloatTensor<Self>, device: &Device<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct ToDevice;\n\n impl<B: Backend> Backward<B, 1> for ToDevice {\n type State = B::Device;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_to_device(grad, &ops.state)\n });\n }\n }\n\n match ToDevice\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let device_old = B::float_device(&tensor.primitive);\n prep.finish(device_old, B::float_to_device(tensor.primitive, device))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_to_device(tensor.primitive, device)),\n }\n }\n\n fn float_empty(shape: Shape, device: &Device<Self>) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_empty(shape, device))\n }\n\n fn float_add(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Add;\n\n 
retro_binary!(RetroAdd, B::float_add);\n\n impl<B: Backend> Backward<B, 2> for Add {\n type State = (Shape, Shape);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape_lhs, shape_rhs) = ops.state;\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| broadcast_shape::<B>(grad, &shape_lhs),\n |grad| broadcast_shape::<B>(grad, &shape_rhs),\n );\n }\n }\n\n match Add\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroAdd::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (lhs.primitive.shape(), rhs.primitive.shape()),\n B::float_add(lhs.primitive, rhs.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_add(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_add_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct AddScalar;\n\n retro_unary_scalar!(RetroAddScalar, B::float_add_scalar);\n\n impl<B: Backend> Backward<B, 1> for AddScalar {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| grad);\n }\n }\n\n AddScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroAddScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateless(B::float_add_scalar(lhs.primitive, rhs))\n }\n\n fn float_sub(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sub;\n\n retro_binary!(RetroSub, B::float_sub);\n\n impl<B: Backend> Backward<B, 2> for Sub {\n type State = (Shape, Shape);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape_lhs, shape_rhs) = ops.state;\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n 
grads,\n |grad| broadcast_shape::<B>(grad, &shape_lhs),\n |grad| broadcast_shape::<B>(B::float_neg(grad), &shape_rhs),\n );\n }\n }\n\n match Sub\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroSub::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (lhs.primitive.shape(), rhs.primitive.shape()),\n B::float_sub(lhs.primitive, rhs.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_sub(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_sub_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct SubScalar;\n\n retro_unary_scalar!(RetroSubScalar, B::float_sub_scalar);\n\n impl<B: Backend> Backward<B, 1> for SubScalar {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| grad);\n }\n }\n\n SubScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroSubScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateless(B::float_sub_scalar(lhs.primitive, rhs))\n }\n\n fn float_mul(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Mul;\n\n retro_binary!(RetroMul, B::float_mul);\n\n impl<B: Backend> Backward<B, 2> for Mul {\n type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (lhs, rhs, broadcast) = ops.state;\n let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));\n let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let grad = B::float_mul(grad, rhs.unwrap());\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n let grad = B::float_mul(grad, 
lhs.unwrap());\n broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let lhs_tracked = lhs.is_tracked();\n let rhs_tracked = rhs.is_tracked();\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match Mul\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroMul::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));\n let rhs_state = lhs_tracked.then(|| prep.checkpoint(&rhs));\n\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n B::float_mul(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_mul(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_mul_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct MulScalar;\n\n retro_unary_scalar!(RetroMulScalar, B::float_mul_scalar);\n\n impl<B: Backend> Backward<B, 1> for MulScalar {\n type State = FloatElem<B>;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_mul_scalar(grad, ops.state)\n });\n }\n }\n\n match MulScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroMulScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(rhs, B::float_mul_scalar(lhs.primitive, rhs)),\n OpsKind::UnTracked(prep) => prep.finish(B::float_mul_scalar(lhs.primitive, rhs)),\n }\n }\n\n fn float_div(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Div;\n\n retro_binary!(RetroDiv, B::float_div);\n\n impl<B: Backend> Backward<B, 2> for Div {\n type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n 
checkpointer: &mut Checkpointer,\n ) {\n let (lhs, rhs, broadcast) = ops.state;\n let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));\n let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));\n let [rhs_4lhs, rhs_4rhs] = duplicate(&ops.parents, rhs);\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let rhs = rhs_4lhs.unwrap();\n let value = B::float_powf_scalar(rhs, -1.0);\n let grad = B::float_mul(grad, value);\n\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n let rhs = rhs_4rhs.unwrap();\n let lhs = lhs.unwrap();\n let value = B::float_div(B::float_neg(lhs), B::float_powf_scalar(rhs, 2.0));\n let grad = B::float_mul(grad, value);\n\n broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let lhs_tracked = lhs.is_tracked();\n let rhs_tracked = rhs.is_tracked();\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match Div\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroDiv::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));\n let rhs_state = (lhs_tracked || rhs_tracked).then(|| prep.checkpoint(&rhs));\n\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n B::float_div(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_div(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_div_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct DivScalar;\n\n retro_unary_scalar!(RetroDivScalar, B::float_div_scalar);\n\n impl<B: Backend> Backward<B, 1> for DivScalar {\n type State = FloatElem<B>;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let tmp = 1.0 / ops.state.elem::<f32>();\n B::float_mul_scalar(grad, 
tmp.elem())\n });\n }\n }\n\n match DivScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroDivScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(rhs, B::float_div_scalar(lhs.primitive, rhs)),\n OpsKind::UnTracked(prep) => prep.finish(B::float_div_scalar(lhs.primitive, rhs)),\n }\n }\n\n fn float_remainder(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Rem;\n\n retro_binary!(RetroRem, B::float_remainder);\n\n impl<B: Backend> Backward<B, 2> for Rem {\n type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (lhs, rhs, broadcast) = ops.state;\n let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));\n let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n // remainder(x, y) = x - floor(x / y) * y\n // partial(x - floor(x / y) * y, x) = 1\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n // partial(x - floor(x / y) * y, y) = - floor(x / y)\n let rhs = rhs.unwrap();\n let lhs = lhs.unwrap();\n let value = B::float_neg(B::float_floor(B::float_div(lhs, rhs)));\n let grad = B::float_mul(grad, value);\n broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let lhs_tracked = lhs.is_tracked();\n let rhs_tracked = rhs.is_tracked();\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match Rem\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroRem::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));\n let rhs_state = (lhs_tracked || rhs_tracked).then(|| prep.checkpoint(&rhs));\n\n prep.finish(\n (lhs_state, 
rhs_state, broadcast),\n B::float_remainder(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_remainder(lhs.primitive, rhs.primitive))\n }\n }\n }\n\n fn float_remainder_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct RemainderScalar;\n\n retro_unary_scalar!(RetroRemainderScalar, B::float_remainder_scalar);\n\n impl<B: Backend> Backward<B, 1> for RemainderScalar {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| grad);\n }\n }\n\n RemainderScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroRemainderScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateless(B::float_remainder_scalar(lhs.primitive, rhs))\n }\n\n fn float_matmul(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Matmul;\n\n impl<B: Backend> Backward<B, 2> for Matmul {\n type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (lhs, rhs, broadcast) = ops.state;\n let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));\n let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let rhs = B::float_transpose(rhs.unwrap());\n let grad = B::float_matmul(grad, rhs);\n\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n let lhs = B::float_transpose(lhs.unwrap());\n let grad = B::float_matmul(lhs, grad);\n\n broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let lhs_tracked = lhs.is_tracked();\n let rhs_tracked = rhs.is_tracked();\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match Matmul\n .prepare::<C>([lhs.node.clone(), 
rhs.node.clone()])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));\n let rhs_state = lhs_tracked.then(|| prep.checkpoint(&rhs));\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n B::float_matmul(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_matmul(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_neg(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Neg;\n\n retro_unary!(RetroNeg, B::float_neg);\n\n impl<B: Backend> Backward<B, 1> for Neg {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| B::float_neg(grad));\n }\n }\n\n Neg.prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroNeg::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateless(B::float_neg(tensor.primitive))\n }\n\n fn float_recip(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Recip;\n\n retro_unary!(RetroRecip, B::float_recip);\n\n impl<B: Backend> Backward<B, 1> for Recip {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let tensor = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let tmp = B::float_powf_scalar(tensor, -2.0);\n let value = B::float_neg(tmp);\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Recip\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroRecip::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_recip(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_recip(tensor.primitive)),\n }\n }\n\n fn 
float_swap_dims(tensor: FloatTensor<Self>, dim1: usize, dim2: usize) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct SwapDim;\n\n #[derive(new, Debug)]\n struct RetroSwapDims<B: Backend> {\n input_id: NodeID,\n dim1: usize,\n dim2: usize,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroSwapDims<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_swap_dims(input, self.dim1, self.dim2);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for SwapDim {\n type State = (usize, usize);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim1, dim2) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_swap_dims(grad, dim2, dim1)\n });\n }\n }\n\n match SwapDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSwapDims::<B>::new(tensor.node.id, dim1, dim2))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (dim1, dim2),\n B::float_swap_dims(tensor.primitive, dim1, dim2),\n ),\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_swap_dims(tensor.primitive, dim1, dim2))\n }\n }\n }\n\n fn float_permute(tensor: FloatTensor<Self>, axes: &[usize]) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct PermuteDim;\n\n #[derive(new, Debug)]\n struct RetroPermuteDims<B: Backend> {\n input_id: NodeID,\n axes: Vec<usize>,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroPermuteDims<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_permute(input, &self.axes);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for PermuteDim {\n type State = Vec<usize>;\n\n fn backward(\n self,\n ops: 
Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let axes = ops.state;\n\n let mut inverse = vec![0usize; axes.len()];\n axes.iter()\n .enumerate()\n .for_each(|(i, &axis)| inverse[axis] = i);\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_permute(grad, &inverse)\n });\n }\n }\n\n match PermuteDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroPermuteDims::<B>::new(tensor.node.id, axes.to_vec()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n prep.finish(axes.to_vec(), B::float_permute(tensor.primitive, axes))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_permute(tensor.primitive, axes)),\n }\n }\n\n fn float_flip(tensor: FloatTensor<Self>, axes: &[usize]) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct FlipDim;\n\n #[derive(new, Debug)]\n struct RetroFlipDims<B: Backend> {\n input_id: NodeID,\n axes: Vec<usize>,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroFlipDims<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_flip(input, &self.axes);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for FlipDim {\n type State = Vec<usize>;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let axes = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_flip(grad, &axes)\n });\n }\n }\n\n match FlipDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroFlipDims::<B>::new(tensor.node.id, axes.to_vec()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n prep.finish(axes.to_vec(), B::float_flip(tensor.primitive, axes))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_flip(tensor.primitive, axes)),\n }\n }\n\n fn 
float_reshape(tensor: FloatTensor<Self>, shape: Shape) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct ReshapeDim;\n\n #[derive(new, Debug)]\n struct RetroReshape<B: Backend> {\n input_id: NodeID,\n shape: Shape,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroReshape<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_reshape(input, self.shape.clone());\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for ReshapeDim {\n type State = (Shape, Shape);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape_original, shape) = ops.state;\n let ndims_out = shape.num_dims();\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let shape_grad = grad.shape();\n let mut grad = grad;\n\n for i in 0..ndims_out {\n if shape.dims[i] == 1 && shape_grad.dims[i] != 1 {\n grad = B::float_sum_dim(grad, i);\n }\n }\n\n B::float_reshape(grad, shape_original)\n });\n }\n }\n\n match ReshapeDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroReshape::<B>::new(tensor.node.id, shape.clone()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (tensor.primitive.shape(), shape.clone()),\n B::float_reshape(tensor.primitive, shape),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_reshape(tensor.primitive, shape)),\n }\n }\n\n fn float_gather(\n dim: usize,\n tensor: FloatTensor<Self>,\n indices: IntTensor<B>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Gather;\n\n impl<B: Backend> Backward<B, 1> for Gather {\n type State = (usize, IntTensor<B>, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, indices, shape, device) = ops.state;\n\n unary::<B, _>(ops.parents, 
ops.node, grads, |grad| {\n let zeros = B::float_zeros(shape, &device);\n B::float_scatter(dim, zeros, indices, grad)\n });\n }\n }\n\n match Gather\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n dim,\n indices.clone(),\n tensor.primitive.shape(),\n B::float_device(&tensor.primitive),\n ),\n B::float_gather(dim, tensor.primitive, indices),\n ),\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_gather(dim, tensor.primitive, indices))\n }\n }\n }\n\n fn float_scatter(\n dim: usize,\n tensor: FloatTensor<Self>,\n indices: IntTensor<B>,\n value: FloatTensor<Self>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Scatter;\n\n impl<B: Backend> Backward<B, 2> for Scatter {\n type State = (usize, IntTensor<B>, Shape, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, indices, shape_lhs, shape_rhs, device) = ops.state;\n let [indices_4lhs, indices_4rhs] = duplicate(&ops.parents, Some(indices));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let zeros = B::float_zeros(shape_lhs, &device);\n B::float_scatter(dim, grad, indices_4lhs.unwrap(), zeros)\n },\n |grad| {\n let zeros = B::float_zeros(shape_rhs, &device);\n B::float_scatter(dim, zeros, indices_4rhs.unwrap(), grad)\n },\n );\n }\n }\n\n match Scatter\n .prepare::<C>([tensor.node, value.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n dim,\n indices.clone(),\n tensor.primitive.shape(),\n value.primitive.shape(),\n B::float_device(&value.primitive),\n ),\n B::float_scatter(dim, tensor.primitive, indices, value.primitive),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_scatter(\n dim,\n tensor.primitive,\n indices,\n value.primitive,\n )),\n }\n }\n\n fn float_select(\n tensor: FloatTensor<Self>,\n dim: usize,\n indices: IntTensor<B>,\n ) -> FloatTensor<Self> {\n 
#[derive(Debug)]\n struct Select;\n\n #[derive(new, Debug)]\n struct RetroSelect<B: Backend> {\n input_id: NodeID,\n dim: usize,\n indices: IntTensor<B>,\n }\n\n impl<B: Backend> RetroForward for RetroSelect<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_select(input, self.dim, self.indices.clone());\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for Select {\n type State = (usize, IntTensor<B>, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, indices, shape, device) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let zeros = B::float_zeros(shape, &device);\n B::float_select_assign(zeros, dim, indices, grad)\n });\n }\n }\n\n match Select\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSelect::<B>::new(tensor.node.id, dim, indices.clone()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n dim,\n indices.clone(),\n tensor.primitive.shape(),\n B::float_device(&tensor.primitive),\n ),\n B::float_select(tensor.primitive, dim, indices),\n ),\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_select(tensor.primitive, dim, indices))\n }\n }\n }\n\n fn float_select_assign(\n tensor: FloatTensor<Self>,\n dim: usize,\n indices: IntTensor<B>,\n value: FloatTensor<Self>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct IndexSelectDimAssign;\n\n #[derive(new, Debug)]\n struct RetroSelectAssign<B: Backend> {\n tensor_id: NodeID,\n dim: usize,\n indices: IntTensor<B>,\n value_id: NodeID,\n }\n\n impl<B: Backend> RetroForward for RetroSelectAssign<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);\n let value = 
states.get_state::<B::FloatTensorPrimitive>(&self.value_id);\n let out = B::float_select_assign(tensor, self.dim, self.indices.clone(), value);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 2> for IndexSelectDimAssign {\n type State = (usize, IntTensor<B>);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, indices) = ops.state;\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| grad,\n |grad| B::float_select(grad, dim, indices),\n );\n }\n }\n\n match IndexSelectDimAssign\n .prepare::<C>([tensor.node.clone(), value.node.clone()])\n .memory_bound()\n .retro_forward(RetroSelectAssign::<B>::new(\n tensor.node.id,\n dim,\n indices.clone(),\n value.node.id,\n ))\n .parents([&tensor, &value])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (dim, indices.clone()),\n B::float_select_assign(tensor.primitive, dim, indices, value.primitive),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_select_assign(\n tensor.primitive,\n dim,\n indices,\n value.primitive,\n )),\n }\n }\n\n fn float_slice(\n tensor: FloatTensor<Self>,\n ranges: &[core::ops::Range<usize>],\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Index;\n\n #[derive(new, Debug)]\n struct RetroSlice<B: Backend> {\n tensor_id: NodeID,\n ranges: Vec<core::ops::Range<usize>>,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroSlice<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);\n let out = B::float_slice(tensor, &self.ranges);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for Index {\n type State = (Vec<core::ops::Range<usize>>, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (ranges, shape, device) = ops.state;\n\n 
unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let zeros = B::float_zeros(shape, &device);\n B::float_slice_assign(zeros, &ranges, grad)\n });\n }\n }\n\n match Index\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSlice::<B>::new(tensor.node.id, ranges.to_vec()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n ranges.to_vec(),\n tensor.primitive.shape(),\n B::float_device(&tensor.primitive),\n ),\n B::float_slice(tensor.primitive, ranges),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_slice(tensor.primitive, ranges)),\n }\n }\n\n fn float_slice_assign(\n tensor: FloatTensor<Self>,\n ranges: &[core::ops::Range<usize>],\n value: FloatTensor<Self>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct SliceAssign;\n\n #[derive(new, Debug)]\n struct RetroSliceAssign<B: Backend> {\n tensor_id: NodeID,\n ranges: Vec<core::ops::Range<usize>>,\n value_id: NodeID,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroSliceAssign<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);\n let value = states.get_state::<B::FloatTensorPrimitive>(&self.value_id);\n let out = B::float_slice_assign(tensor, &self.ranges, value);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 2> for SliceAssign {\n type State = (Vec<core::ops::Range<usize>>, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (ranges, shape_rhs, device) = ops.state;\n let [ranges_4lhs, ranges_4rhs] = duplicate(&ops.parents, Some(ranges));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let zeros = B::float_zeros(shape_rhs, &device);\n B::float_slice_assign(grad, &ranges_4lhs.unwrap(), zeros)\n },\n |grad| B::float_slice(grad, &ranges_4rhs.unwrap()),\n );\n }\n }\n\n match 
SliceAssign\n .prepare::<C>([tensor.node.clone(), value.node.clone()])\n .memory_bound()\n .retro_forward(RetroSliceAssign::<B>::new(\n tensor.node.id,\n ranges.to_vec(),\n value.node.id,\n ))\n .parents([&tensor, &value])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n ranges.to_vec(),\n value.primitive.shape(),\n B::float_device(&value.primitive),\n ),\n B::float_slice_assign(tensor.primitive, ranges, value.primitive),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_slice_assign(\n tensor.primitive,\n ranges,\n value.primitive,\n )),\n }\n }\n\n fn float_mask_where(\n tensor: FloatTensor<Self>,\n mask: BoolTensor<Self>,\n source: FloatTensor<Self>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct MaskWhere;\n\n impl<B: Backend> Backward<B, 2> for MaskWhere {\n type State = (BoolTensor<B>, Shape, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (mask, shape_lhs, shape_rhs, device) = ops.state;\n let [mask_4lhs, mask_4rhs] = duplicate(&ops.parents, Some(mask));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let zeros = B::float_zeros(shape_lhs.clone(), &device);\n let grad = B::float_mask_where(grad, mask_4lhs.unwrap(), zeros);\n\n broadcast_shape::<B>(grad, &shape_lhs)\n },\n |grad| {\n let zeros = B::float_zeros(shape_rhs.clone(), &device);\n let grad = B::float_mask_where(zeros, mask_4rhs.unwrap(), grad);\n\n broadcast_shape::<B>(grad, &shape_rhs)\n },\n );\n }\n }\n\n match MaskWhere\n .prepare::<C>([tensor.node, source.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n mask.clone(),\n tensor.primitive.shape(),\n source.primitive.shape(),\n B::float_device(&source.primitive),\n ),\n B::float_mask_where(tensor.primitive, mask, source.primitive),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_mask_where(\n tensor.primitive,\n mask,\n source.primitive,\n )),\n }\n 
}\n\n fn float_mask_fill(\n tensor: FloatTensor<Self>,\n mask: BoolTensor<B>,\n value: FloatElem<B>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct MaskFill;\n\n impl<B: Backend> Backward<B, 1> for MaskFill {\n type State = BoolTensor<B>;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_mask_fill(grad, ops.state, 0.elem())\n });\n }\n }\n\n match MaskFill\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n mask.clone(),\n B::float_mask_fill(tensor.primitive, mask, value),\n ),\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_mask_fill(tensor.primitive, mask, value))\n }\n }\n }\n\n fn float_equal(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {\n B::float_equal(lhs.primitive, rhs.primitive)\n }\n\n fn float_equal_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {\n B::float_equal_elem(lhs.primitive, rhs)\n }\n\n fn float_greater(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {\n B::float_greater(lhs.primitive, rhs.primitive)\n }\n\n fn float_greater_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {\n B::float_greater_elem(lhs.primitive, rhs)\n }\n\n fn float_greater_equal(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {\n B::float_greater_equal(lhs.primitive, rhs.primitive)\n }\n\n fn float_greater_equal_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {\n B::float_greater_equal_elem(lhs.primitive, rhs)\n }\n\n fn float_lower(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {\n B::float_lower(lhs.primitive, rhs.primitive)\n }\n\n fn float_lower_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {\n B::float_lower_elem(lhs.primitive, rhs)\n }\n\n fn float_lower_equal(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> 
BoolTensor<B> {\n B::float_lower_equal(lhs.primitive, rhs.primitive)\n }\n\n fn float_lower_equal_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {\n B::float_lower_equal_elem(lhs.primitive, rhs)\n }\n\n fn float_detach(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n // When we detach a tensor, we remove it from the graph, but we still want to keep the\n // `require_grad` setting.\n let is_require_grad = Self::float_is_require_grad(&tensor);\n let tensor = AutodiffTensor::new(tensor.primitive);\n\n match is_require_grad {\n true => tensor.require_grad(),\n false => tensor,\n }\n }\n\n fn float_set_require_grad(tensor: FloatTensor<Self>, require_grad: bool) -> FloatTensor<Self> {\n if require_grad {\n return tensor.require_grad();\n }\n\n AutodiffTensor::new(tensor.primitive)\n }\n\n fn float_is_require_grad(tensor: &FloatTensor<Self>) -> bool {\n matches!(tensor.node.requirement, Requirement::Grad)\n }\n\n fn float_mean(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Mean;\n\n impl<B: Backend> Backward<B, 1> for Mean {\n type State = Shape;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let shape = ops.state;\n let val = 1_f64 / shape.num_elements() as f64;\n let ones = B::float_ones(shape, &B::float_device(&grad));\n let val = B::float_mul_scalar(ones, val.elem());\n\n let grad = unsqueeze_like::<B>(grad, val.shape());\n B::float_mul(val, grad)\n });\n }\n }\n\n match Mean.prepare::<C>([tensor.node]).compute_bound().stateful() {\n OpsKind::Tracked(prep) => {\n prep.finish(tensor.primitive.shape(), B::float_mean(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_mean(tensor.primitive)),\n }\n }\n\n fn float_sum(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sum;\n\n impl<B: Backend> Backward<B, 1> for Sum {\n type State = Shape;\n\n 
fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let val = B::float_ones(ops.state, &B::float_device(&grad));\n\n let grad = unsqueeze_like::<B>(grad, val.shape());\n B::float_mul(val, grad)\n });\n }\n }\n\n match Sum.prepare::<C>([tensor.node]).compute_bound().stateful() {\n OpsKind::Tracked(prep) => {\n prep.finish(tensor.primitive.shape(), B::float_sum(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_sum(tensor.primitive)),\n }\n }\n\n fn float_mean_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct MeanDim;\n\n impl<B: Backend> Backward<B, 1> for MeanDim {\n type State = (Shape, usize);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape, dim) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let val = 1_f64 / shape.dims[dim] as f64;\n let ones = B::float_ones(shape, &B::float_device(&grad));\n let val = B::float_mul_scalar(ones, B::FloatElem::from_elem(val));\n\n let grad = B::float_sum_dim(grad, dim);\n B::float_mul(val, grad)\n });\n }\n }\n\n match MeanDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (tensor.primitive.shape(), dim),\n B::float_mean_dim(tensor.primitive, dim),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_mean_dim(tensor.primitive, dim)),\n }\n }\n\n fn float_sum_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct SumDim;\n\n impl<B: Backend> Backward<B, 1> for SumDim {\n type State = (Shape, usize);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape, dim) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let ones = 
B::float_ones(shape, &B::float_device(&grad));\n let grad = B::float_sum_dim(grad, dim);\n\n B::float_mul(ones, grad)\n });\n }\n }\n\n match SumDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (tensor.primitive.shape(), dim),\n B::float_sum_dim(tensor.primitive, dim),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_sum_dim(tensor.primitive, dim)),\n }\n }\n\n fn float_argmax(tensor: FloatTensor<Self>, dim: usize) -> IntTensor<B> {\n B::float_argmax(tensor.primitive, dim)\n }\n\n fn float_argmin(tensor: FloatTensor<Self>, dim: usize) -> IntTensor<B> {\n B::float_argmin(tensor.primitive, dim)\n }\n\n fn float_exp(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Exp;\n\n retro_unary!(RetroExp, B::float_exp);\n\n impl<B: Backend> Backward<B, 1> for Exp {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n let output = B::float_exp(input);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_mul(grad, output)\n });\n }\n }\n\n match Exp\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroExp::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_exp(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_exp(tensor.primitive)),\n }\n }\n\n fn float_log(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Log;\n\n retro_unary!(RetroLog, B::float_log);\n\n impl<B: Backend> Backward<B, 1> for Log {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, 
ops.node, grads, |grad| {\n let value = B::float_powf_scalar(input, -1.0);\n B::float_mul(grad, value)\n });\n }\n }\n\n match Log\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroLog::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_log(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_log(tensor.primitive)),\n }\n }\n\n fn float_log1p(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Log1P;\n\n retro_unary!(RetroLog1P, B::float_log1p);\n\n impl<B: Backend> Backward<B, 1> for Log1P {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_add_scalar(input, 1.elem());\n let value = B::float_powf_scalar(value, -1.0);\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Log1P\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroLog1P::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_log1p(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_log1p(tensor.primitive)),\n }\n }\n\n fn float_powf_scalar(tensor: FloatTensor<Self>, value: f32) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct PowfScalar;\n\n #[derive(new, Debug)]\n struct RetroPowfScalar<B: Backend> {\n lhs_id: NodeID,\n rhs: f32,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroPowfScalar<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let lhs = states.get_state::<B::FloatTensorPrimitive>(&self.lhs_id);\n let out = B::float_powf_scalar(lhs, self.rhs);\n 
states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for PowfScalar {\n type State = (NodeID, f32);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (tensor_id, value) = ops.state;\n let tensor = checkpointer.retrieve_node_output(tensor_id);\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let tmp = B::float_powf_scalar(tensor, value - 1.0);\n let value = B::float_mul_scalar(tmp, value.elem());\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match PowfScalar\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroPowfScalar::<B>::new(tensor.node.id, value))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = (prep.checkpoint(&tensor), value);\n prep.finish(state, B::float_powf_scalar(tensor.primitive, value))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_powf_scalar(tensor.primitive, value)),\n }\n }\n\n fn float_sqrt(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sqrt;\n\n retro_unary!(RetroSqrt, B::float_sqrt);\n\n impl<B: Backend> Backward<B, 1> for Sqrt {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_div_scalar(B::float_powf_scalar(input, -0.5), 2.elem());\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Sqrt\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSqrt::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_sqrt(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_sqrt(tensor.primitive)),\n }\n }\n\n fn float_abs(tensor: FloatTensor<Self>) -> 
FloatTensor<Self> {\n #[derive(Debug)]\n struct Abs;\n\n retro_unary!(RetroAbs, B::float_abs);\n\n impl<B: Backend> Backward<B, 1> for Abs {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let tensor: B::FloatTensorPrimitive = checkpointer.retrieve_node_output(ops.state);\n let state = B::float_sign(tensor);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_mul(grad, state)\n });\n }\n }\n\n match Abs\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroAbs::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_abs(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_abs(tensor.primitive)),\n }\n }\n\n fn float_cos(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Cos;\n\n retro_unary!(RetroCos, B::float_cos);\n\n impl<B: Backend> Backward<B, 1> for Cos {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_neg(B::float_sin(input));\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Cos\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroCos::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_cos(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_cos(tensor.primitive)),\n }\n }\n\n fn float_sin(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sin;\n\n retro_unary!(RetroSin, B::float_sin);\n\n impl<B: Backend> Backward<B, 1> for Sin {\n type State = 
NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let state = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_cos(state);\n B::float_mul(grad, value)\n });\n }\n }\n\n match Sin\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSin::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_sin(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_sin(tensor.primitive)),\n }\n }\n\n fn float_tanh(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Tanh;\n\n retro_unary!(RetroTanh, B::float_tanh);\n\n impl<B: Backend> Backward<B, 1> for Tanh {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n let state = B::float_tanh(input);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_add_scalar(\n B::float_neg(B::float_powf_scalar(state, 2.0)),\n 1.elem(),\n );\n B::float_mul(grad, value)\n });\n }\n }\n\n match Tanh\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroTanh::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_tanh(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_tanh(tensor.primitive)),\n }\n }\n\n fn float_round(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Round;\n retro_unary!(RetroRound, B::float_round);\n\n impl<B: Backend> Backward<B, 1> for Round {\n type State = (Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n 
grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape, device) = ops.state;\n unary::<B, _>(ops.parents, ops.node, grads, |_grad| {\n B::float_zeros(shape, &device)\n })\n }\n }\n\n match Round\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroRound::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (tensor.primitive.shape(), B::float_device(&tensor.primitive)),\n B::float_round(tensor.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_round(tensor.primitive)),\n }\n }\n\n fn float_floor(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Floor;\n retro_unary!(RetroFloor, B::float_floor);\n\n impl<B: Backend> Backward<B, 1> for Floor {\n type State = (Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape, device) = ops.state;\n unary::<B, _>(ops.parents, ops.node, grads, |_grad| {\n B::float_zeros(shape, &device)\n })\n }\n }\n\n match Floor\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroFloor::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (tensor.primitive.shape(), B::float_device(&tensor.primitive)),\n B::float_floor(tensor.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_floor(tensor.primitive)),\n }\n }\n\n fn float_ceil(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Ceil;\n retro_unary!(RetroCeil, B::float_ceil);\n\n impl<B: Backend> Backward<B, 1> for Ceil {\n type State = (Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape, device) = ops.state;\n unary::<B, _>(ops.parents, ops.node, grads, |_grad| {\n B::float_zeros(shape, &device)\n })\n }\n }\n\n match Ceil\n 
.prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroCeil::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (tensor.primitive.shape(), B::float_device(&tensor.primitive)),\n B::float_floor(tensor.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_floor(tensor.primitive)),\n }\n }\n\n fn float_erf(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Erf;\n\n retro_unary!(RetroErf, B::float_erf);\n\n impl<B: Backend> Backward<B, 1> for Erf {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let ops = checkpointer.retrieve_node_output(ops.state);\n let exponent = B::float_neg(B::float_powf_scalar(ops, 2.0));\n let numerator = B::float_mul_scalar(B::float_exp(exponent), 2.0.elem());\n let denominator = core::f64::consts::PI.sqrt().elem();\n let value = B::float_div_scalar(numerator, denominator);\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Erf\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroErf::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_erf(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_erf(tensor.primitive)),\n }\n }\n\n fn float_cat(tensors: Vec<FloatTensor<Self>>, dim: usize) -> FloatTensor<Self> {\n #[derive(new, Debug)]\n struct CatStep<B: Backend> {\n nodes: Vec<Option<NodeRef>>,\n // The dimension of each tensor along the dim dimension.\n // This indicates the number of dimension concatenated for each tensor.\n dim_sizes: Vec<usize>,\n output: NodeRef,\n phantom: PhantomData<B>,\n dim: usize,\n }\n\n impl<B: Backend> Step for CatStep<B> {\n fn step(self: Box<Self>, grads: &mut Gradients, 
_checkpointer: &mut Checkpointer) {\n let grad = grads.consume::<B>(&self.output);\n let ranges: Vec<_> = grad.shape().dims.iter().map(|v| 0..*v).collect();\n\n let mut current_index = 0;\n\n self.nodes\n .into_iter()\n .zip(self.dim_sizes)\n .filter_map(|(node, dim_size)| node.map(|node| (node, dim_size)))\n .for_each(|(node, dim_size)| {\n let mut ranges = ranges.clone();\n ranges[self.dim] = current_index..dim_size + current_index;\n current_index += dim_size;\n grads.register::<B>(node.id, B::float_slice(grad.clone(), &ranges));\n });\n }\n\n fn node(&self) -> NodeID {\n self.output.id\n }\n\n fn parents(&self) -> Vec<NodeID> {\n self.nodes\n .iter()\n .filter_map(|node| node.clone())\n .map(|node| node.id)\n .collect()\n }\n fn depth(&self) -> usize {\n self.output.order\n }\n }\n\n let mut nodes = Vec::with_capacity(tensors.len());\n let mut primitives = Vec::with_capacity(tensors.len());\n let mut dim_sizes = Vec::with_capacity(tensors.len());\n\n tensors.into_iter().for_each(|tensor| {\n dim_sizes.push(tensor.primitive.shape().dims[dim]);\n nodes.push(tensor.node);\n primitives.push(tensor.primitive);\n });\n\n let requirement = Requirement::from_nodes(&nodes);\n\n // For simplicity, this operation does not checkpoint anything\n let cat_computing_property = ComputingProperty::Ambiguous;\n let checkpointer_builder = CheckpointerBuilder::default();\n\n let output = B::float_cat(primitives, dim);\n if requirement.is_none() {\n return AutodiffTensor::from_parents(\n output,\n &nodes,\n requirement,\n cat_computing_property,\n );\n }\n\n let output =\n AutodiffTensor::from_parents(output, &nodes, requirement, cat_computing_property);\n let nodes = nodes\n .into_iter()\n .map(|node| node.clone_if_require_grad())\n .collect::<Vec<_>>();\n\n let ops = CatStep::<B>::new(nodes, dim_sizes, output.node.clone(), dim);\n output.register_step(ops, checkpointer_builder)\n }\n\n fn float_max_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {\n match 
MaxMinDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let shape = tensor.primitive.shape();\n let (tensor, index) = B::float_max_dim_with_indices(tensor.primitive, dim);\n prep.finish((index, shape), tensor)\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_max_dim(tensor.primitive, dim)),\n }\n }\n fn float_max_dim_with_indices(\n tensor: FloatTensor<Self>,\n dim: usize,\n ) -> (FloatTensor<Self>, IntTensor<B>) {\n match MaxMinDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let shape = tensor.primitive.shape();\n let (tensor, index) = B::float_max_dim_with_indices(tensor.primitive, dim);\n let tensor = prep.finish((index.clone(), shape), tensor);\n\n (tensor, index)\n }\n OpsKind::UnTracked(prep) => {\n let (tensor, index) = B::float_max_dim_with_indices(tensor.primitive, dim);\n let tensor = prep.finish(tensor);\n\n (tensor, index)\n }\n }\n }\n fn float_min_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {\n match MaxMinDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let shape = tensor.primitive.shape();\n let (tensor, index) = B::float_min_dim_with_indices(tensor.primitive, dim);\n prep.finish((index, shape), tensor)\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_min_dim(tensor.primitive, dim)),\n }\n }\n fn float_min_dim_with_indices(\n tensor: FloatTensor<Self>,\n dim: usize,\n ) -> (FloatTensor<Self>, IntTensor<B>) {\n match MaxMinDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let shape = tensor.primitive.shape();\n let (tensor, index) = B::float_min_dim_with_indices(tensor.primitive, dim);\n let tensor = prep.finish((index.clone(), shape), tensor);\n\n (tensor, index)\n }\n OpsKind::UnTracked(prep) => {\n let (tensor, index) = B::float_min_dim_with_indices(tensor.primitive, dim);\n let tensor = 
prep.finish(tensor);\n\n (tensor, index)\n }\n }\n }\n\n fn float_into_int(tensor: FloatTensor<Self>) -> <Autodiff<B> as Backend>::IntTensorPrimitive {\n B::float_into_int(tensor.primitive)\n }\n\n fn float_powf(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct PowF;\n\n retro_binary!(RetroPowf, B::float_powf);\n\n impl<B: Backend> Backward<B, 2> for PowF {\n type State = (NodeID, NodeID, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (lhs_id, rhs_id, broadcast) = ops.state;\n let lhs: B::FloatTensorPrimitive = checkpointer.retrieve_node_output(lhs_id);\n let rhs: B::FloatTensorPrimitive = checkpointer.retrieve_node_output(rhs_id);\n\n // Both lhs and rhs are needed for both lhs and rhs gradients, but we clone them\n // the number of times required by the parents specification.\n let [rhs_4lhs, rhs_4rhs] = duplicate(&ops.parents, Some(rhs));\n let [lhs_4lhs, lhs_4rhs] = duplicate(&ops.parents, Some(lhs));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n //rhs*(lhs.val**(rhs-1))*grad\n let rhs1 = rhs_4lhs.unwrap();\n let rhs2 = rhs1.clone();\n let lhs = lhs_4lhs.unwrap();\n\n let tmp = B::float_powf(\n lhs,\n B::float_sub_scalar(rhs1, B::FloatElem::from_elem(1.0)),\n );\n let value = B::float_mul(tmp, rhs2);\n let grad = B::float_mul(grad, value);\n\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n //lhs**rhs * ln(lhs) * grad\n let rhs = rhs_4rhs.unwrap();\n let lhs1 = lhs_4rhs.unwrap();\n let lhs2 = lhs1.clone();\n let tmp = B::float_powf(lhs1, rhs);\n let value = B::float_mul(tmp, B::float_log(lhs2));\n let grad = B::float_mul(grad, value);\n\n broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match PowF\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n 
.retro_forward(RetroPowf::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = prep.checkpoint(&lhs);\n let rhs_state = prep.checkpoint(&rhs);\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n B::float_powf(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_powf(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_sign(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sign;\n\n retro_unary!(RetroSign, B::float_sign);\n\n impl<B: Backend> Backward<B, 1> for Sign {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad|\n // Always return 0 because the derivative of the sign function\n // does not contribute to gradient updates in a meaningful way.\n B::float_mul_scalar(grad, 0.elem()));\n }\n }\n\n Sign.prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSign::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateless(B::float_sign(tensor.primitive))\n }\n\n fn float_expand(tensor: FloatTensor<Self>, shape: Shape) -> FloatTensor<Self> {\n // D1: tensor, D2: shape\n #[derive(Debug)]\n struct ExpandDim;\n\n #[derive(new, Debug)]\n struct RetroExpand<B: Backend> {\n input_id: NodeID,\n shape: Shape,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroExpand<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_expand(input, self.shape.clone());\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for ExpandDim {\n type State = (Shape, Shape);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape_in, shape_out) = ops.state;\n let 
ndims_in = shape_in.num_dims();\n let ndims_out = shape_out.num_dims();\n\n let mut shape_expanded = vec![1; ndims_out];\n\n debug_assert!(ndims_out >= ndims_in);\n\n for i in 0..ndims_in {\n shape_expanded[i + (ndims_out - ndims_in)] = shape_in.dims[i];\n }\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let shape_grad = grad.shape();\n let mut grad = grad;\n\n #[allow(clippy::needless_range_loop)]\n for i in 0..ndims_out {\n if shape_expanded[i] == 1 && shape_grad.dims[i] != 1 {\n grad = B::float_sum_dim(grad, i);\n }\n }\n\n B::float_reshape(grad, shape_in)\n });\n }\n }\n\n match ExpandDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroExpand::<B>::new(tensor.node.id, shape.clone()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (tensor.primitive.shape(), shape.clone()),\n B::float_expand(tensor.primitive, shape),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_expand(tensor.primitive, shape)),\n }\n }\n\n fn float_sort(tensor: FloatTensor<Self>, dim: usize, descending: bool) -> FloatTensor<Self> {\n match super::sort::SortDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let shape = tensor.primitive.shape();\n let (tensor, indices) =\n B::float_sort_with_indices(tensor.primitive, dim, descending);\n prep.finish((indices, shape), tensor)\n }\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_sort(tensor.primitive, dim, descending))\n }\n }\n }\n\n fn float_sort_with_indices(\n tensor: FloatTensor<Self>,\n dim: usize,\n descending: bool,\n ) -> (FloatTensor<Self>, IntTensor<B>) {\n match super::sort::SortDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let shape = tensor.primitive.shape();\n let (tensor, indices) =\n B::float_sort_with_indices(tensor.primitive, dim, descending);\n let tensor = prep.finish((indices.clone(), shape), tensor);\n\n (tensor, indices)\n 
}\n OpsKind::UnTracked(prep) => {\n let (tensor, indices) =\n B::float_sort_with_indices(tensor.primitive, dim, descending);\n let tensor = prep.finish(tensor);\n\n (tensor, indices)\n }\n }\n }\n\n fn float_argsort(tensor: FloatTensor<Self>, dim: usize, descending: bool) -> IntTensor<B> {\n B::float_argsort(tensor.primitive, dim, descending)\n }\n\n fn float_repeat_dim(tensor: FloatTensor<Self>, dim: usize, times: usize) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Repeat;\n\n #[derive(new, Debug)]\n struct RetroRepeat<B: Backend> {\n tensor_id: NodeID,\n dim: usize,\n times: usize,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroRepeat<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);\n let out = B::float_repeat_dim(tensor, self.dim, self.times);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for Repeat {\n type State = (usize, usize);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, times) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let mut dims = grad.shape().dims;\n let orig_dim_size = dims[dim] / times;\n if orig_dim_size > 1 {\n dims[dim] = orig_dim_size;\n let orig_dims = dims.clone();\n dims.insert(dim + 1, times); // shape [..., orig_dim_size, times, ...]\n let grad = B::float_reshape(grad, Shape::from(dims));\n let grad = B::float_sum_dim(grad, dim + 1); // sum over repeat times\n B::float_reshape(grad, Shape::from(orig_dims))\n } else {\n B::float_sum_dim(grad, dim)\n }\n });\n }\n }\n\n match Repeat\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroRepeat::<B>::new(tensor.node.id, dim, times))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (dim, times),\n B::float_repeat_dim(tensor.primitive, dim, times),\n ),\n 
OpsKind::UnTracked(prep) => {\n prep.finish(B::float_repeat_dim(tensor.primitive, dim, times))\n }\n }\n }\n\n fn float_cast(tensor: FloatTensor<Self>, dtype: burn_tensor::FloatDType) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_cast(tensor.primitive, dtype))\n }\n\n // TODO: Implement float_prod and float_sum\n // https://github.com/tracel-ai/burn/issues/1458\n}",
"class_signature": "impl<B: Backend, C: CheckpointStrategy> FloatTensorOps<Self> for Autodiff<B, C>"
} |
float_sub_scalar | burn-main/crates/burn-autodiff/src/ops/tensor.rs | fn float_sub_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {
#[derive(Debug)]
struct SubScalar;
retro_unary_scalar!(RetroSubScalar, B::float_sub_scalar);
impl<B: Backend> Backward<B, 1> for SubScalar {
type State = ();
fn backward(
self,
ops: Ops<Self::State, 1>,
grads: &mut Gradients,
_checkpointer: &mut Checkpointer,
) {
unary::<B, _>(ops.parents, ops.node, grads, |grad| grad);
}
}
SubScalar
.prepare::<C>([lhs.node.clone()])
.memory_bound()
.retro_forward(RetroSubScalar::<B>::new(lhs.node.id, rhs))
.parents([&lhs])
.stateless(B::float_sub_scalar(lhs.primitive, rhs))
} | use alloc::{boxed::Box, vec, vec::Vec};
use core::marker::PhantomData;
#[cfg(not(feature = "std"))]
#[allow(unused_imports, reason = "required on aarch64, unused on x86_64")]
use num_traits::float::Float;
use crate::{
Autodiff,
checkpoint::{
base::Checkpointer, builder::CheckpointerBuilder, retro_forward::RetroForward,
state::BackwardStates, strategy::CheckpointStrategy,
},
grads::Gradients,
graph::{ComputingProperty, NodeID, NodeRef, Requirement, Step},
ops::{Backward, Ops, OpsKind, binary, broadcast_shape, unary},
retro_binary, retro_unary, retro_unary_scalar,
tensor::AutodiffTensor,
utils::duplicate,
};
use burn_tensor::{
Device, ElementConversion, Shape, TensorData, TensorMetadata,
backend::Backend,
ops::{BoolTensor, FloatElem, FloatTensor, FloatTensorOps, IntTensor},
};
use super::maxmin::MaxMinDim;
// Unsqueeze op on primitive.
/// Reshapes `tensor` so that its rank matches the rank of `shape` by
/// prepending size-1 ("unsqueezed") dimensions; the original dimensions are
/// kept in the trailing positions. Only the *rank* of `shape` is used here,
/// never its dimension sizes.
fn unsqueeze_like<B: Backend>(
    tensor: B::FloatTensorPrimitive,
    shape: Shape,
) -> B::FloatTensorPrimitive {
    let ndims_out = shape.num_dims();
    let shape_in = tensor.shape();
    let ndims_in = shape_in.num_dims();

    // Guard the usize subtraction below: unsqueezing can only raise the rank.
    debug_assert!(
        ndims_out >= ndims_in,
        "cannot unsqueeze rank {ndims_in} to lower rank {ndims_out}"
    );

    // `num_ones` leading 1s, then the input dimensions unchanged.
    let num_ones = ndims_out - ndims_in;
    let mut dims = vec![1; ndims_out];
    dims[num_ones..(ndims_in + num_ones)].copy_from_slice(&shape_in.dims[..ndims_in]);

    B::float_reshape(tensor, Shape::from(dims))
}
impl<B: Backend, C: CheckpointStrategy> FloatTensorOps<Self> for Autodiff<B, C> {
fn float_from_data(data: TensorData, device: &Device<Self>) -> FloatTensor<Self> {
AutodiffTensor::new(B::float_from_data(data, device))
}
fn float_random(
shape: Shape,
distribution: burn_tensor::Distribution,
device: &Device<Self>,
) -> FloatTensor<Self> {
AutodiffTensor::new(B::float_random(shape, distribution, device))
}
fn float_zeros(shape: Shape, device: &Device<Self>) -> FloatTensor<Self> {
AutodiffTensor::new(B::float_zeros(shape, device))
}
fn float_ones(shape: Shape, device: &Device<Self>) -> FloatTensor<Self> {
AutodiffTensor::new(B::float_ones(shape, device))
}
async fn float_into_data(tensor: FloatTensor<Self>) -> TensorData {
B::float_into_data(tensor.primitive).await
}
fn float_device(tensor: &FloatTensor<Self>) -> Device<Self> {
B::float_device(&tensor.primitive)
}
fn float_to_device(tensor: FloatTensor<Self>, device: &Device<Self>) -> FloatTensor<Self> {
#[derive(Debug)]
struct ToDevice;
impl<B: Backend> Backward<B, 1> for ToDevice {
type State = B::Device;
fn backward(
self,
ops: Ops<Self::State, 1>,
grads: &mut Gradients,
_checkpointer: &mut Checkpointer,
) {
unary::<B, _>(ops.parents, ops.node, grads, |grad| {
B::float_to_device(grad, &ops.state)
});
}
}
match ToDevice
.prepare::<C>([tensor.node])
.compute_bound()
.stateful()
{
OpsKind::Tracked(prep) => {
let device_old = B::float_device(&tensor.primitive);
prep.finish(device_old, B::float_to_device(tensor.primitive, device))
}
OpsKind::UnTracked(prep) => prep.finish(B::float_to_device(tensor.primitive, device)),
}
}
fn float_empty(shape: Shape, device: &Device<Self>) -> FloatTensor<Self> {
AutodiffTensor::new(B::float_empty(shape, device))
}
fn float_add(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {
#[derive(Debug)]
struct Add;
retro_binary!(RetroAdd, B::float_add);
impl<B: Backend> Backward<B, 2> for Add {
type State = (Shape, Shape);
fn backward(
self,
ops: Ops<Self::State, 2>,
grads: &mut Gradients,
_checkpointer: &mut Checkpointer,
) {
let (shape_lhs, shape_rhs) = ops.state;
binary::<B, _, _>(
ops.parents,
ops.node,
grads,
|grad| broadcast_shape::<B>(grad, &shape_lhs),
|grad| broadcast_shape::<B>(grad, &shape_rhs),
);
}
}
match Add
.prepare::<C>([lhs.node.clone(), rhs.node.clone()])
.memory_bound()
.retro_forward(RetroAdd::<B>::new(lhs.node.id, rhs.node.id))
.parents([&lhs, &rhs])
.stateful()
{
OpsKind::Tracked(preps) => preps.finish(
(lhs.primitive.shape(), rhs.primitive.shape()),
B::float_add(lhs.primitive, rhs.primitive),
),
OpsKind::UnTracked(preps) => preps.finish(B::float_add(lhs.primitive, rhs.primitive)),
}
}
fn float_add_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {
#[derive(Debug)]
struct AddScalar;
retro_unary_scalar!(RetroAddScalar, B::float_add_scalar);
impl<B: Backend> Backward<B, 1> for AddScalar {
type State = ();
fn backward(
self,
ops: Ops<Self::State, 1>,
grads: &mut Gradients,
_checkpointer: &mut Checkpointer,
) {
unary::<B, _>(ops.parents, ops.node, grads, |grad| grad);
}
}
AddScalar
.prepare::<C>([lhs.node.clone()])
.memory_bound()
.retro_forward(RetroAddScalar::<B>::new(lhs.node.id, rhs))
.parents([&lhs])
.stateless(B::float_add_scalar(lhs.primitive, rhs))
}
fn float_sub(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {
#[derive(Debug)]
struct Sub;
retro_binary!(RetroSub, B::float_sub);
impl<B: Backend> Backward<B, 2> for Sub {
type State = (Shape, Shape);
fn backward(
self,
ops: Ops<Self::State, 2>,
grads: &mut Gradients,
_checkpointer: &mut Checkpointer,
) {
let (shape_lhs, shape_rhs) = ops.state;
binary::<B, _, _>(
ops.parents,
ops.node,
grads,
|grad| broadcast_shape::<B>(grad, &shape_lhs),
|grad| broadcast_shape::<B>(B::float_neg(grad), &shape_rhs),
);
}
}
match Sub
.prepare::<C>([lhs.node.clone(), rhs.node.clone()])
.memory_bound()
.retro_forward(RetroSub::<B>::new(lhs.node.id, rhs.node.id))
.parents([&lhs, &rhs])
.stateful()
{
OpsKind::Tracked(preps) => preps.finish(
(lhs.primitive.shape(), rhs.primitive.shape()),
B::float_sub(lhs.primitive, rhs.primitive),
),
OpsKind::UnTracked(preps) => preps.finish(B::float_sub(lhs.primitive, rhs.primitive)),
}
}
fn float_sub_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {
#[derive(Debug)]
struct SubScalar;
retro_unary_scalar!(RetroSubScalar, B::float_sub_scalar);
impl<B: Backend> Backward<B, 1> for SubScalar {
type State = ();
fn backward(
self,
ops: Ops<Self::State, 1>,
grads: &mut Gradients,
_checkpointer: &mut Checkpointer,
) {
unary::<B, _>(ops.parents, ops.node, grads, |grad| grad);
}
}
SubScalar
.prepare::<C>([lhs.node.clone()])
.memory_bound()
.retro_forward(RetroSubScalar::<B>::new(lhs.node.id, rhs))
.parents([&lhs])
.stateless(B::float_sub_scalar(lhs.primitive, rhs))
}
    fn float_mul(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {
        // Elementwise multiplication. d(a*b)/da = b and d(a*b)/db = a, so each
        // side's backward pass needs the *other* operand's forward value; those
        // values are checkpointed only when actually required (see below).
        #[derive(Debug)]
        struct Mul;
        retro_binary!(RetroMul, B::float_mul);
        impl<B: Backend> Backward<B, 2> for Mul {
            // (lhs checkpoint, rhs checkpoint, broadcast info). A `None`
            // checkpoint means the corresponding gradient is never requested.
            type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);
            fn backward(
                self,
                ops: Ops<Self::State, 2>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                let (lhs, rhs, broadcast) = ops.state;
                let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));
                let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));
                binary::<B, _, _>(
                    ops.parents,
                    ops.node,
                    grads,
                    |grad| {
                        // dL/d(lhs) = grad * rhs, reduced over broadcast dims.
                        let grad = B::float_mul(grad, rhs.unwrap());
                        broadcast.backward_lhs::<B>(grad)
                    },
                    |grad| {
                        // dL/d(rhs) = grad * lhs, reduced over broadcast dims.
                        let grad = B::float_mul(grad, lhs.unwrap());
                        broadcast.backward_rhs::<B>(grad)
                    },
                );
            }
        }
        let lhs_tracked = lhs.is_tracked();
        let rhs_tracked = rhs.is_tracked();
        let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);
        match Mul
            .prepare::<C>([lhs.node.clone(), rhs.node.clone()])
            .memory_bound()
            .retro_forward(RetroMul::<B>::new(lhs.node.id, rhs.node.id))
            .parents([&lhs, &rhs])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                // Note the cross-over: lhs is checkpointed because the *rhs*
                // gradient reads it, and vice versa.
                let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));
                let rhs_state = lhs_tracked.then(|| prep.checkpoint(&rhs));
                prep.finish(
                    (lhs_state, rhs_state, broadcast),
                    B::float_mul(lhs.primitive, rhs.primitive),
                )
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_mul(lhs.primitive, rhs.primitive)),
        }
    }
fn float_mul_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {
#[derive(Debug)]
struct MulScalar;
retro_unary_scalar!(RetroMulScalar, B::float_mul_scalar);
impl<B: Backend> Backward<B, 1> for MulScalar {
type State = FloatElem<B>;
fn backward(
self,
ops: Ops<Self::State, 1>,
grads: &mut Gradients,
_checkpointer: &mut Checkpointer,
) {
unary::<B, _>(ops.parents, ops.node, grads, |grad| {
B::float_mul_scalar(grad, ops.state)
});
}
}
match MulScalar
.prepare::<C>([lhs.node.clone()])
.memory_bound()
.retro_forward(RetroMulScalar::<B>::new(lhs.node.id, rhs))
.parents([&lhs])
.stateful()
{
OpsKind::Tracked(prep) => prep.finish(rhs, B::float_mul_scalar(lhs.primitive, rhs)),
OpsKind::UnTracked(prep) => prep.finish(B::float_mul_scalar(lhs.primitive, rhs)),
}
}
    fn float_div(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {
        // Elementwise division a / b. d/da = 1/b and d/db = -a/b^2; note that
        // `rhs` is needed by *both* gradients, which drives the checkpoint
        // condition `lhs_tracked || rhs_tracked` below.
        #[derive(Debug)]
        struct Div;
        retro_binary!(RetroDiv, B::float_div);
        impl<B: Backend> Backward<B, 2> for Div {
            // (lhs checkpoint, rhs checkpoint, broadcast info).
            type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);
            fn backward(
                self,
                ops: Ops<Self::State, 2>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                let (lhs, rhs, broadcast) = ops.state;
                let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));
                let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));
                // `rhs` is consumed by both closures; duplicate one copy per
                // tracked parent so each closure owns its own value.
                let [rhs_4lhs, rhs_4rhs] = duplicate(&ops.parents, rhs);
                binary::<B, _, _>(
                    ops.parents,
                    ops.node,
                    grads,
                    |grad| {
                        // dL/d(lhs) = grad * rhs^-1, reduced over broadcast dims.
                        let rhs = rhs_4lhs.unwrap();
                        let value = B::float_powf_scalar(rhs, -1.0);
                        let grad = B::float_mul(grad, value);
                        broadcast.backward_lhs::<B>(grad)
                    },
                    |grad| {
                        // dL/d(rhs) = grad * (-lhs / rhs^2), reduced over
                        // broadcast dims.
                        let rhs = rhs_4rhs.unwrap();
                        let lhs = lhs.unwrap();
                        let value = B::float_div(B::float_neg(lhs), B::float_powf_scalar(rhs, 2.0));
                        let grad = B::float_mul(grad, value);
                        broadcast.backward_rhs::<B>(grad)
                    },
                );
            }
        }
        let lhs_tracked = lhs.is_tracked();
        let rhs_tracked = rhs.is_tracked();
        let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);
        match Div
            .prepare::<C>([lhs.node.clone(), rhs.node.clone()])
            .memory_bound()
            .retro_forward(RetroDiv::<B>::new(lhs.node.id, rhs.node.id))
            .parents([&lhs, &rhs])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                // lhs is only read by the rhs gradient; rhs is read by both.
                let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));
                let rhs_state = (lhs_tracked || rhs_tracked).then(|| prep.checkpoint(&rhs));
                prep.finish(
                    (lhs_state, rhs_state, broadcast),
                    B::float_div(lhs.primitive, rhs.primitive),
                )
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_div(lhs.primitive, rhs.primitive)),
        }
    }
fn float_div_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {
#[derive(Debug)]
struct DivScalar;
retro_unary_scalar!(RetroDivScalar, B::float_div_scalar);
impl<B: Backend> Backward<B, 1> for DivScalar {
type State = FloatElem<B>;
fn backward(
self,
ops: Ops<Self::State, 1>,
grads: &mut Gradients,
_checkpointer: &mut Checkpointer,
) {
unary::<B, _>(ops.parents, ops.node, grads, |grad| {
let tmp = 1.0 / ops.state.elem::<f32>();
B::float_mul_scalar(grad, tmp.elem())
});
}
}
match DivScalar
.prepare::<C>([lhs.node.clone()])
.memory_bound()
.retro_forward(RetroDivScalar::<B>::new(lhs.node.id, rhs))
.parents([&lhs])
.stateful()
{
OpsKind::Tracked(prep) => prep.finish(rhs, B::float_div_scalar(lhs.primitive, rhs)),
OpsKind::UnTracked(prep) => prep.finish(B::float_div_scalar(lhs.primitive, rhs)),
}
}
fn float_remainder(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {
#[derive(Debug)]
struct Rem;
retro_binary!(RetroRem, B::float_remainder);
impl<B: Backend> Backward<B, 2> for Rem {
type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);
fn backward(
self,
ops: Ops<Self::State, 2>,
grads: &mut Gradients,
checkpointer: &mut Checkpointer,
) {
let (lhs, rhs, broadcast) = ops.state;
let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));
let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));
binary::<B, _, _>(
ops.parents,
ops.node,
grads,
|grad| {
// remainder(x, y) = x - floor(x / y) * y
// partial(x - floor(x / y) * y, x) = 1
broadcast.backward_lhs::<B>(grad)
},
|grad| {
// partial(x - floor(x / y) * y, y) = - floor(x / y)
let rhs = rhs.unwrap();
let lhs = lhs.unwrap();
let value = B::float_neg(B::float_floor(B::float_div(lhs, rhs)));
let grad = B::float_mul(grad, value);
broadcast.backward_rhs::<B>(grad)
},
);
}
}
let lhs_tracked = lhs.is_tracked();
let rhs_tracked = rhs.is_tracked();
let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);
match Rem
.prepare::<C>([lhs.node.clone(), rhs.node.clone()])
.memory_bound()
.retro_forward(RetroRem::<B>::new(lhs.node.id, rhs.node.id))
.parents([&lhs, &rhs])
.stateful()
{
OpsKind::Tracked(mut prep) => {
let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));
let rhs_state = (lhs_tracked || rhs_tracked).then(|| prep.checkpoint(&rhs));
prep.finish(
(lhs_state, rhs_state, broadcast),
B::float_remainder(lhs.primitive, rhs.primitive),
)
}
OpsKind::UnTracked(prep) => {
prep.finish(B::float_remainder(lhs.primitive, rhs.primitive))
}
}
}
fn float_remainder_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {
#[derive(Debug)]
struct RemainderScalar;
retro_unary_scalar!(RetroRemainderScalar, B::float_remainder_scalar);
impl<B: Backend> Backward<B, 1> for RemainderScalar {
type State = ();
fn backward(
self,
ops: Ops<Self::State, 1>,
grads: &mut Gradients,
_checkpointer: &mut Checkpointer,
) {
unary::<B, _>(ops.parents, ops.node, grads, |grad| grad);
}
}
RemainderScalar
.prepare::<C>([lhs.node.clone()])
.memory_bound()
.retro_forward(RetroRemainderScalar::<B>::new(lhs.node.id, rhs))
.parents([&lhs])
.stateless(B::float_remainder_scalar(lhs.primitive, rhs))
}
    fn float_matmul(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {
        // Matrix multiplication. dL/d(lhs) = grad @ rhs^T and
        // dL/d(rhs) = lhs^T @ grad, so each side needs the other operand's
        // forward value; checkpoints are gated on the opposite side's tracking.
        #[derive(Debug)]
        struct Matmul;
        impl<B: Backend> Backward<B, 2> for Matmul {
            // (lhs checkpoint, rhs checkpoint, broadcast info).
            type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);
            fn backward(
                self,
                ops: Ops<Self::State, 2>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                let (lhs, rhs, broadcast) = ops.state;
                let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));
                let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));
                binary::<B, _, _>(
                    ops.parents,
                    ops.node,
                    grads,
                    |grad| {
                        // dL/d(lhs) = grad @ rhs^T, reduced over broadcast dims.
                        let rhs = B::float_transpose(rhs.unwrap());
                        let grad = B::float_matmul(grad, rhs);
                        broadcast.backward_lhs::<B>(grad)
                    },
                    |grad| {
                        // dL/d(rhs) = lhs^T @ grad, reduced over broadcast dims.
                        let lhs = B::float_transpose(lhs.unwrap());
                        let grad = B::float_matmul(lhs, grad);
                        broadcast.backward_rhs::<B>(grad)
                    },
                );
            }
        }
        let lhs_tracked = lhs.is_tracked();
        let rhs_tracked = rhs.is_tracked();
        let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);
        // Compute-bound: recomputing a matmul is costlier than storing it, so
        // no retro-forward is registered for this op.
        match Matmul
            .prepare::<C>([lhs.node.clone(), rhs.node.clone()])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                // Cross-over checkpointing: each operand is saved only when
                // the *other* operand's gradient is requested.
                let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));
                let rhs_state = lhs_tracked.then(|| prep.checkpoint(&rhs));
                prep.finish(
                    (lhs_state, rhs_state, broadcast),
                    B::float_matmul(lhs.primitive, rhs.primitive),
                )
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_matmul(lhs.primitive, rhs.primitive)),
        }
    }
fn float_neg(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
#[derive(Debug)]
struct Neg;
retro_unary!(RetroNeg, B::float_neg);
impl<B: Backend> Backward<B, 1> for Neg {
type State = ();
fn backward(
self,
ops: Ops<Self::State, 1>,
grads: &mut Gradients,
_checkpointer: &mut Checkpointer,
) {
unary::<B, _>(ops.parents, ops.node, grads, |grad| B::float_neg(grad));
}
}
Neg.prepare::<C>([tensor.node.clone()])
.memory_bound()
.retro_forward(RetroNeg::<B>::new(tensor.node.id))
.parents([&tensor])
.stateless(B::float_neg(tensor.primitive))
}
fn float_recip(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
#[derive(Debug)]
struct Recip;
retro_unary!(RetroRecip, B::float_recip);
impl<B: Backend> Backward<B, 1> for Recip {
type State = NodeID;
fn backward(
self,
ops: Ops<Self::State, 1>,
grads: &mut Gradients,
checkpointer: &mut Checkpointer,
) {
let tensor = checkpointer.retrieve_node_output(ops.state);
unary::<B, _>(ops.parents, ops.node, grads, |grad| {
let tmp = B::float_powf_scalar(tensor, -2.0);
let value = B::float_neg(tmp);
B::float_mul(grad, value)
});
}
}
match Recip
.prepare::<C>([tensor.node.clone()])
.memory_bound()
.retro_forward(RetroRecip::<B>::new(tensor.node.id))
.parents([&tensor])
.stateful()
{
OpsKind::Tracked(mut prep) => {
let state = prep.checkpoint(&tensor);
prep.finish(state, B::float_recip(tensor.primitive))
}
OpsKind::UnTracked(prep) => prep.finish(B::float_recip(tensor.primitive)),
}
}
    fn float_swap_dims(tensor: FloatTensor<Self>, dim1: usize, dim2: usize) -> FloatTensor<Self> {
        // Swapping two dimensions is its own inverse, so the backward pass
        // simply swaps the same pair on the gradient.
        #[derive(Debug)]
        struct SwapDim;
        // Replays the forward swap from the checkpointed input when this op is
        // recomputed lazily (memory-bound checkpointing).
        #[derive(new, Debug)]
        struct RetroSwapDims<B: Backend> {
            input_id: NodeID,
            dim1: usize,
            dim2: usize,
            _backend: PhantomData<B>,
        }
        impl<B: Backend> RetroForward for RetroSwapDims<B> {
            fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
                let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);
                let out = B::float_swap_dims(input, self.dim1, self.dim2);
                states.save(out_node, out)
            }
        }
        impl<B: Backend> Backward<B, 1> for SwapDim {
            // The swapped dimension pair.
            type State = (usize, usize);
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (dim1, dim2) = ops.state;
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    // Undo the swap on the gradient.
                    B::float_swap_dims(grad, dim2, dim1)
                });
            }
        }
        match SwapDim
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroSwapDims::<B>::new(tensor.node.id, dim1, dim2))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(prep) => prep.finish(
                (dim1, dim2),
                B::float_swap_dims(tensor.primitive, dim1, dim2),
            ),
            OpsKind::UnTracked(prep) => {
                prep.finish(B::float_swap_dims(tensor.primitive, dim1, dim2))
            }
        }
    }
    fn float_permute(tensor: FloatTensor<Self>, axes: &[usize]) -> FloatTensor<Self> {
        // Axis permutation; the backward pass applies the inverse permutation
        // to the gradient.
        #[derive(Debug)]
        struct PermuteDim;
        // Replays the forward permute from the checkpointed input when this op
        // is recomputed lazily.
        #[derive(new, Debug)]
        struct RetroPermuteDims<B: Backend> {
            input_id: NodeID,
            axes: Vec<usize>,
            _backend: PhantomData<B>,
        }
        impl<B: Backend> RetroForward for RetroPermuteDims<B> {
            fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
                let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);
                let out = B::float_permute(input, &self.axes);
                states.save(out_node, out)
            }
        }
        impl<B: Backend> Backward<B, 1> for PermuteDim {
            // The forward permutation.
            type State = Vec<usize>;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let axes = ops.state;
                // Build the inverse permutation: if axis `axis` moved to
                // position `i` forward, it must move back from `i` to `axis`.
                let mut inverse = vec![0usize; axes.len()];
                axes.iter()
                    .enumerate()
                    .for_each(|(i, &axis)| inverse[axis] = i);
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    B::float_permute(grad, &inverse)
                });
            }
        }
        match PermuteDim
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroPermuteDims::<B>::new(tensor.node.id, axes.to_vec()))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(prep) => {
                prep.finish(axes.to_vec(), B::float_permute(tensor.primitive, axes))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_permute(tensor.primitive, axes)),
        }
    }
    fn float_flip(tensor: FloatTensor<Self>, axes: &[usize]) -> FloatTensor<Self> {
        // Flipping along axes is its own inverse, so the backward pass flips
        // the gradient along the same axes.
        #[derive(Debug)]
        struct FlipDim;
        // Replays the forward flip from the checkpointed input when this op is
        // recomputed lazily.
        #[derive(new, Debug)]
        struct RetroFlipDims<B: Backend> {
            input_id: NodeID,
            axes: Vec<usize>,
            _backend: PhantomData<B>,
        }
        impl<B: Backend> RetroForward for RetroFlipDims<B> {
            fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
                let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);
                let out = B::float_flip(input, &self.axes);
                states.save(out_node, out)
            }
        }
        impl<B: Backend> Backward<B, 1> for FlipDim {
            // The flipped axes.
            type State = Vec<usize>;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let axes = ops.state;
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    // Un-flip the gradient (flip is an involution).
                    B::float_flip(grad, &axes)
                });
            }
        }
        match FlipDim
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroFlipDims::<B>::new(tensor.node.id, axes.to_vec()))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(prep) => {
                prep.finish(axes.to_vec(), B::float_flip(tensor.primitive, axes))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_flip(tensor.primitive, axes)),
        }
    }
    fn float_reshape(tensor: FloatTensor<Self>, shape: Shape) -> FloatTensor<Self> {
        // Reshape; the backward pass reshapes the gradient back to the input
        // shape, first reducing any dimension that was 1 in the requested
        // shape but larger in the gradient (i.e. the gradient was broadcast).
        #[derive(Debug)]
        struct ReshapeDim;
        // Replays the forward reshape from the checkpointed input when this op
        // is recomputed lazily.
        #[derive(new, Debug)]
        struct RetroReshape<B: Backend> {
            input_id: NodeID,
            shape: Shape,
            _backend: PhantomData<B>,
        }
        impl<B: Backend> RetroForward for RetroReshape<B> {
            fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
                let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);
                let out = B::float_reshape(input, self.shape.clone());
                states.save(out_node, out)
            }
        }
        impl<B: Backend> Backward<B, 1> for ReshapeDim {
            // (input shape, requested output shape).
            type State = (Shape, Shape);
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (shape_original, shape) = ops.state;
                let ndims_out = shape.num_dims();
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    let shape_grad = grad.shape();
                    let mut grad = grad;
                    // Sum over dims where the output was size 1 but the
                    // incoming gradient is larger (broadcast during backward).
                    for i in 0..ndims_out {
                        if shape.dims[i] == 1 && shape_grad.dims[i] != 1 {
                            grad = B::float_sum_dim(grad, i);
                        }
                    }
                    B::float_reshape(grad, shape_original)
                });
            }
        }
        match ReshapeDim
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroReshape::<B>::new(tensor.node.id, shape.clone()))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(prep) => prep.finish(
                (tensor.primitive.shape(), shape.clone()),
                B::float_reshape(tensor.primitive, shape),
            ),
            OpsKind::UnTracked(prep) => prep.finish(B::float_reshape(tensor.primitive, shape)),
        }
    }
    fn float_gather(
        dim: usize,
        tensor: FloatTensor<Self>,
        indices: IntTensor<B>,
    ) -> FloatTensor<Self> {
        // Gather along `dim`; the backward pass scatters the gradient back to
        // the positions the values were gathered from.
        #[derive(Debug)]
        struct Gather;
        impl<B: Backend> Backward<B, 1> for Gather {
            // (dim, gather indices, input shape, input device).
            type State = (usize, IntTensor<B>, Shape, B::Device);
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (dim, indices, shape, device) = ops.state;
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    // Adjoint of gather: route each output gradient into a
                    // zero tensor of the input's shape at the source index.
                    let zeros = B::float_zeros(shape, &device);
                    B::float_scatter(dim, zeros, indices, grad)
                });
            }
        }
        match Gather
            .prepare::<C>([tensor.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => prep.finish(
                (
                    dim,
                    indices.clone(),
                    tensor.primitive.shape(),
                    B::float_device(&tensor.primitive),
                ),
                B::float_gather(dim, tensor.primitive, indices),
            ),
            OpsKind::UnTracked(prep) => {
                prep.finish(B::float_gather(dim, tensor.primitive, indices))
            }
        }
    }
    fn float_scatter(
        dim: usize,
        tensor: FloatTensor<Self>,
        indices: IntTensor<B>,
        value: FloatTensor<Self>,
    ) -> FloatTensor<Self> {
        // Scatter `value` into `tensor` along `dim` at `indices`.
        #[derive(Debug)]
        struct Scatter;
        impl<B: Backend> Backward<B, 2> for Scatter {
            // (dim, indices, tensor shape, value shape, device).
            type State = (usize, IntTensor<B>, Shape, Shape, B::Device);
            fn backward(
                self,
                ops: Ops<Self::State, 2>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (dim, indices, shape_lhs, shape_rhs, device) = ops.state;
                // `indices` is needed by both closures; give each tracked
                // parent its own copy.
                let [indices_4lhs, indices_4rhs] = duplicate(&ops.parents, Some(indices));
                // NOTE(review): both gradients are expressed via
                // B::float_scatter of zeros/grad; this relies on the backend's
                // scatter accumulate-vs-overwrite contract — verify against
                // the `float_scatter` specification.
                binary::<B, _, _>(
                    ops.parents,
                    ops.node,
                    grads,
                    |grad| {
                        // Gradient w.r.t. the base tensor: scatter zeros over
                        // `grad` at the updated positions.
                        let zeros = B::float_zeros(shape_lhs, &device);
                        B::float_scatter(dim, grad, indices_4lhs.unwrap(), zeros)
                    },
                    |grad| {
                        // Gradient w.r.t. `value`: collect `grad` into a zero
                        // tensor of the value's shape at the same positions.
                        let zeros = B::float_zeros(shape_rhs, &device);
                        B::float_scatter(dim, zeros, indices_4rhs.unwrap(), grad)
                    },
                );
            }
        }
        match Scatter
            .prepare::<C>([tensor.node, value.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => prep.finish(
                (
                    dim,
                    indices.clone(),
                    tensor.primitive.shape(),
                    value.primitive.shape(),
                    B::float_device(&value.primitive),
                ),
                B::float_scatter(dim, tensor.primitive, indices, value.primitive),
            ),
            OpsKind::UnTracked(prep) => prep.finish(B::float_scatter(
                dim,
                tensor.primitive,
                indices,
                value.primitive,
            )),
        }
    }
    fn float_select(
        tensor: FloatTensor<Self>,
        dim: usize,
        indices: IntTensor<B>,
    ) -> FloatTensor<Self> {
        // Index-select rows/slices along `dim`; the backward pass assigns the
        // gradient back into a zero tensor at the selected positions.
        #[derive(Debug)]
        struct Select;
        // Replays the forward select from the checkpointed input when this op
        // is recomputed lazily.
        #[derive(new, Debug)]
        struct RetroSelect<B: Backend> {
            input_id: NodeID,
            dim: usize,
            indices: IntTensor<B>,
        }
        impl<B: Backend> RetroForward for RetroSelect<B> {
            fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
                let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);
                let out = B::float_select(input, self.dim, self.indices.clone());
                states.save(out_node, out)
            }
        }
        impl<B: Backend> Backward<B, 1> for Select {
            // (dim, select indices, input shape, input device).
            type State = (usize, IntTensor<B>, Shape, B::Device);
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (dim, indices, shape, device) = ops.state;
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    // Adjoint of select: place the output gradient back at the
                    // selected indices of a zero tensor of the input's shape.
                    let zeros = B::float_zeros(shape, &device);
                    B::float_select_assign(zeros, dim, indices, grad)
                });
            }
        }
        match Select
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroSelect::<B>::new(tensor.node.id, dim, indices.clone()))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(prep) => prep.finish(
                (
                    dim,
                    indices.clone(),
                    tensor.primitive.shape(),
                    B::float_device(&tensor.primitive),
                ),
                B::float_select(tensor.primitive, dim, indices),
            ),
            OpsKind::UnTracked(prep) => {
                prep.finish(B::float_select(tensor.primitive, dim, indices))
            }
        }
    }
    fn float_select_assign(
        tensor: FloatTensor<Self>,
        dim: usize,
        indices: IntTensor<B>,
        value: FloatTensor<Self>,
    ) -> FloatTensor<Self> {
        // Assign `value` into `tensor` at `indices` along `dim`.
        #[derive(Debug)]
        struct IndexSelectDimAssign;
        // Replays the forward select-assign from the two checkpointed inputs
        // when this op is recomputed lazily.
        #[derive(new, Debug)]
        struct RetroSelectAssign<B: Backend> {
            tensor_id: NodeID,
            dim: usize,
            indices: IntTensor<B>,
            value_id: NodeID,
        }
        impl<B: Backend> RetroForward for RetroSelectAssign<B> {
            fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
                let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);
                let value = states.get_state::<B::FloatTensorPrimitive>(&self.value_id);
                let out = B::float_select_assign(tensor, self.dim, self.indices.clone(), value);
                states.save(out_node, out)
            }
        }
        impl<B: Backend> Backward<B, 2> for IndexSelectDimAssign {
            // (dim, assignment indices).
            type State = (usize, IntTensor<B>);
            fn backward(
                self,
                ops: Ops<Self::State, 2>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (dim, indices) = ops.state;
                binary::<B, _, _>(
                    ops.parents,
                    ops.node,
                    grads,
                    // Base tensor gradient passes through unchanged.
                    // NOTE(review): this treats the op as accumulating into
                    // `tensor` rather than overwriting — verify against
                    // `float_select_assign`'s contract.
                    |grad| grad,
                    // Value gradient: pick out the gradient at the assigned
                    // positions.
                    |grad| B::float_select(grad, dim, indices),
                );
            }
        }
        match IndexSelectDimAssign
            .prepare::<C>([tensor.node.clone(), value.node.clone()])
            .memory_bound()
            .retro_forward(RetroSelectAssign::<B>::new(
                tensor.node.id,
                dim,
                indices.clone(),
                value.node.id,
            ))
            .parents([&tensor, &value])
            .stateful()
        {
            OpsKind::Tracked(prep) => prep.finish(
                (dim, indices.clone()),
                B::float_select_assign(tensor.primitive, dim, indices, value.primitive),
            ),
            OpsKind::UnTracked(prep) => prep.finish(B::float_select_assign(
                tensor.primitive,
                dim,
                indices,
                value.primitive,
            )),
        }
    }
    fn float_slice(
        tensor: FloatTensor<Self>,
        ranges: &[core::ops::Range<usize>],
    ) -> FloatTensor<Self> {
        // Slice the tensor with per-dimension ranges, tracked for autodiff.
        #[derive(Debug)]
        struct Index;
        // Recomputes the forward slice from the checkpointed input tensor.
        #[derive(new, Debug)]
        struct RetroSlice<B: Backend> {
            tensor_id: NodeID,
            ranges: Vec<core::ops::Range<usize>>,
            _backend: PhantomData<B>,
        }
        impl<B: Backend> RetroForward for RetroSlice<B> {
            fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
                let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);
                let out = B::float_slice(tensor, &self.ranges);
                states.save(out_node, out)
            }
        }
        impl<B: Backend> Backward<B, 1> for Index {
            // (ranges, input shape, device) captured during the forward pass.
            type State = (Vec<core::ops::Range<usize>>, Shape, B::Device);
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (ranges, shape, device) = ops.state;
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    // Write the sliced gradient back into a zero tensor of the
                    // original input shape at the sliced region.
                    let zeros = B::float_zeros(shape, &device);
                    B::float_slice_assign(zeros, &ranges, grad)
                });
            }
        }
        match Index
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroSlice::<B>::new(tensor.node.id, ranges.to_vec()))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(prep) => prep.finish(
                (
                    ranges.to_vec(),
                    tensor.primitive.shape(),
                    B::float_device(&tensor.primitive),
                ),
                B::float_slice(tensor.primitive, ranges),
            ),
            OpsKind::UnTracked(prep) => prep.finish(B::float_slice(tensor.primitive, ranges)),
        }
    }
    fn float_slice_assign(
        tensor: FloatTensor<Self>,
        ranges: &[core::ops::Range<usize>],
        value: FloatTensor<Self>,
    ) -> FloatTensor<Self> {
        // Overwrite the sliced region of `tensor` with `value`, tracked for autodiff.
        #[derive(Debug)]
        struct SliceAssign;
        // Recomputes the forward assignment from the two checkpointed parents.
        #[derive(new, Debug)]
        struct RetroSliceAssign<B: Backend> {
            tensor_id: NodeID,
            ranges: Vec<core::ops::Range<usize>>,
            value_id: NodeID,
            _backend: PhantomData<B>,
        }
        impl<B: Backend> RetroForward for RetroSliceAssign<B> {
            fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
                let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);
                let value = states.get_state::<B::FloatTensorPrimitive>(&self.value_id);
                let out = B::float_slice_assign(tensor, &self.ranges, value);
                states.save(out_node, out)
            }
        }
        impl<B: Backend> Backward<B, 2> for SliceAssign {
            // (ranges, value shape, device) captured during the forward pass.
            type State = (Vec<core::ops::Range<usize>>, Shape, B::Device);
            fn backward(
                self,
                ops: Ops<Self::State, 2>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (ranges, shape_rhs, device) = ops.state;
                // Clone the ranges only as many times as tracked parents require.
                let [ranges_4lhs, ranges_4rhs] = duplicate(&ops.parents, Some(ranges));
                binary::<B, _, _>(
                    ops.parents,
                    ops.node,
                    grads,
                    // The destination keeps the gradient everywhere except the
                    // overwritten region, which is zeroed out.
                    |grad| {
                        let zeros = B::float_zeros(shape_rhs, &device);
                        B::float_slice_assign(grad, &ranges_4lhs.unwrap(), zeros)
                    },
                    // The value receives the gradient of the region it was written to.
                    |grad| B::float_slice(grad, &ranges_4rhs.unwrap()),
                );
            }
        }
        match SliceAssign
            .prepare::<C>([tensor.node.clone(), value.node.clone()])
            .memory_bound()
            .retro_forward(RetroSliceAssign::<B>::new(
                tensor.node.id,
                ranges.to_vec(),
                value.node.id,
            ))
            .parents([&tensor, &value])
            .stateful()
        {
            OpsKind::Tracked(prep) => prep.finish(
                (
                    ranges.to_vec(),
                    value.primitive.shape(),
                    B::float_device(&value.primitive),
                ),
                B::float_slice_assign(tensor.primitive, ranges, value.primitive),
            ),
            OpsKind::UnTracked(prep) => prep.finish(B::float_slice_assign(
                tensor.primitive,
                ranges,
                value.primitive,
            )),
        }
    }
    fn float_mask_where(
        tensor: FloatTensor<Self>,
        mask: BoolTensor<Self>,
        source: FloatTensor<Self>,
    ) -> FloatTensor<Self> {
        // Element-wise select: take `source` where `mask` is true, else `tensor`.
        #[derive(Debug)]
        struct MaskWhere;
        impl<B: Backend> Backward<B, 2> for MaskWhere {
            // (mask, lhs shape, rhs shape, device) captured during the forward pass.
            type State = (BoolTensor<B>, Shape, Shape, B::Device);
            fn backward(
                self,
                ops: Ops<Self::State, 2>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (mask, shape_lhs, shape_rhs, device) = ops.state;
                // Clone the mask only as many times as tracked parents require.
                let [mask_4lhs, mask_4rhs] = duplicate(&ops.parents, Some(mask));
                binary::<B, _, _>(
                    ops.parents,
                    ops.node,
                    grads,
                    // `tensor` gradient: zero where the mask selected `source`.
                    |grad| {
                        let zeros = B::float_zeros(shape_lhs.clone(), &device);
                        let grad = B::float_mask_where(grad, mask_4lhs.unwrap(), zeros);
                        broadcast_shape::<B>(grad, &shape_lhs)
                    },
                    // `source` gradient: the gradient only where the mask is true.
                    |grad| {
                        let zeros = B::float_zeros(shape_rhs.clone(), &device);
                        let grad = B::float_mask_where(zeros, mask_4rhs.unwrap(), grad);
                        broadcast_shape::<B>(grad, &shape_rhs)
                    },
                );
            }
        }
        match MaskWhere
            .prepare::<C>([tensor.node, source.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => prep.finish(
                (
                    mask.clone(),
                    tensor.primitive.shape(),
                    source.primitive.shape(),
                    B::float_device(&source.primitive),
                ),
                B::float_mask_where(tensor.primitive, mask, source.primitive),
            ),
            OpsKind::UnTracked(prep) => prep.finish(B::float_mask_where(
                tensor.primitive,
                mask,
                source.primitive,
            )),
        }
    }
    fn float_mask_fill(
        tensor: FloatTensor<Self>,
        mask: BoolTensor<B>,
        value: FloatElem<B>,
    ) -> FloatTensor<Self> {
        // Fill masked positions with a constant scalar, tracked for autodiff.
        #[derive(Debug)]
        struct MaskFill;
        impl<B: Backend> Backward<B, 1> for MaskFill {
            type State = BoolTensor<B>;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    // Filled positions took a constant, so their gradient is zero.
                    B::float_mask_fill(grad, ops.state, 0.elem())
                });
            }
        }
        match MaskFill
            .prepare::<C>([tensor.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => prep.finish(
                mask.clone(),
                B::float_mask_fill(tensor.primitive, mask, value),
            ),
            OpsKind::UnTracked(prep) => {
                prep.finish(B::float_mask_fill(tensor.primitive, mask, value))
            }
        }
    }
fn float_equal(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {
B::float_equal(lhs.primitive, rhs.primitive)
}
fn float_equal_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {
B::float_equal_elem(lhs.primitive, rhs)
}
fn float_greater(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {
B::float_greater(lhs.primitive, rhs.primitive)
}
fn float_greater_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {
B::float_greater_elem(lhs.primitive, rhs)
}
fn float_greater_equal(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {
B::float_greater_equal(lhs.primitive, rhs.primitive)
}
fn float_greater_equal_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {
B::float_greater_equal_elem(lhs.primitive, rhs)
}
fn float_lower(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {
B::float_lower(lhs.primitive, rhs.primitive)
}
fn float_lower_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {
B::float_lower_elem(lhs.primitive, rhs)
}
fn float_lower_equal(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {
B::float_lower_equal(lhs.primitive, rhs.primitive)
}
fn float_lower_equal_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {
B::float_lower_equal_elem(lhs.primitive, rhs)
}
fn float_detach(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
// When we detach a tensor, we remove it from the graph, but we still want to keep the
// `require_grad` setting.
let is_require_grad = Self::float_is_require_grad(&tensor);
let tensor = AutodiffTensor::new(tensor.primitive);
match is_require_grad {
true => tensor.require_grad(),
false => tensor,
}
}
fn float_set_require_grad(tensor: FloatTensor<Self>, require_grad: bool) -> FloatTensor<Self> {
if require_grad {
return tensor.require_grad();
}
AutodiffTensor::new(tensor.primitive)
}
    fn float_is_require_grad(tensor: &FloatTensor<Self>) -> bool {
        // True only when the node was explicitly marked with `Requirement::Grad`.
        matches!(tensor.node.requirement, Requirement::Grad)
    }
    fn float_mean(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        // Mean over all elements. Backward spreads grad / num_elements to every
        // input position.
        #[derive(Debug)]
        struct Mean;
        impl<B: Backend> Backward<B, 1> for Mean {
            // Input shape, needed to rebuild the full-size gradient.
            type State = Shape;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    let shape = ops.state;
                    // Each element contributed 1/N to the mean.
                    let val = 1_f64 / shape.num_elements() as f64;
                    let ones = B::float_ones(shape, &B::float_device(&grad));
                    let val = B::float_mul_scalar(ones, val.elem());
                    // Align the scalar-shaped gradient's rank before broadcasting.
                    let grad = unsqueeze_like::<B>(grad, val.shape());
                    B::float_mul(val, grad)
                });
            }
        }
        match Mean.prepare::<C>([tensor.node]).compute_bound().stateful() {
            OpsKind::Tracked(prep) => {
                prep.finish(tensor.primitive.shape(), B::float_mean(tensor.primitive))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_mean(tensor.primitive)),
        }
    }
    fn float_sum(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        // Sum over all elements. Backward broadcasts the gradient to every
        // input position unchanged.
        #[derive(Debug)]
        struct Sum;
        impl<B: Backend> Backward<B, 1> for Sum {
            // Input shape, needed to rebuild the full-size gradient.
            type State = Shape;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    let val = B::float_ones(ops.state, &B::float_device(&grad));
                    // Align the scalar-shaped gradient's rank before broadcasting.
                    let grad = unsqueeze_like::<B>(grad, val.shape());
                    B::float_mul(val, grad)
                });
            }
        }
        match Sum.prepare::<C>([tensor.node]).compute_bound().stateful() {
            OpsKind::Tracked(prep) => {
                prep.finish(tensor.primitive.shape(), B::float_sum(tensor.primitive))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_sum(tensor.primitive)),
        }
    }
    fn float_mean_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {
        // Mean along a single dimension. Backward spreads grad / dims[dim]
        // across that dimension.
        #[derive(Debug)]
        struct MeanDim;
        impl<B: Backend> Backward<B, 1> for MeanDim {
            // (input shape, reduced dimension).
            type State = (Shape, usize);
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (shape, dim) = ops.state;
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    // Each element along `dim` contributed 1/dims[dim] to the mean.
                    let val = 1_f64 / shape.dims[dim] as f64;
                    let ones = B::float_ones(shape, &B::float_device(&grad));
                    let val = B::float_mul_scalar(ones, B::FloatElem::from_elem(val));
                    let grad = B::float_sum_dim(grad, dim);
                    B::float_mul(val, grad)
                });
            }
        }
        match MeanDim
            .prepare::<C>([tensor.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => prep.finish(
                (tensor.primitive.shape(), dim),
                B::float_mean_dim(tensor.primitive, dim),
            ),
            OpsKind::UnTracked(prep) => prep.finish(B::float_mean_dim(tensor.primitive, dim)),
        }
    }
    fn float_sum_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {
        // Sum along a single dimension. Backward broadcasts the gradient back
        // across that dimension.
        #[derive(Debug)]
        struct SumDim;
        impl<B: Backend> Backward<B, 1> for SumDim {
            // (input shape, reduced dimension).
            type State = (Shape, usize);
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (shape, dim) = ops.state;
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    // Multiply by ones of the input shape to broadcast along `dim`.
                    let ones = B::float_ones(shape, &B::float_device(&grad));
                    let grad = B::float_sum_dim(grad, dim);
                    B::float_mul(ones, grad)
                });
            }
        }
        match SumDim
            .prepare::<C>([tensor.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => prep.finish(
                (tensor.primitive.shape(), dim),
                B::float_sum_dim(tensor.primitive, dim),
            ),
            OpsKind::UnTracked(prep) => prep.finish(B::float_sum_dim(tensor.primitive, dim)),
        }
    }
fn float_argmax(tensor: FloatTensor<Self>, dim: usize) -> IntTensor<B> {
B::float_argmax(tensor.primitive, dim)
}
fn float_argmin(tensor: FloatTensor<Self>, dim: usize) -> IntTensor<B> {
B::float_argmin(tensor.primitive, dim)
}
    fn float_exp(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        // Element-wise exponential. d/dx exp(x) = exp(x), recomputed in the
        // backward pass from the checkpointed input.
        #[derive(Debug)]
        struct Exp;
        retro_unary!(RetroExp, B::float_exp);
        impl<B: Backend> Backward<B, 1> for Exp {
            // NodeID of the checkpointed input.
            type State = NodeID;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                let input = checkpointer.retrieve_node_output(ops.state);
                let output = B::float_exp(input);
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    B::float_mul(grad, output)
                });
            }
        }
        match Exp
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroExp::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                let state = prep.checkpoint(&tensor);
                prep.finish(state, B::float_exp(tensor.primitive))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_exp(tensor.primitive)),
        }
    }
    fn float_log(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        // Element-wise natural logarithm. d/dx ln(x) = 1/x, computed from the
        // checkpointed input as x^(-1).
        #[derive(Debug)]
        struct Log;
        retro_unary!(RetroLog, B::float_log);
        impl<B: Backend> Backward<B, 1> for Log {
            // NodeID of the checkpointed input.
            type State = NodeID;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                let input = checkpointer.retrieve_node_output(ops.state);
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    let value = B::float_powf_scalar(input, -1.0);
                    B::float_mul(grad, value)
                });
            }
        }
        match Log
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroLog::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                let state = prep.checkpoint(&tensor);
                prep.finish(state, B::float_log(tensor.primitive))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_log(tensor.primitive)),
        }
    }
    fn float_log1p(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        // Element-wise ln(1 + x). d/dx ln(1 + x) = 1/(1 + x), computed from the
        // checkpointed input as (x + 1)^(-1).
        #[derive(Debug)]
        struct Log1P;
        retro_unary!(RetroLog1P, B::float_log1p);
        impl<B: Backend> Backward<B, 1> for Log1P {
            // NodeID of the checkpointed input.
            type State = NodeID;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                let input = checkpointer.retrieve_node_output(ops.state);
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    let value = B::float_add_scalar(input, 1.elem());
                    let value = B::float_powf_scalar(value, -1.0);
                    B::float_mul(grad, value)
                });
            }
        }
        match Log1P
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroLog1P::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                let state = prep.checkpoint(&tensor);
                prep.finish(state, B::float_log1p(tensor.primitive))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_log1p(tensor.primitive)),
        }
    }
    fn float_powf_scalar(tensor: FloatTensor<Self>, value: f32) -> FloatTensor<Self> {
        // Element-wise x^value for a scalar exponent.
        // d/dx x^v = v * x^(v-1), computed from the checkpointed input.
        #[derive(Debug)]
        struct PowfScalar;
        // Recomputes the forward pow from the checkpointed input.
        #[derive(new, Debug)]
        struct RetroPowfScalar<B: Backend> {
            lhs_id: NodeID,
            rhs: f32,
            _backend: PhantomData<B>,
        }
        impl<B: Backend> RetroForward for RetroPowfScalar<B> {
            fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
                let lhs = states.get_state::<B::FloatTensorPrimitive>(&self.lhs_id);
                let out = B::float_powf_scalar(lhs, self.rhs);
                states.save(out_node, out)
            }
        }
        impl<B: Backend> Backward<B, 1> for PowfScalar {
            // (checkpointed input id, scalar exponent).
            type State = (NodeID, f32);
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                let (tensor_id, value) = ops.state;
                let tensor = checkpointer.retrieve_node_output(tensor_id);
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    let tmp = B::float_powf_scalar(tensor, value - 1.0);
                    let value = B::float_mul_scalar(tmp, value.elem());
                    B::float_mul(grad, value)
                });
            }
        }
        match PowfScalar
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroPowfScalar::<B>::new(tensor.node.id, value))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                let state = (prep.checkpoint(&tensor), value);
                prep.finish(state, B::float_powf_scalar(tensor.primitive, value))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_powf_scalar(tensor.primitive, value)),
        }
    }
    fn float_sqrt(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        // Element-wise square root. d/dx sqrt(x) = x^(-1/2) / 2, computed from
        // the checkpointed input.
        #[derive(Debug)]
        struct Sqrt;
        retro_unary!(RetroSqrt, B::float_sqrt);
        impl<B: Backend> Backward<B, 1> for Sqrt {
            // NodeID of the checkpointed input.
            type State = NodeID;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                let input = checkpointer.retrieve_node_output(ops.state);
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    let value = B::float_div_scalar(B::float_powf_scalar(input, -0.5), 2.elem());
                    B::float_mul(grad, value)
                });
            }
        }
        match Sqrt
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroSqrt::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                let state = prep.checkpoint(&tensor);
                prep.finish(state, B::float_sqrt(tensor.primitive))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_sqrt(tensor.primitive)),
        }
    }
    fn float_abs(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        // Element-wise absolute value. The gradient is grad * sign(x),
        // recomputed from the checkpointed input.
        #[derive(Debug)]
        struct Abs;
        retro_unary!(RetroAbs, B::float_abs);
        impl<B: Backend> Backward<B, 1> for Abs {
            // NodeID of the checkpointed input.
            type State = NodeID;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                let tensor: B::FloatTensorPrimitive = checkpointer.retrieve_node_output(ops.state);
                let state = B::float_sign(tensor);
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    B::float_mul(grad, state)
                });
            }
        }
        match Abs
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroAbs::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                let state = prep.checkpoint(&tensor);
                prep.finish(state, B::float_abs(tensor.primitive))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_abs(tensor.primitive)),
        }
    }
    fn float_cos(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        // Element-wise cosine. d/dx cos(x) = -sin(x), recomputed from the
        // checkpointed input.
        #[derive(Debug)]
        struct Cos;
        retro_unary!(RetroCos, B::float_cos);
        impl<B: Backend> Backward<B, 1> for Cos {
            // NodeID of the checkpointed input.
            type State = NodeID;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                let input = checkpointer.retrieve_node_output(ops.state);
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    let value = B::float_neg(B::float_sin(input));
                    B::float_mul(grad, value)
                });
            }
        }
        match Cos
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroCos::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                let state = prep.checkpoint(&tensor);
                prep.finish(state, B::float_cos(tensor.primitive))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_cos(tensor.primitive)),
        }
    }
    fn float_sin(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        // Element-wise sine. d/dx sin(x) = cos(x), recomputed from the
        // checkpointed input.
        #[derive(Debug)]
        struct Sin;
        retro_unary!(RetroSin, B::float_sin);
        impl<B: Backend> Backward<B, 1> for Sin {
            // NodeID of the checkpointed input.
            type State = NodeID;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                let state = checkpointer.retrieve_node_output(ops.state);
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    let value = B::float_cos(state);
                    B::float_mul(grad, value)
                });
            }
        }
        match Sin
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroSin::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                let state = prep.checkpoint(&tensor);
                prep.finish(state, B::float_sin(tensor.primitive))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_sin(tensor.primitive)),
        }
    }
    fn float_tanh(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        // Element-wise hyperbolic tangent. d/dx tanh(x) = 1 - tanh(x)^2,
        // with tanh(x) recomputed from the checkpointed input.
        #[derive(Debug)]
        struct Tanh;
        retro_unary!(RetroTanh, B::float_tanh);
        impl<B: Backend> Backward<B, 1> for Tanh {
            // NodeID of the checkpointed input.
            type State = NodeID;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                let input = checkpointer.retrieve_node_output(ops.state);
                let state = B::float_tanh(input);
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    let value = B::float_add_scalar(
                        B::float_neg(B::float_powf_scalar(state, 2.0)),
                        1.elem(),
                    );
                    B::float_mul(grad, value)
                });
            }
        }
        match Tanh
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroTanh::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                let state = prep.checkpoint(&tensor);
                prep.finish(state, B::float_tanh(tensor.primitive))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_tanh(tensor.primitive)),
        }
    }
    fn float_round(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        // Element-wise rounding. Rounding is piecewise constant, so the
        // gradient is zero everywhere it is defined.
        #[derive(Debug)]
        struct Round;
        retro_unary!(RetroRound, B::float_round);
        impl<B: Backend> Backward<B, 1> for Round {
            // (shape, device) of the input, used to build the zero gradient.
            type State = (Shape, B::Device);
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (shape, device) = ops.state;
                unary::<B, _>(ops.parents, ops.node, grads, |_grad| {
                    B::float_zeros(shape, &device)
                })
            }
        }
        match Round
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroRound::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(preps) => preps.finish(
                (tensor.primitive.shape(), B::float_device(&tensor.primitive)),
                B::float_round(tensor.primitive),
            ),
            OpsKind::UnTracked(preps) => preps.finish(B::float_round(tensor.primitive)),
        }
    }
    fn float_floor(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        // Element-wise floor. Floor is piecewise constant, so the gradient is
        // zero everywhere it is defined.
        #[derive(Debug)]
        struct Floor;
        retro_unary!(RetroFloor, B::float_floor);
        impl<B: Backend> Backward<B, 1> for Floor {
            // (shape, device) of the input, used to build the zero gradient.
            type State = (Shape, B::Device);
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (shape, device) = ops.state;
                unary::<B, _>(ops.parents, ops.node, grads, |_grad| {
                    B::float_zeros(shape, &device)
                })
            }
        }
        match Floor
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroFloor::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(preps) => preps.finish(
                (tensor.primitive.shape(), B::float_device(&tensor.primitive)),
                B::float_floor(tensor.primitive),
            ),
            OpsKind::UnTracked(preps) => preps.finish(B::float_floor(tensor.primitive)),
        }
    }
fn float_ceil(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
#[derive(Debug)]
struct Ceil;
retro_unary!(RetroCeil, B::float_ceil);
impl<B: Backend> Backward<B, 1> for Ceil {
type State = (Shape, B::Device);
fn backward(
self,
ops: Ops<Self::State, 1>,
grads: &mut Gradients,
_checkpointer: &mut Checkpointer,
) {
let (shape, device) = ops.state;
unary::<B, _>(ops.parents, ops.node, grads, |_grad| {
B::float_zeros(shape, &device)
})
}
}
match Ceil
.prepare::<C>([tensor.node.clone()])
.memory_bound()
.retro_forward(RetroCeil::<B>::new(tensor.node.id))
.parents([&tensor])
.stateful()
{
OpsKind::Tracked(preps) => preps.finish(
(tensor.primitive.shape(), B::float_device(&tensor.primitive)),
B::float_floor(tensor.primitive),
),
OpsKind::UnTracked(preps) => preps.finish(B::float_floor(tensor.primitive)),
}
}
    fn float_erf(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        // Element-wise error function.
        // d/dx erf(x) = 2/sqrt(pi) * exp(-x^2), computed from the checkpointed input.
        #[derive(Debug)]
        struct Erf;
        retro_unary!(RetroErf, B::float_erf);
        impl<B: Backend> Backward<B, 1> for Erf {
            // NodeID of the checkpointed input.
            type State = NodeID;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    let ops = checkpointer.retrieve_node_output(ops.state);
                    let exponent = B::float_neg(B::float_powf_scalar(ops, 2.0));
                    let numerator = B::float_mul_scalar(B::float_exp(exponent), 2.0.elem());
                    let denominator = core::f64::consts::PI.sqrt().elem();
                    let value = B::float_div_scalar(numerator, denominator);
                    B::float_mul(grad, value)
                });
            }
        }
        match Erf
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroErf::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                let state = prep.checkpoint(&tensor);
                prep.finish(state, B::float_erf(tensor.primitive))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_erf(tensor.primitive)),
        }
    }
    fn float_cat(tensors: Vec<FloatTensor<Self>>, dim: usize) -> FloatTensor<Self> {
        // Concatenate tensors along `dim`. The backward step slices the output
        // gradient back into one piece per tracked input.
        #[derive(new, Debug)]
        struct CatStep<B: Backend> {
            nodes: Vec<Option<NodeRef>>,
            // The dimension of each tensor along the dim dimension.
            // This indicates the number of dimension concatenated for each tensor.
            dim_sizes: Vec<usize>,
            output: NodeRef,
            phantom: PhantomData<B>,
            dim: usize,
        }
        impl<B: Backend> Step for CatStep<B> {
            fn step(self: Box<Self>, grads: &mut Gradients, _checkpointer: &mut Checkpointer) {
                let grad = grads.consume::<B>(&self.output);
                // Full ranges over every dimension; only `self.dim` is narrowed below.
                let ranges: Vec<_> = grad.shape().dims.iter().map(|v| 0..*v).collect();
                let mut current_index = 0;
                self.nodes
                    .into_iter()
                    .zip(self.dim_sizes)
                    // Untracked parents were mapped to `None` and get no gradient.
                    .filter_map(|(node, dim_size)| node.map(|node| (node, dim_size)))
                    .for_each(|(node, dim_size)| {
                        let mut ranges = ranges.clone();
                        // Each input's gradient is its contiguous slab along `dim`.
                        ranges[self.dim] = current_index..dim_size + current_index;
                        current_index += dim_size;
                        grads.register::<B>(node.id, B::float_slice(grad.clone(), &ranges));
                    });
            }
            fn node(&self) -> NodeID {
                self.output.id
            }
            fn parents(&self) -> Vec<NodeID> {
                self.nodes
                    .iter()
                    .filter_map(|node| node.clone())
                    .map(|node| node.id)
                    .collect()
            }
            fn depth(&self) -> usize {
                self.output.order
            }
        }
        let mut nodes = Vec::with_capacity(tensors.len());
        let mut primitives = Vec::with_capacity(tensors.len());
        let mut dim_sizes = Vec::with_capacity(tensors.len());
        tensors.into_iter().for_each(|tensor| {
            dim_sizes.push(tensor.primitive.shape().dims[dim]);
            nodes.push(tensor.node);
            primitives.push(tensor.primitive);
        });
        let requirement = Requirement::from_nodes(&nodes);
        // For simplicity, this operation does not checkpoint anything
        let cat_computing_property = ComputingProperty::Ambiguous;
        let checkpointer_builder = CheckpointerBuilder::default();
        let output = B::float_cat(primitives, dim);
        // No parent requires gradients: return an untracked result without a step.
        if requirement.is_none() {
            return AutodiffTensor::from_parents(
                output,
                &nodes,
                requirement,
                cat_computing_property,
            );
        }
        let output =
            AutodiffTensor::from_parents(output, &nodes, requirement, cat_computing_property);
        let nodes = nodes
            .into_iter()
            .map(|node| node.clone_if_require_grad())
            .collect::<Vec<_>>();
        let ops = CatStep::<B>::new(nodes, dim_sizes, output.node.clone(), dim);
        output.register_step(ops, checkpointer_builder)
    }
    fn float_max_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {
        // Max along `dim`. When tracked, the forward runs the with-indices
        // variant so `MaxMinDim` (shared backward, defined elsewhere) can
        // route gradients to the winning positions.
        match MaxMinDim
            .prepare::<C>([tensor.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => {
                let shape = tensor.primitive.shape();
                let (tensor, index) = B::float_max_dim_with_indices(tensor.primitive, dim);
                prep.finish((index, shape), tensor)
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_max_dim(tensor.primitive, dim)),
        }
    }
    fn float_max_dim_with_indices(
        tensor: FloatTensor<Self>,
        dim: usize,
    ) -> (FloatTensor<Self>, IntTensor<B>) {
        // Max along `dim`, also returning the argmax indices. Only the values
        // are differentiable; the indices are returned as plain int tensors.
        match MaxMinDim
            .prepare::<C>([tensor.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => {
                let shape = tensor.primitive.shape();
                let (tensor, index) = B::float_max_dim_with_indices(tensor.primitive, dim);
                let tensor = prep.finish((index.clone(), shape), tensor);
                (tensor, index)
            }
            OpsKind::UnTracked(prep) => {
                let (tensor, index) = B::float_max_dim_with_indices(tensor.primitive, dim);
                let tensor = prep.finish(tensor);
                (tensor, index)
            }
        }
    }
    fn float_min_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {
        // Min along `dim`. Mirrors `float_max_dim`, sharing the `MaxMinDim`
        // backward (defined elsewhere) which scatters gradients by the indices.
        match MaxMinDim
            .prepare::<C>([tensor.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => {
                let shape = tensor.primitive.shape();
                let (tensor, index) = B::float_min_dim_with_indices(tensor.primitive, dim);
                prep.finish((index, shape), tensor)
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_min_dim(tensor.primitive, dim)),
        }
    }
    fn float_min_dim_with_indices(
        tensor: FloatTensor<Self>,
        dim: usize,
    ) -> (FloatTensor<Self>, IntTensor<B>) {
        // Min along `dim`, also returning the argmin indices. Only the values
        // are differentiable; the indices are returned as plain int tensors.
        match MaxMinDim
            .prepare::<C>([tensor.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => {
                let shape = tensor.primitive.shape();
                let (tensor, index) = B::float_min_dim_with_indices(tensor.primitive, dim);
                let tensor = prep.finish((index.clone(), shape), tensor);
                (tensor, index)
            }
            OpsKind::UnTracked(prep) => {
                let (tensor, index) = B::float_min_dim_with_indices(tensor.primitive, dim);
                let tensor = prep.finish(tensor);
                (tensor, index)
            }
        }
    }
fn float_into_int(tensor: FloatTensor<Self>) -> <Autodiff<B> as Backend>::IntTensorPrimitive {
B::float_into_int(tensor.primitive)
}
    fn float_powf(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {
        // Element-wise lhs^rhs with tensor exponent.
        // d/dlhs = rhs * lhs^(rhs-1); d/drhs = lhs^rhs * ln(lhs).
        #[derive(Debug)]
        struct PowF;
        retro_binary!(RetroPowf, B::float_powf);
        impl<B: Backend> Backward<B, 2> for PowF {
            // (checkpointed lhs id, checkpointed rhs id, broadcast bookkeeping).
            type State = (NodeID, NodeID, BinaryOpsBroadcast);
            fn backward(
                self,
                ops: Ops<Self::State, 2>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                let (lhs_id, rhs_id, broadcast) = ops.state;
                let lhs: B::FloatTensorPrimitive = checkpointer.retrieve_node_output(lhs_id);
                let rhs: B::FloatTensorPrimitive = checkpointer.retrieve_node_output(rhs_id);
                // Both lhs and rhs are needed for both lhs and rhs gradients, but we clone them
                // the number of times required by the parents specification.
                let [rhs_4lhs, rhs_4rhs] = duplicate(&ops.parents, Some(rhs));
                let [lhs_4lhs, lhs_4rhs] = duplicate(&ops.parents, Some(lhs));
                binary::<B, _, _>(
                    ops.parents,
                    ops.node,
                    grads,
                    |grad| {
                        //rhs*(lhs.val**(rhs-1))*grad
                        let rhs1 = rhs_4lhs.unwrap();
                        let rhs2 = rhs1.clone();
                        let lhs = lhs_4lhs.unwrap();
                        let tmp = B::float_powf(
                            lhs,
                            B::float_sub_scalar(rhs1, B::FloatElem::from_elem(1.0)),
                        );
                        let value = B::float_mul(tmp, rhs2);
                        let grad = B::float_mul(grad, value);
                        // Undo any forward broadcasting by reducing to lhs's shape.
                        broadcast.backward_lhs::<B>(grad)
                    },
                    |grad| {
                        //lhs**rhs * ln(lhs) * grad
                        let rhs = rhs_4rhs.unwrap();
                        let lhs1 = lhs_4rhs.unwrap();
                        let lhs2 = lhs1.clone();
                        let tmp = B::float_powf(lhs1, rhs);
                        let value = B::float_mul(tmp, B::float_log(lhs2));
                        let grad = B::float_mul(grad, value);
                        // Undo any forward broadcasting by reducing to rhs's shape.
                        broadcast.backward_rhs::<B>(grad)
                    },
                );
            }
        }
        let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);
        match PowF
            .prepare::<C>([lhs.node.clone(), rhs.node.clone()])
            .memory_bound()
            .retro_forward(RetroPowf::<B>::new(lhs.node.id, rhs.node.id))
            .parents([&lhs, &rhs])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                let lhs_state = prep.checkpoint(&lhs);
                let rhs_state = prep.checkpoint(&rhs);
                prep.finish(
                    (lhs_state, rhs_state, broadcast),
                    B::float_powf(lhs.primitive, rhs.primitive),
                )
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_powf(lhs.primitive, rhs.primitive)),
        }
    }
    fn float_sign(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        // Element-wise sign. The op needs no forward state, so it is
        // registered with `stateless`.
        #[derive(Debug)]
        struct Sign;
        retro_unary!(RetroSign, B::float_sign);
        impl<B: Backend> Backward<B, 1> for Sign {
            type State = ();
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                unary::<B, _>(ops.parents, ops.node, grads, |grad|
                    // Always return 0 because the derivative of the sign function
                    // does not contribute to gradient updates in a meaningful way.
                    B::float_mul_scalar(grad, 0.elem()));
            }
        }
        Sign.prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroSign::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateless(B::float_sign(tensor.primitive))
    }
    fn float_expand(tensor: FloatTensor<Self>, shape: Shape) -> FloatTensor<Self> {
        // D1: tensor, D2: shape
        // Broadcast the tensor to `shape`. Backward sums the gradient over
        // every broadcast dimension, then reshapes back to the input shape.
        #[derive(Debug)]
        struct ExpandDim;
        // Recomputes the forward expand from the checkpointed input.
        #[derive(new, Debug)]
        struct RetroExpand<B: Backend> {
            input_id: NodeID,
            shape: Shape,
            _backend: PhantomData<B>,
        }
        impl<B: Backend> RetroForward for RetroExpand<B> {
            fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
                let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);
                let out = B::float_expand(input, self.shape.clone());
                states.save(out_node, out)
            }
        }
        impl<B: Backend> Backward<B, 1> for ExpandDim {
            // (input shape, expanded output shape).
            type State = (Shape, Shape);
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (shape_in, shape_out) = ops.state;
                let ndims_in = shape_in.num_dims();
                let ndims_out = shape_out.num_dims();
                // Right-align the input shape with the output rank, padding
                // leading dimensions with 1 (standard broadcasting alignment).
                let mut shape_expanded = vec![1; ndims_out];
                debug_assert!(ndims_out >= ndims_in);
                for i in 0..ndims_in {
                    shape_expanded[i + (ndims_out - ndims_in)] = shape_in.dims[i];
                }
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    let shape_grad = grad.shape();
                    let mut grad = grad;
                    #[allow(clippy::needless_range_loop)]
                    for i in 0..ndims_out {
                        // A size-1 input dim that grew in the output was
                        // broadcast: sum the gradient back over it.
                        if shape_expanded[i] == 1 && shape_grad.dims[i] != 1 {
                            grad = B::float_sum_dim(grad, i);
                        }
                    }
                    B::float_reshape(grad, shape_in)
                });
            }
        }
        match ExpandDim
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroExpand::<B>::new(tensor.node.id, shape.clone()))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(prep) => prep.finish(
                (tensor.primitive.shape(), shape.clone()),
                B::float_expand(tensor.primitive, shape),
            ),
            OpsKind::UnTracked(prep) => prep.finish(B::float_expand(tensor.primitive, shape)),
        }
    }
fn float_sort(tensor: FloatTensor<Self>, dim: usize, descending: bool) -> FloatTensor<Self> {
match super::sort::SortDim
.prepare::<C>([tensor.node])
.compute_bound()
.stateful()
{
OpsKind::Tracked(prep) => {
let shape = tensor.primitive.shape();
let (tensor, indices) =
B::float_sort_with_indices(tensor.primitive, dim, descending);
prep.finish((indices, shape), tensor)
}
OpsKind::UnTracked(prep) => {
prep.finish(B::float_sort(tensor.primitive, dim, descending))
}
}
}
fn float_sort_with_indices(
tensor: FloatTensor<Self>,
dim: usize,
descending: bool,
) -> (FloatTensor<Self>, IntTensor<B>) {
match super::sort::SortDim
.prepare::<C>([tensor.node])
.compute_bound()
.stateful()
{
OpsKind::Tracked(prep) => {
let shape = tensor.primitive.shape();
let (tensor, indices) =
B::float_sort_with_indices(tensor.primitive, dim, descending);
let tensor = prep.finish((indices.clone(), shape), tensor);
(tensor, indices)
}
OpsKind::UnTracked(prep) => {
let (tensor, indices) =
B::float_sort_with_indices(tensor.primitive, dim, descending);
let tensor = prep.finish(tensor);
(tensor, indices)
}
}
}
    /// Returns the indices that would sort the tensor along `dim`.
    ///
    /// The output is an integer tensor, which is not differentiable, so no
    /// autodiff node is created and the call is delegated directly to the
    /// inner backend.
    fn float_argsort(tensor: FloatTensor<Self>, dim: usize, descending: bool) -> IntTensor<B> {
        B::float_argsort(tensor.primitive, dim, descending)
    }
    /// Repeats `tensor` `times` times along `dim` with autodiff support.
    ///
    /// Each original element contributes to `times` output elements, so the
    /// backward pass sums the gradient over the repeated copies.
    fn float_repeat_dim(tensor: FloatTensor<Self>, dim: usize, times: usize) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Repeat;
        // Replays the repeat during checkpoint recomputation instead of
        // storing the repeated output.
        #[derive(new, Debug)]
        struct RetroRepeat<B: Backend> {
            tensor_id: NodeID,
            dim: usize,
            times: usize,
            _backend: PhantomData<B>,
        }
        impl<B: Backend> RetroForward for RetroRepeat<B> {
            fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
                let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);
                let out = B::float_repeat_dim(tensor, self.dim, self.times);
                states.save(out_node, out)
            }
        }
        impl<B: Backend> Backward<B, 1> for Repeat {
            // State: (repeated dim, repeat count).
            type State = (usize, usize);
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (dim, times) = ops.state;
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    let mut dims = grad.shape().dims;
                    // Recover the pre-repeat size of the repeated dimension.
                    let orig_dim_size = dims[dim] / times;
                    if orig_dim_size > 1 {
                        dims[dim] = orig_dim_size;
                        let orig_dims = dims.clone();
                        // NOTE(review): splitting dims[dim] into
                        // (orig_dim_size, times) assumes the `times` copies of
                        // each original element are adjacent in the repeated
                        // output — confirm this matches the backend's
                        // repeat_dim layout.
                        dims.insert(dim + 1, times); // shape [..., orig_dim_size, times, ...]
                        let grad = B::float_reshape(grad, Shape::from(dims));
                        let grad = B::float_sum_dim(grad, dim + 1); // sum over repeat times
                        B::float_reshape(grad, Shape::from(orig_dims))
                    } else {
                        // Original dim had size 1: summing the whole dim
                        // collapses all copies and keeps the size-1 axis.
                        B::float_sum_dim(grad, dim)
                    }
                });
            }
        }
        match Repeat
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroRepeat::<B>::new(tensor.node.id, dim, times))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(prep) => prep.finish(
                (dim, times),
                B::float_repeat_dim(tensor.primitive, dim, times),
            ),
            OpsKind::UnTracked(prep) => {
                prep.finish(B::float_repeat_dim(tensor.primitive, dim, times))
            }
        }
    }
    /// Casts the tensor to another float dtype.
    ///
    /// NOTE(review): this wraps the cast result in a fresh `AutodiffTensor`
    /// without registering a backward step, so gradients do not flow back
    /// through the cast to `tensor` — confirm this is intentional rather than
    /// a missing backward implementation.
    fn float_cast(tensor: FloatTensor<Self>, dtype: burn_tensor::FloatDType) -> FloatTensor<Self> {
        AutodiffTensor::new(B::float_cast(tensor.primitive, dtype))
    }
// TODO: Implement float_prod and float_sum
// https://github.com/tracel-ai/burn/issues/1458
}
/// Records whether a binary operation broadcast its operands, so the backward
/// pass can reduce gradients back to each operand's original shape.
#[derive(Debug, Clone)]
enum BinaryOpsBroadcast {
    /// Operand shapes differed: (lhs shape, rhs shape) before broadcasting.
    Broadcasted(Shape, Shape),
    /// Operand shapes matched exactly; gradients pass through unchanged.
    None,
}
impl BinaryOpsBroadcast {
    /// Compares the operand shapes dimension by dimension and records both
    /// shapes when any dimension differs.
    ///
    /// NOTE(review): assumes both primitives have the same rank; indexing
    /// `shape_rhs.dims[i]` panics otherwise — confirm backends guarantee
    /// equal rank for binary ops.
    fn new<B: Backend>(lhs: &B::FloatTensorPrimitive, rhs: &B::FloatTensorPrimitive) -> Self {
        let shape_lhs = lhs.shape();
        let shape_rhs = rhs.shape();

        let mismatched =
            (0..shape_lhs.num_dims()).any(|i| shape_lhs.dims[i] != shape_rhs.dims[i]);

        if mismatched {
            Self::Broadcasted(shape_lhs, shape_rhs)
        } else {
            Self::None
        }
    }

    /// Reduces `grad` back to the left-hand operand's original shape when the
    /// forward pass broadcast it; otherwise passes the gradient through.
    fn backward_lhs<B: Backend>(&self, grad: B::FloatTensorPrimitive) -> B::FloatTensorPrimitive {
        if let Self::Broadcasted(shape_lhs, _) = self {
            broadcast_shape::<B>(grad, shape_lhs)
        } else {
            grad
        }
    }

    /// Reduces `grad` back to the right-hand operand's original shape when the
    /// forward pass broadcast it; otherwise passes the gradient through.
    fn backward_rhs<B: Backend>(&self, grad: B::FloatTensorPrimitive) -> B::FloatTensorPrimitive {
        if let Self::Broadcasted(_, shape_rhs) = self {
            broadcast_shape::<B>(grad, shape_rhs)
        } else {
            grad
        }
    }
}
| rust | {
"argument_definitions": [
{
"definitions": [
"impl<B: Backend, C: CheckpointStrategy> FloatTensorOps<Self> for Autodiff<B, C> {\n fn float_from_data(data: TensorData, device: &Device<Self>) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_from_data(data, device))\n }\n\n fn float_random(\n shape: Shape,\n distribution: burn_tensor::Distribution,\n device: &Device<Self>,\n ) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_random(shape, distribution, device))\n }\n\n fn float_zeros(shape: Shape, device: &Device<Self>) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_zeros(shape, device))\n }\n\n fn float_ones(shape: Shape, device: &Device<Self>) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_ones(shape, device))\n }\n\n async fn float_into_data(tensor: FloatTensor<Self>) -> TensorData {\n B::float_into_data(tensor.primitive).await\n }\n\n fn float_device(tensor: &FloatTensor<Self>) -> Device<Self> {\n B::float_device(&tensor.primitive)\n }\n\n fn float_to_device(tensor: FloatTensor<Self>, device: &Device<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct ToDevice;\n\n impl<B: Backend> Backward<B, 1> for ToDevice {\n type State = B::Device;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_to_device(grad, &ops.state)\n });\n }\n }\n\n match ToDevice\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let device_old = B::float_device(&tensor.primitive);\n prep.finish(device_old, B::float_to_device(tensor.primitive, device))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_to_device(tensor.primitive, device)),\n }\n }\n\n fn float_empty(shape: Shape, device: &Device<Self>) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_empty(shape, device))\n }\n\n fn float_add(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Add;\n\n retro_binary!(RetroAdd, 
B::float_add);\n\n impl<B: Backend> Backward<B, 2> for Add {\n type State = (Shape, Shape);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape_lhs, shape_rhs) = ops.state;\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| broadcast_shape::<B>(grad, &shape_lhs),\n |grad| broadcast_shape::<B>(grad, &shape_rhs),\n );\n }\n }\n\n match Add\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroAdd::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (lhs.primitive.shape(), rhs.primitive.shape()),\n B::float_add(lhs.primitive, rhs.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_add(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_add_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct AddScalar;\n\n retro_unary_scalar!(RetroAddScalar, B::float_add_scalar);\n\n impl<B: Backend> Backward<B, 1> for AddScalar {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| grad);\n }\n }\n\n AddScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroAddScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateless(B::float_add_scalar(lhs.primitive, rhs))\n }\n\n fn float_sub(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sub;\n\n retro_binary!(RetroSub, B::float_sub);\n\n impl<B: Backend> Backward<B, 2> for Sub {\n type State = (Shape, Shape);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape_lhs, shape_rhs) = ops.state;\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| 
broadcast_shape::<B>(grad, &shape_lhs),\n |grad| broadcast_shape::<B>(B::float_neg(grad), &shape_rhs),\n );\n }\n }\n\n match Sub\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroSub::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (lhs.primitive.shape(), rhs.primitive.shape()),\n B::float_sub(lhs.primitive, rhs.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_sub(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_sub_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct SubScalar;\n\n retro_unary_scalar!(RetroSubScalar, B::float_sub_scalar);\n\n impl<B: Backend> Backward<B, 1> for SubScalar {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| grad);\n }\n }\n\n SubScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroSubScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateless(B::float_sub_scalar(lhs.primitive, rhs))\n }\n\n fn float_mul(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Mul;\n\n retro_binary!(RetroMul, B::float_mul);\n\n impl<B: Backend> Backward<B, 2> for Mul {\n type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (lhs, rhs, broadcast) = ops.state;\n let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));\n let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let grad = B::float_mul(grad, rhs.unwrap());\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n let grad = B::float_mul(grad, lhs.unwrap());\n 
broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let lhs_tracked = lhs.is_tracked();\n let rhs_tracked = rhs.is_tracked();\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match Mul\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroMul::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));\n let rhs_state = lhs_tracked.then(|| prep.checkpoint(&rhs));\n\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n B::float_mul(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_mul(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_mul_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct MulScalar;\n\n retro_unary_scalar!(RetroMulScalar, B::float_mul_scalar);\n\n impl<B: Backend> Backward<B, 1> for MulScalar {\n type State = FloatElem<B>;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_mul_scalar(grad, ops.state)\n });\n }\n }\n\n match MulScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroMulScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(rhs, B::float_mul_scalar(lhs.primitive, rhs)),\n OpsKind::UnTracked(prep) => prep.finish(B::float_mul_scalar(lhs.primitive, rhs)),\n }\n }\n\n fn float_div(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Div;\n\n retro_binary!(RetroDiv, B::float_div);\n\n impl<B: Backend> Backward<B, 2> for Div {\n type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut 
Checkpointer,\n ) {\n let (lhs, rhs, broadcast) = ops.state;\n let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));\n let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));\n let [rhs_4lhs, rhs_4rhs] = duplicate(&ops.parents, rhs);\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let rhs = rhs_4lhs.unwrap();\n let value = B::float_powf_scalar(rhs, -1.0);\n let grad = B::float_mul(grad, value);\n\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n let rhs = rhs_4rhs.unwrap();\n let lhs = lhs.unwrap();\n let value = B::float_div(B::float_neg(lhs), B::float_powf_scalar(rhs, 2.0));\n let grad = B::float_mul(grad, value);\n\n broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let lhs_tracked = lhs.is_tracked();\n let rhs_tracked = rhs.is_tracked();\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match Div\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroDiv::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));\n let rhs_state = (lhs_tracked || rhs_tracked).then(|| prep.checkpoint(&rhs));\n\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n B::float_div(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_div(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_div_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct DivScalar;\n\n retro_unary_scalar!(RetroDivScalar, B::float_div_scalar);\n\n impl<B: Backend> Backward<B, 1> for DivScalar {\n type State = FloatElem<B>;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let tmp = 1.0 / ops.state.elem::<f32>();\n B::float_mul_scalar(grad, tmp.elem())\n });\n }\n 
}\n\n match DivScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroDivScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(rhs, B::float_div_scalar(lhs.primitive, rhs)),\n OpsKind::UnTracked(prep) => prep.finish(B::float_div_scalar(lhs.primitive, rhs)),\n }\n }\n\n fn float_remainder(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Rem;\n\n retro_binary!(RetroRem, B::float_remainder);\n\n impl<B: Backend> Backward<B, 2> for Rem {\n type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (lhs, rhs, broadcast) = ops.state;\n let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));\n let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n // remainder(x, y) = x - floor(x / y) * y\n // partial(x - floor(x / y) * y, x) = 1\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n // partial(x - floor(x / y) * y, y) = - floor(x / y)\n let rhs = rhs.unwrap();\n let lhs = lhs.unwrap();\n let value = B::float_neg(B::float_floor(B::float_div(lhs, rhs)));\n let grad = B::float_mul(grad, value);\n broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let lhs_tracked = lhs.is_tracked();\n let rhs_tracked = rhs.is_tracked();\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match Rem\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroRem::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));\n let rhs_state = (lhs_tracked || rhs_tracked).then(|| prep.checkpoint(&rhs));\n\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n 
B::float_remainder(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_remainder(lhs.primitive, rhs.primitive))\n }\n }\n }\n\n fn float_remainder_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct RemainderScalar;\n\n retro_unary_scalar!(RetroRemainderScalar, B::float_remainder_scalar);\n\n impl<B: Backend> Backward<B, 1> for RemainderScalar {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| grad);\n }\n }\n\n RemainderScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroRemainderScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateless(B::float_remainder_scalar(lhs.primitive, rhs))\n }\n\n fn float_matmul(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Matmul;\n\n impl<B: Backend> Backward<B, 2> for Matmul {\n type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (lhs, rhs, broadcast) = ops.state;\n let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));\n let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let rhs = B::float_transpose(rhs.unwrap());\n let grad = B::float_matmul(grad, rhs);\n\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n let lhs = B::float_transpose(lhs.unwrap());\n let grad = B::float_matmul(lhs, grad);\n\n broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let lhs_tracked = lhs.is_tracked();\n let rhs_tracked = rhs.is_tracked();\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match Matmul\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n 
.compute_bound()\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));\n let rhs_state = lhs_tracked.then(|| prep.checkpoint(&rhs));\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n B::float_matmul(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_matmul(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_neg(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Neg;\n\n retro_unary!(RetroNeg, B::float_neg);\n\n impl<B: Backend> Backward<B, 1> for Neg {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| B::float_neg(grad));\n }\n }\n\n Neg.prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroNeg::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateless(B::float_neg(tensor.primitive))\n }\n\n fn float_recip(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Recip;\n\n retro_unary!(RetroRecip, B::float_recip);\n\n impl<B: Backend> Backward<B, 1> for Recip {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let tensor = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let tmp = B::float_powf_scalar(tensor, -2.0);\n let value = B::float_neg(tmp);\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Recip\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroRecip::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_recip(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_recip(tensor.primitive)),\n }\n }\n\n fn float_swap_dims(tensor: 
FloatTensor<Self>, dim1: usize, dim2: usize) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct SwapDim;\n\n #[derive(new, Debug)]\n struct RetroSwapDims<B: Backend> {\n input_id: NodeID,\n dim1: usize,\n dim2: usize,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroSwapDims<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_swap_dims(input, self.dim1, self.dim2);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for SwapDim {\n type State = (usize, usize);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim1, dim2) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_swap_dims(grad, dim2, dim1)\n });\n }\n }\n\n match SwapDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSwapDims::<B>::new(tensor.node.id, dim1, dim2))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (dim1, dim2),\n B::float_swap_dims(tensor.primitive, dim1, dim2),\n ),\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_swap_dims(tensor.primitive, dim1, dim2))\n }\n }\n }\n\n fn float_permute(tensor: FloatTensor<Self>, axes: &[usize]) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct PermuteDim;\n\n #[derive(new, Debug)]\n struct RetroPermuteDims<B: Backend> {\n input_id: NodeID,\n axes: Vec<usize>,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroPermuteDims<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_permute(input, &self.axes);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for PermuteDim {\n type State = Vec<usize>;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: 
&mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let axes = ops.state;\n\n let mut inverse = vec![0usize; axes.len()];\n axes.iter()\n .enumerate()\n .for_each(|(i, &axis)| inverse[axis] = i);\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_permute(grad, &inverse)\n });\n }\n }\n\n match PermuteDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroPermuteDims::<B>::new(tensor.node.id, axes.to_vec()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n prep.finish(axes.to_vec(), B::float_permute(tensor.primitive, axes))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_permute(tensor.primitive, axes)),\n }\n }\n\n fn float_flip(tensor: FloatTensor<Self>, axes: &[usize]) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct FlipDim;\n\n #[derive(new, Debug)]\n struct RetroFlipDims<B: Backend> {\n input_id: NodeID,\n axes: Vec<usize>,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroFlipDims<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_flip(input, &self.axes);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for FlipDim {\n type State = Vec<usize>;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let axes = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_flip(grad, &axes)\n });\n }\n }\n\n match FlipDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroFlipDims::<B>::new(tensor.node.id, axes.to_vec()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n prep.finish(axes.to_vec(), B::float_flip(tensor.primitive, axes))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_flip(tensor.primitive, axes)),\n }\n }\n\n fn float_reshape(tensor: FloatTensor<Self>, shape: 
Shape) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct ReshapeDim;\n\n #[derive(new, Debug)]\n struct RetroReshape<B: Backend> {\n input_id: NodeID,\n shape: Shape,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroReshape<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_reshape(input, self.shape.clone());\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for ReshapeDim {\n type State = (Shape, Shape);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape_original, shape) = ops.state;\n let ndims_out = shape.num_dims();\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let shape_grad = grad.shape();\n let mut grad = grad;\n\n for i in 0..ndims_out {\n if shape.dims[i] == 1 && shape_grad.dims[i] != 1 {\n grad = B::float_sum_dim(grad, i);\n }\n }\n\n B::float_reshape(grad, shape_original)\n });\n }\n }\n\n match ReshapeDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroReshape::<B>::new(tensor.node.id, shape.clone()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (tensor.primitive.shape(), shape.clone()),\n B::float_reshape(tensor.primitive, shape),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_reshape(tensor.primitive, shape)),\n }\n }\n\n fn float_gather(\n dim: usize,\n tensor: FloatTensor<Self>,\n indices: IntTensor<B>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Gather;\n\n impl<B: Backend> Backward<B, 1> for Gather {\n type State = (usize, IntTensor<B>, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, indices, shape, device) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let zeros = 
B::float_zeros(shape, &device);\n B::float_scatter(dim, zeros, indices, grad)\n });\n }\n }\n\n match Gather\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n dim,\n indices.clone(),\n tensor.primitive.shape(),\n B::float_device(&tensor.primitive),\n ),\n B::float_gather(dim, tensor.primitive, indices),\n ),\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_gather(dim, tensor.primitive, indices))\n }\n }\n }\n\n fn float_scatter(\n dim: usize,\n tensor: FloatTensor<Self>,\n indices: IntTensor<B>,\n value: FloatTensor<Self>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Scatter;\n\n impl<B: Backend> Backward<B, 2> for Scatter {\n type State = (usize, IntTensor<B>, Shape, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, indices, shape_lhs, shape_rhs, device) = ops.state;\n let [indices_4lhs, indices_4rhs] = duplicate(&ops.parents, Some(indices));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let zeros = B::float_zeros(shape_lhs, &device);\n B::float_scatter(dim, grad, indices_4lhs.unwrap(), zeros)\n },\n |grad| {\n let zeros = B::float_zeros(shape_rhs, &device);\n B::float_scatter(dim, zeros, indices_4rhs.unwrap(), grad)\n },\n );\n }\n }\n\n match Scatter\n .prepare::<C>([tensor.node, value.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n dim,\n indices.clone(),\n tensor.primitive.shape(),\n value.primitive.shape(),\n B::float_device(&value.primitive),\n ),\n B::float_scatter(dim, tensor.primitive, indices, value.primitive),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_scatter(\n dim,\n tensor.primitive,\n indices,\n value.primitive,\n )),\n }\n }\n\n fn float_select(\n tensor: FloatTensor<Self>,\n dim: usize,\n indices: IntTensor<B>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Select;\n\n 
#[derive(new, Debug)]\n struct RetroSelect<B: Backend> {\n input_id: NodeID,\n dim: usize,\n indices: IntTensor<B>,\n }\n\n impl<B: Backend> RetroForward for RetroSelect<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_select(input, self.dim, self.indices.clone());\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for Select {\n type State = (usize, IntTensor<B>, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, indices, shape, device) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let zeros = B::float_zeros(shape, &device);\n B::float_select_assign(zeros, dim, indices, grad)\n });\n }\n }\n\n match Select\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSelect::<B>::new(tensor.node.id, dim, indices.clone()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n dim,\n indices.clone(),\n tensor.primitive.shape(),\n B::float_device(&tensor.primitive),\n ),\n B::float_select(tensor.primitive, dim, indices),\n ),\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_select(tensor.primitive, dim, indices))\n }\n }\n }\n\n fn float_select_assign(\n tensor: FloatTensor<Self>,\n dim: usize,\n indices: IntTensor<B>,\n value: FloatTensor<Self>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct IndexSelectDimAssign;\n\n #[derive(new, Debug)]\n struct RetroSelectAssign<B: Backend> {\n tensor_id: NodeID,\n dim: usize,\n indices: IntTensor<B>,\n value_id: NodeID,\n }\n\n impl<B: Backend> RetroForward for RetroSelectAssign<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);\n let value = states.get_state::<B::FloatTensorPrimitive>(&self.value_id);\n 
let out = B::float_select_assign(tensor, self.dim, self.indices.clone(), value);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 2> for IndexSelectDimAssign {\n type State = (usize, IntTensor<B>);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, indices) = ops.state;\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| grad,\n |grad| B::float_select(grad, dim, indices),\n );\n }\n }\n\n match IndexSelectDimAssign\n .prepare::<C>([tensor.node.clone(), value.node.clone()])\n .memory_bound()\n .retro_forward(RetroSelectAssign::<B>::new(\n tensor.node.id,\n dim,\n indices.clone(),\n value.node.id,\n ))\n .parents([&tensor, &value])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (dim, indices.clone()),\n B::float_select_assign(tensor.primitive, dim, indices, value.primitive),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_select_assign(\n tensor.primitive,\n dim,\n indices,\n value.primitive,\n )),\n }\n }\n\n fn float_slice(\n tensor: FloatTensor<Self>,\n ranges: &[core::ops::Range<usize>],\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Index;\n\n #[derive(new, Debug)]\n struct RetroSlice<B: Backend> {\n tensor_id: NodeID,\n ranges: Vec<core::ops::Range<usize>>,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroSlice<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);\n let out = B::float_slice(tensor, &self.ranges);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for Index {\n type State = (Vec<core::ops::Range<usize>>, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (ranges, shape, device) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let zeros = 
B::float_zeros(shape, &device);\n B::float_slice_assign(zeros, &ranges, grad)\n });\n }\n }\n\n match Index\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSlice::<B>::new(tensor.node.id, ranges.to_vec()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n ranges.to_vec(),\n tensor.primitive.shape(),\n B::float_device(&tensor.primitive),\n ),\n B::float_slice(tensor.primitive, ranges),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_slice(tensor.primitive, ranges)),\n }\n }\n\n fn float_slice_assign(\n tensor: FloatTensor<Self>,\n ranges: &[core::ops::Range<usize>],\n value: FloatTensor<Self>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct SliceAssign;\n\n #[derive(new, Debug)]\n struct RetroSliceAssign<B: Backend> {\n tensor_id: NodeID,\n ranges: Vec<core::ops::Range<usize>>,\n value_id: NodeID,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroSliceAssign<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);\n let value = states.get_state::<B::FloatTensorPrimitive>(&self.value_id);\n let out = B::float_slice_assign(tensor, &self.ranges, value);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 2> for SliceAssign {\n type State = (Vec<core::ops::Range<usize>>, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (ranges, shape_rhs, device) = ops.state;\n let [ranges_4lhs, ranges_4rhs] = duplicate(&ops.parents, Some(ranges));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let zeros = B::float_zeros(shape_rhs, &device);\n B::float_slice_assign(grad, &ranges_4lhs.unwrap(), zeros)\n },\n |grad| B::float_slice(grad, &ranges_4rhs.unwrap()),\n );\n }\n }\n\n match SliceAssign\n .prepare::<C>([tensor.node.clone(), value.node.clone()])\n 
.memory_bound()\n .retro_forward(RetroSliceAssign::<B>::new(\n tensor.node.id,\n ranges.to_vec(),\n value.node.id,\n ))\n .parents([&tensor, &value])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n ranges.to_vec(),\n value.primitive.shape(),\n B::float_device(&value.primitive),\n ),\n B::float_slice_assign(tensor.primitive, ranges, value.primitive),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_slice_assign(\n tensor.primitive,\n ranges,\n value.primitive,\n )),\n }\n }\n\n fn float_mask_where(\n tensor: FloatTensor<Self>,\n mask: BoolTensor<Self>,\n source: FloatTensor<Self>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct MaskWhere;\n\n impl<B: Backend> Backward<B, 2> for MaskWhere {\n type State = (BoolTensor<B>, Shape, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (mask, shape_lhs, shape_rhs, device) = ops.state;\n let [mask_4lhs, mask_4rhs] = duplicate(&ops.parents, Some(mask));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let zeros = B::float_zeros(shape_lhs.clone(), &device);\n let grad = B::float_mask_where(grad, mask_4lhs.unwrap(), zeros);\n\n broadcast_shape::<B>(grad, &shape_lhs)\n },\n |grad| {\n let zeros = B::float_zeros(shape_rhs.clone(), &device);\n let grad = B::float_mask_where(zeros, mask_4rhs.unwrap(), grad);\n\n broadcast_shape::<B>(grad, &shape_rhs)\n },\n );\n }\n }\n\n match MaskWhere\n .prepare::<C>([tensor.node, source.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n mask.clone(),\n tensor.primitive.shape(),\n source.primitive.shape(),\n B::float_device(&source.primitive),\n ),\n B::float_mask_where(tensor.primitive, mask, source.primitive),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_mask_where(\n tensor.primitive,\n mask,\n source.primitive,\n )),\n }\n }\n\n fn float_mask_fill(\n tensor: FloatTensor<Self>,\n mask: 
BoolTensor<B>,\n value: FloatElem<B>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct MaskFill;\n\n impl<B: Backend> Backward<B, 1> for MaskFill {\n type State = BoolTensor<B>;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_mask_fill(grad, ops.state, 0.elem())\n });\n }\n }\n\n match MaskFill\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n mask.clone(),\n B::float_mask_fill(tensor.primitive, mask, value),\n ),\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_mask_fill(tensor.primitive, mask, value))\n }\n }\n }\n\n fn float_equal(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {\n B::float_equal(lhs.primitive, rhs.primitive)\n }\n\n fn float_equal_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {\n B::float_equal_elem(lhs.primitive, rhs)\n }\n\n fn float_greater(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {\n B::float_greater(lhs.primitive, rhs.primitive)\n }\n\n fn float_greater_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {\n B::float_greater_elem(lhs.primitive, rhs)\n }\n\n fn float_greater_equal(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {\n B::float_greater_equal(lhs.primitive, rhs.primitive)\n }\n\n fn float_greater_equal_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {\n B::float_greater_equal_elem(lhs.primitive, rhs)\n }\n\n fn float_lower(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {\n B::float_lower(lhs.primitive, rhs.primitive)\n }\n\n fn float_lower_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {\n B::float_lower_elem(lhs.primitive, rhs)\n }\n\n fn float_lower_equal(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {\n B::float_lower_equal(lhs.primitive, rhs.primitive)\n 
}\n\n fn float_lower_equal_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {\n B::float_lower_equal_elem(lhs.primitive, rhs)\n }\n\n fn float_detach(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n // When we detach a tensor, we remove it from the graph, but we still want to keep the\n // `require_grad` setting.\n let is_require_grad = Self::float_is_require_grad(&tensor);\n let tensor = AutodiffTensor::new(tensor.primitive);\n\n match is_require_grad {\n true => tensor.require_grad(),\n false => tensor,\n }\n }\n\n fn float_set_require_grad(tensor: FloatTensor<Self>, require_grad: bool) -> FloatTensor<Self> {\n if require_grad {\n return tensor.require_grad();\n }\n\n AutodiffTensor::new(tensor.primitive)\n }\n\n fn float_is_require_grad(tensor: &FloatTensor<Self>) -> bool {\n matches!(tensor.node.requirement, Requirement::Grad)\n }\n\n fn float_mean(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Mean;\n\n impl<B: Backend> Backward<B, 1> for Mean {\n type State = Shape;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let shape = ops.state;\n let val = 1_f64 / shape.num_elements() as f64;\n let ones = B::float_ones(shape, &B::float_device(&grad));\n let val = B::float_mul_scalar(ones, val.elem());\n\n let grad = unsqueeze_like::<B>(grad, val.shape());\n B::float_mul(val, grad)\n });\n }\n }\n\n match Mean.prepare::<C>([tensor.node]).compute_bound().stateful() {\n OpsKind::Tracked(prep) => {\n prep.finish(tensor.primitive.shape(), B::float_mean(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_mean(tensor.primitive)),\n }\n }\n\n fn float_sum(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sum;\n\n impl<B: Backend> Backward<B, 1> for Sum {\n type State = Shape;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut 
Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let val = B::float_ones(ops.state, &B::float_device(&grad));\n\n let grad = unsqueeze_like::<B>(grad, val.shape());\n B::float_mul(val, grad)\n });\n }\n }\n\n match Sum.prepare::<C>([tensor.node]).compute_bound().stateful() {\n OpsKind::Tracked(prep) => {\n prep.finish(tensor.primitive.shape(), B::float_sum(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_sum(tensor.primitive)),\n }\n }\n\n fn float_mean_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct MeanDim;\n\n impl<B: Backend> Backward<B, 1> for MeanDim {\n type State = (Shape, usize);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape, dim) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let val = 1_f64 / shape.dims[dim] as f64;\n let ones = B::float_ones(shape, &B::float_device(&grad));\n let val = B::float_mul_scalar(ones, B::FloatElem::from_elem(val));\n\n let grad = B::float_sum_dim(grad, dim);\n B::float_mul(val, grad)\n });\n }\n }\n\n match MeanDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (tensor.primitive.shape(), dim),\n B::float_mean_dim(tensor.primitive, dim),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_mean_dim(tensor.primitive, dim)),\n }\n }\n\n fn float_sum_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct SumDim;\n\n impl<B: Backend> Backward<B, 1> for SumDim {\n type State = (Shape, usize);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape, dim) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let ones = B::float_ones(shape, &B::float_device(&grad));\n let grad = 
B::float_sum_dim(grad, dim);\n\n B::float_mul(ones, grad)\n });\n }\n }\n\n match SumDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (tensor.primitive.shape(), dim),\n B::float_sum_dim(tensor.primitive, dim),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_sum_dim(tensor.primitive, dim)),\n }\n }\n\n fn float_argmax(tensor: FloatTensor<Self>, dim: usize) -> IntTensor<B> {\n B::float_argmax(tensor.primitive, dim)\n }\n\n fn float_argmin(tensor: FloatTensor<Self>, dim: usize) -> IntTensor<B> {\n B::float_argmin(tensor.primitive, dim)\n }\n\n fn float_exp(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Exp;\n\n retro_unary!(RetroExp, B::float_exp);\n\n impl<B: Backend> Backward<B, 1> for Exp {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n let output = B::float_exp(input);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_mul(grad, output)\n });\n }\n }\n\n match Exp\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroExp::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_exp(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_exp(tensor.primitive)),\n }\n }\n\n fn float_log(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Log;\n\n retro_unary!(RetroLog, B::float_log);\n\n impl<B: Backend> Backward<B, 1> for Log {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = 
B::float_powf_scalar(input, -1.0);\n B::float_mul(grad, value)\n });\n }\n }\n\n match Log\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroLog::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_log(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_log(tensor.primitive)),\n }\n }\n\n fn float_log1p(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Log1P;\n\n retro_unary!(RetroLog1P, B::float_log1p);\n\n impl<B: Backend> Backward<B, 1> for Log1P {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_add_scalar(input, 1.elem());\n let value = B::float_powf_scalar(value, -1.0);\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Log1P\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroLog1P::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_log1p(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_log1p(tensor.primitive)),\n }\n }\n\n fn float_powf_scalar(tensor: FloatTensor<Self>, value: f32) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct PowfScalar;\n\n #[derive(new, Debug)]\n struct RetroPowfScalar<B: Backend> {\n lhs_id: NodeID,\n rhs: f32,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroPowfScalar<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let lhs = states.get_state::<B::FloatTensorPrimitive>(&self.lhs_id);\n let out = B::float_powf_scalar(lhs, self.rhs);\n states.save(out_node, out)\n }\n }\n\n impl<B: 
Backend> Backward<B, 1> for PowfScalar {\n type State = (NodeID, f32);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (tensor_id, value) = ops.state;\n let tensor = checkpointer.retrieve_node_output(tensor_id);\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let tmp = B::float_powf_scalar(tensor, value - 1.0);\n let value = B::float_mul_scalar(tmp, value.elem());\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match PowfScalar\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroPowfScalar::<B>::new(tensor.node.id, value))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = (prep.checkpoint(&tensor), value);\n prep.finish(state, B::float_powf_scalar(tensor.primitive, value))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_powf_scalar(tensor.primitive, value)),\n }\n }\n\n fn float_sqrt(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sqrt;\n\n retro_unary!(RetroSqrt, B::float_sqrt);\n\n impl<B: Backend> Backward<B, 1> for Sqrt {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_div_scalar(B::float_powf_scalar(input, -0.5), 2.elem());\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Sqrt\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSqrt::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_sqrt(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_sqrt(tensor.primitive)),\n }\n }\n\n fn float_abs(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Abs;\n\n 
retro_unary!(RetroAbs, B::float_abs);\n\n impl<B: Backend> Backward<B, 1> for Abs {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let tensor: B::FloatTensorPrimitive = checkpointer.retrieve_node_output(ops.state);\n let state = B::float_sign(tensor);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_mul(grad, state)\n });\n }\n }\n\n match Abs\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroAbs::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_abs(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_abs(tensor.primitive)),\n }\n }\n\n fn float_cos(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Cos;\n\n retro_unary!(RetroCos, B::float_cos);\n\n impl<B: Backend> Backward<B, 1> for Cos {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_neg(B::float_sin(input));\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Cos\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroCos::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_cos(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_cos(tensor.primitive)),\n }\n }\n\n fn float_sin(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sin;\n\n retro_unary!(RetroSin, B::float_sin);\n\n impl<B: Backend> Backward<B, 1> for Sin {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 
1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let state = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_cos(state);\n B::float_mul(grad, value)\n });\n }\n }\n\n match Sin\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSin::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_sin(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_sin(tensor.primitive)),\n }\n }\n\n fn float_tanh(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Tanh;\n\n retro_unary!(RetroTanh, B::float_tanh);\n\n impl<B: Backend> Backward<B, 1> for Tanh {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n let state = B::float_tanh(input);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_add_scalar(\n B::float_neg(B::float_powf_scalar(state, 2.0)),\n 1.elem(),\n );\n B::float_mul(grad, value)\n });\n }\n }\n\n match Tanh\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroTanh::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_tanh(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_tanh(tensor.primitive)),\n }\n }\n\n fn float_round(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Round;\n retro_unary!(RetroRound, B::float_round);\n\n impl<B: Backend> Backward<B, 1> for Round {\n type State = (Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n 
            ) {
                let (shape, device) = ops.state;
                // `round` is piecewise constant, so its derivative is zero almost
                // everywhere; the state only carries (shape, device) so a matching
                // zero-gradient tensor can be built without the input.
                unary::<B, _>(ops.parents, ops.node, grads, |_grad| {
                    B::float_zeros(shape, &device)
                })
            }
        }

        match Round
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroRound::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(preps) => preps.finish(
                (tensor.primitive.shape(), B::float_device(&tensor.primitive)),
                B::float_round(tensor.primitive),
            ),
            OpsKind::UnTracked(preps) => preps.finish(B::float_round(tensor.primitive)),
        }
    }

    /// Floor with autodiff support. Like `round`/`ceil`, floor is piecewise
    /// constant, so the registered backward produces an all-zeros gradient.
    fn float_floor(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Floor;
        retro_unary!(RetroFloor, B::float_floor);

        impl<B: Backend> Backward<B, 1> for Floor {
            // (shape, device) of the input, used to build the zero gradient.
            type State = (Shape, B::Device);

            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (shape, device) = ops.state;
                // Derivative of floor is zero almost everywhere.
                unary::<B, _>(ops.parents, ops.node, grads, |_grad| {
                    B::float_zeros(shape, &device)
                })
            }
        }

        match Floor
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroFloor::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(preps) => preps.finish(
                (tensor.primitive.shape(), B::float_device(&tensor.primitive)),
                B::float_floor(tensor.primitive),
            ),
            OpsKind::UnTracked(preps) => preps.finish(B::float_floor(tensor.primitive)),
        }
    }

    /// Ceil with autodiff support; same zero-gradient backward as floor/round.
    fn float_ceil(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Ceil;
        retro_unary!(RetroCeil, B::float_ceil);

        impl<B: Backend> Backward<B, 1> for Ceil {
            // (shape, device) of the input, used to build the zero gradient.
            type State = (Shape, B::Device);

            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (shape, device) = ops.state;
                // Derivative of ceil is zero almost everywhere.
                unary::<B, _>(ops.parents, ops.node, grads, |_grad| {
                    B::float_zeros(shape, &device)
                })
            }
        }

        match Ceil
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
.retro_forward(RetroCeil::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (tensor.primitive.shape(), B::float_device(&tensor.primitive)),\n B::float_floor(tensor.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_floor(tensor.primitive)),\n }\n }\n\n fn float_erf(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Erf;\n\n retro_unary!(RetroErf, B::float_erf);\n\n impl<B: Backend> Backward<B, 1> for Erf {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let ops = checkpointer.retrieve_node_output(ops.state);\n let exponent = B::float_neg(B::float_powf_scalar(ops, 2.0));\n let numerator = B::float_mul_scalar(B::float_exp(exponent), 2.0.elem());\n let denominator = core::f64::consts::PI.sqrt().elem();\n let value = B::float_div_scalar(numerator, denominator);\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Erf\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroErf::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_erf(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_erf(tensor.primitive)),\n }\n }\n\n fn float_cat(tensors: Vec<FloatTensor<Self>>, dim: usize) -> FloatTensor<Self> {\n #[derive(new, Debug)]\n struct CatStep<B: Backend> {\n nodes: Vec<Option<NodeRef>>,\n // The dimension of each tensor along the dim dimension.\n // This indicates the number of dimension concatenated for each tensor.\n dim_sizes: Vec<usize>,\n output: NodeRef,\n phantom: PhantomData<B>,\n dim: usize,\n }\n\n impl<B: Backend> Step for CatStep<B> {\n fn step(self: Box<Self>, grads: &mut Gradients, _checkpointer: &mut Checkpointer) {\n let grad = 
grads.consume::<B>(&self.output);\n let ranges: Vec<_> = grad.shape().dims.iter().map(|v| 0..*v).collect();\n\n let mut current_index = 0;\n\n self.nodes\n .into_iter()\n .zip(self.dim_sizes)\n .filter_map(|(node, dim_size)| node.map(|node| (node, dim_size)))\n .for_each(|(node, dim_size)| {\n let mut ranges = ranges.clone();\n ranges[self.dim] = current_index..dim_size + current_index;\n current_index += dim_size;\n grads.register::<B>(node.id, B::float_slice(grad.clone(), &ranges));\n });\n }\n\n fn node(&self) -> NodeID {\n self.output.id\n }\n\n fn parents(&self) -> Vec<NodeID> {\n self.nodes\n .iter()\n .filter_map(|node| node.clone())\n .map(|node| node.id)\n .collect()\n }\n fn depth(&self) -> usize {\n self.output.order\n }\n }\n\n let mut nodes = Vec::with_capacity(tensors.len());\n let mut primitives = Vec::with_capacity(tensors.len());\n let mut dim_sizes = Vec::with_capacity(tensors.len());\n\n tensors.into_iter().for_each(|tensor| {\n dim_sizes.push(tensor.primitive.shape().dims[dim]);\n nodes.push(tensor.node);\n primitives.push(tensor.primitive);\n });\n\n let requirement = Requirement::from_nodes(&nodes);\n\n // For simplicity, this operation does not checkpoint anything\n let cat_computing_property = ComputingProperty::Ambiguous;\n let checkpointer_builder = CheckpointerBuilder::default();\n\n let output = B::float_cat(primitives, dim);\n if requirement.is_none() {\n return AutodiffTensor::from_parents(\n output,\n &nodes,\n requirement,\n cat_computing_property,\n );\n }\n\n let output =\n AutodiffTensor::from_parents(output, &nodes, requirement, cat_computing_property);\n let nodes = nodes\n .into_iter()\n .map(|node| node.clone_if_require_grad())\n .collect::<Vec<_>>();\n\n let ops = CatStep::<B>::new(nodes, dim_sizes, output.node.clone(), dim);\n output.register_step(ops, checkpointer_builder)\n }\n\n fn float_max_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {\n match MaxMinDim\n .prepare::<C>([tensor.node])\n 
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => {
                // Save (indices, input shape): MaxMinDim's backward (defined
                // elsewhere in this file) consumes them — presumably to scatter
                // the gradient back to the selected positions; confirm at its
                // definition.
                let shape = tensor.primitive.shape();
                let (tensor, index) = B::float_max_dim_with_indices(tensor.primitive, dim);
                prep.finish((index, shape), tensor)
            }
            // Untracked: no state needed, the cheaper value-only op suffices.
            OpsKind::UnTracked(prep) => prep.finish(B::float_max_dim(tensor.primitive, dim)),
        }
    }
    fn float_max_dim_with_indices(
        tensor: FloatTensor<Self>,
        dim: usize,
    ) -> (FloatTensor<Self>, IntTensor<B>) {
        match MaxMinDim
            .prepare::<C>([tensor.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => {
                // Indices are both returned to the caller and cloned into the
                // backward state for gradient routing.
                let shape = tensor.primitive.shape();
                let (tensor, index) = B::float_max_dim_with_indices(tensor.primitive, dim);
                let tensor = prep.finish((index.clone(), shape), tensor);

                (tensor, index)
            }
            OpsKind::UnTracked(prep) => {
                let (tensor, index) = B::float_max_dim_with_indices(tensor.primitive, dim);
                let tensor = prep.finish(tensor);

                (tensor, index)
            }
        }
    }
    fn float_min_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {
        // Mirror of float_max_dim; shares the MaxMinDim backward, which only
        // needs the winning indices regardless of whether min or max was taken.
        match MaxMinDim
            .prepare::<C>([tensor.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => {
                let shape = tensor.primitive.shape();
                let (tensor, index) = B::float_min_dim_with_indices(tensor.primitive, dim);
                prep.finish((index, shape), tensor)
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_min_dim(tensor.primitive, dim)),
        }
    }
    fn float_min_dim_with_indices(
        tensor: FloatTensor<Self>,
        dim: usize,
    ) -> (FloatTensor<Self>, IntTensor<B>) {
        // Mirror of float_max_dim_with_indices for the minimum.
        match MaxMinDim
            .prepare::<C>([tensor.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => {
                let shape = tensor.primitive.shape();
                let (tensor, index) = B::float_min_dim_with_indices(tensor.primitive, dim);
                let tensor = prep.finish((index.clone(), shape), tensor);

                (tensor, index)
            }
            OpsKind::UnTracked(prep) => {
                let (tensor, index) = B::float_min_dim_with_indices(tensor.primitive, dim);
                let tensor = prep.finish(tensor);

                (tensor, index)
            }
        }
    }
fn float_into_int(tensor: FloatTensor<Self>) -> <Autodiff<B> as Backend>::IntTensorPrimitive {\n B::float_into_int(tensor.primitive)\n }\n\n fn float_powf(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct PowF;\n\n retro_binary!(RetroPowf, B::float_powf);\n\n impl<B: Backend> Backward<B, 2> for PowF {\n type State = (NodeID, NodeID, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (lhs_id, rhs_id, broadcast) = ops.state;\n let lhs: B::FloatTensorPrimitive = checkpointer.retrieve_node_output(lhs_id);\n let rhs: B::FloatTensorPrimitive = checkpointer.retrieve_node_output(rhs_id);\n\n // Both lhs and rhs are needed for both lhs and rhs gradients, but we clone them\n // the number of times required by the parents specification.\n let [rhs_4lhs, rhs_4rhs] = duplicate(&ops.parents, Some(rhs));\n let [lhs_4lhs, lhs_4rhs] = duplicate(&ops.parents, Some(lhs));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n //rhs*(lhs.val**(rhs-1))*grad\n let rhs1 = rhs_4lhs.unwrap();\n let rhs2 = rhs1.clone();\n let lhs = lhs_4lhs.unwrap();\n\n let tmp = B::float_powf(\n lhs,\n B::float_sub_scalar(rhs1, B::FloatElem::from_elem(1.0)),\n );\n let value = B::float_mul(tmp, rhs2);\n let grad = B::float_mul(grad, value);\n\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n //lhs**rhs * ln(lhs) * grad\n let rhs = rhs_4rhs.unwrap();\n let lhs1 = lhs_4rhs.unwrap();\n let lhs2 = lhs1.clone();\n let tmp = B::float_powf(lhs1, rhs);\n let value = B::float_mul(tmp, B::float_log(lhs2));\n let grad = B::float_mul(grad, value);\n\n broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match PowF\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroPowf::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, 
&rhs])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = prep.checkpoint(&lhs);\n let rhs_state = prep.checkpoint(&rhs);\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n B::float_powf(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_powf(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_sign(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sign;\n\n retro_unary!(RetroSign, B::float_sign);\n\n impl<B: Backend> Backward<B, 1> for Sign {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad|\n // Always return 0 because the derivative of the sign function\n // does not contribute to gradient updates in a meaningful way.\n B::float_mul_scalar(grad, 0.elem()));\n }\n }\n\n Sign.prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSign::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateless(B::float_sign(tensor.primitive))\n }\n\n fn float_expand(tensor: FloatTensor<Self>, shape: Shape) -> FloatTensor<Self> {\n // D1: tensor, D2: shape\n #[derive(Debug)]\n struct ExpandDim;\n\n #[derive(new, Debug)]\n struct RetroExpand<B: Backend> {\n input_id: NodeID,\n shape: Shape,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroExpand<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_expand(input, self.shape.clone());\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for ExpandDim {\n type State = (Shape, Shape);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape_in, shape_out) = ops.state;\n let ndims_in = shape_in.num_dims();\n let ndims_out = shape_out.num_dims();\n\n let 
mut shape_expanded = vec![1; ndims_out];\n\n debug_assert!(ndims_out >= ndims_in);\n\n for i in 0..ndims_in {\n shape_expanded[i + (ndims_out - ndims_in)] = shape_in.dims[i];\n }\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let shape_grad = grad.shape();\n let mut grad = grad;\n\n #[allow(clippy::needless_range_loop)]\n for i in 0..ndims_out {\n if shape_expanded[i] == 1 && shape_grad.dims[i] != 1 {\n grad = B::float_sum_dim(grad, i);\n }\n }\n\n B::float_reshape(grad, shape_in)\n });\n }\n }\n\n match ExpandDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroExpand::<B>::new(tensor.node.id, shape.clone()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (tensor.primitive.shape(), shape.clone()),\n B::float_expand(tensor.primitive, shape),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_expand(tensor.primitive, shape)),\n }\n }\n\n fn float_sort(tensor: FloatTensor<Self>, dim: usize, descending: bool) -> FloatTensor<Self> {\n match super::sort::SortDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let shape = tensor.primitive.shape();\n let (tensor, indices) =\n B::float_sort_with_indices(tensor.primitive, dim, descending);\n prep.finish((indices, shape), tensor)\n }\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_sort(tensor.primitive, dim, descending))\n }\n }\n }\n\n fn float_sort_with_indices(\n tensor: FloatTensor<Self>,\n dim: usize,\n descending: bool,\n ) -> (FloatTensor<Self>, IntTensor<B>) {\n match super::sort::SortDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let shape = tensor.primitive.shape();\n let (tensor, indices) =\n B::float_sort_with_indices(tensor.primitive, dim, descending);\n let tensor = prep.finish((indices.clone(), shape), tensor);\n\n (tensor, indices)\n }\n OpsKind::UnTracked(prep) => {\n let (tensor, indices) =\n 
B::float_sort_with_indices(tensor.primitive, dim, descending);\n let tensor = prep.finish(tensor);\n\n (tensor, indices)\n }\n }\n }\n\n fn float_argsort(tensor: FloatTensor<Self>, dim: usize, descending: bool) -> IntTensor<B> {\n B::float_argsort(tensor.primitive, dim, descending)\n }\n\n fn float_repeat_dim(tensor: FloatTensor<Self>, dim: usize, times: usize) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Repeat;\n\n #[derive(new, Debug)]\n struct RetroRepeat<B: Backend> {\n tensor_id: NodeID,\n dim: usize,\n times: usize,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroRepeat<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);\n let out = B::float_repeat_dim(tensor, self.dim, self.times);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for Repeat {\n type State = (usize, usize);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, times) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let mut dims = grad.shape().dims;\n let orig_dim_size = dims[dim] / times;\n if orig_dim_size > 1 {\n dims[dim] = orig_dim_size;\n let orig_dims = dims.clone();\n dims.insert(dim + 1, times); // shape [..., orig_dim_size, times, ...]\n let grad = B::float_reshape(grad, Shape::from(dims));\n let grad = B::float_sum_dim(grad, dim + 1); // sum over repeat times\n B::float_reshape(grad, Shape::from(orig_dims))\n } else {\n B::float_sum_dim(grad, dim)\n }\n });\n }\n }\n\n match Repeat\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroRepeat::<B>::new(tensor.node.id, dim, times))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (dim, times),\n B::float_repeat_dim(tensor.primitive, dim, times),\n ),\n OpsKind::UnTracked(prep) => {\n 
prep.finish(B::float_repeat_dim(tensor.primitive, dim, times))\n }\n }\n }\n\n fn float_cast(tensor: FloatTensor<Self>, dtype: burn_tensor::FloatDType) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_cast(tensor.primitive, dtype))\n }\n\n // TODO: Implement float_prod and float_sum\n // https://github.com/tracel-ai/burn/issues/1458\n}"
],
"name": "lhs",
"type": "FloatTensor<Self>"
}
],
"end_line": 258,
"name": "float_sub_scalar",
"signature": "fn float_sub_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self>",
"start_line": 233
} | {
"class_name": "impl<B: Backend, C: CheckpointStrategy> FloatTensorOps<Self> for Autodiff<B, C> {\n fn float_from_data(data: TensorData, device: &Device<Self>) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_from_data(data, device))\n }\n\n fn float_random(\n shape: Shape,\n distribution: burn_tensor::Distribution,\n device: &Device<Self>,\n ) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_random(shape, distribution, device))\n }\n\n fn float_zeros(shape: Shape, device: &Device<Self>) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_zeros(shape, device))\n }\n\n fn float_ones(shape: Shape, device: &Device<Self>) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_ones(shape, device))\n }\n\n async fn float_into_data(tensor: FloatTensor<Self>) -> TensorData {\n B::float_into_data(tensor.primitive).await\n }\n\n fn float_device(tensor: &FloatTensor<Self>) -> Device<Self> {\n B::float_device(&tensor.primitive)\n }\n\n fn float_to_device(tensor: FloatTensor<Self>, device: &Device<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct ToDevice;\n\n impl<B: Backend> Backward<B, 1> for ToDevice {\n type State = B::Device;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_to_device(grad, &ops.state)\n });\n }\n }\n\n match ToDevice\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let device_old = B::float_device(&tensor.primitive);\n prep.finish(device_old, B::float_to_device(tensor.primitive, device))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_to_device(tensor.primitive, device)),\n }\n }\n\n fn float_empty(shape: Shape, device: &Device<Self>) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_empty(shape, device))\n }\n\n fn float_add(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Add;\n\n 
retro_binary!(RetroAdd, B::float_add);\n\n impl<B: Backend> Backward<B, 2> for Add {\n type State = (Shape, Shape);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape_lhs, shape_rhs) = ops.state;\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| broadcast_shape::<B>(grad, &shape_lhs),\n |grad| broadcast_shape::<B>(grad, &shape_rhs),\n );\n }\n }\n\n match Add\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroAdd::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (lhs.primitive.shape(), rhs.primitive.shape()),\n B::float_add(lhs.primitive, rhs.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_add(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_add_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct AddScalar;\n\n retro_unary_scalar!(RetroAddScalar, B::float_add_scalar);\n\n impl<B: Backend> Backward<B, 1> for AddScalar {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| grad);\n }\n }\n\n AddScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroAddScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateless(B::float_add_scalar(lhs.primitive, rhs))\n }\n\n fn float_sub(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sub;\n\n retro_binary!(RetroSub, B::float_sub);\n\n impl<B: Backend> Backward<B, 2> for Sub {\n type State = (Shape, Shape);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape_lhs, shape_rhs) = ops.state;\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n 
grads,\n |grad| broadcast_shape::<B>(grad, &shape_lhs),\n |grad| broadcast_shape::<B>(B::float_neg(grad), &shape_rhs),\n );\n }\n }\n\n match Sub\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroSub::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (lhs.primitive.shape(), rhs.primitive.shape()),\n B::float_sub(lhs.primitive, rhs.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_sub(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_sub_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct SubScalar;\n\n retro_unary_scalar!(RetroSubScalar, B::float_sub_scalar);\n\n impl<B: Backend> Backward<B, 1> for SubScalar {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| grad);\n }\n }\n\n SubScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroSubScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateless(B::float_sub_scalar(lhs.primitive, rhs))\n }\n\n fn float_mul(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Mul;\n\n retro_binary!(RetroMul, B::float_mul);\n\n impl<B: Backend> Backward<B, 2> for Mul {\n type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (lhs, rhs, broadcast) = ops.state;\n let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));\n let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let grad = B::float_mul(grad, rhs.unwrap());\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n let grad = B::float_mul(grad, 
lhs.unwrap());\n broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let lhs_tracked = lhs.is_tracked();\n let rhs_tracked = rhs.is_tracked();\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match Mul\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroMul::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));\n let rhs_state = lhs_tracked.then(|| prep.checkpoint(&rhs));\n\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n B::float_mul(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_mul(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_mul_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct MulScalar;\n\n retro_unary_scalar!(RetroMulScalar, B::float_mul_scalar);\n\n impl<B: Backend> Backward<B, 1> for MulScalar {\n type State = FloatElem<B>;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_mul_scalar(grad, ops.state)\n });\n }\n }\n\n match MulScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroMulScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(rhs, B::float_mul_scalar(lhs.primitive, rhs)),\n OpsKind::UnTracked(prep) => prep.finish(B::float_mul_scalar(lhs.primitive, rhs)),\n }\n }\n\n fn float_div(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Div;\n\n retro_binary!(RetroDiv, B::float_div);\n\n impl<B: Backend> Backward<B, 2> for Div {\n type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n 
checkpointer: &mut Checkpointer,\n ) {\n let (lhs, rhs, broadcast) = ops.state;\n let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));\n let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));\n let [rhs_4lhs, rhs_4rhs] = duplicate(&ops.parents, rhs);\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let rhs = rhs_4lhs.unwrap();\n let value = B::float_powf_scalar(rhs, -1.0);\n let grad = B::float_mul(grad, value);\n\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n let rhs = rhs_4rhs.unwrap();\n let lhs = lhs.unwrap();\n let value = B::float_div(B::float_neg(lhs), B::float_powf_scalar(rhs, 2.0));\n let grad = B::float_mul(grad, value);\n\n broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let lhs_tracked = lhs.is_tracked();\n let rhs_tracked = rhs.is_tracked();\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match Div\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroDiv::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));\n let rhs_state = (lhs_tracked || rhs_tracked).then(|| prep.checkpoint(&rhs));\n\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n B::float_div(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_div(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_div_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct DivScalar;\n\n retro_unary_scalar!(RetroDivScalar, B::float_div_scalar);\n\n impl<B: Backend> Backward<B, 1> for DivScalar {\n type State = FloatElem<B>;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let tmp = 1.0 / ops.state.elem::<f32>();\n B::float_mul_scalar(grad, 
tmp.elem())\n });\n }\n }\n\n match DivScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroDivScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(rhs, B::float_div_scalar(lhs.primitive, rhs)),\n OpsKind::UnTracked(prep) => prep.finish(B::float_div_scalar(lhs.primitive, rhs)),\n }\n }\n\n fn float_remainder(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Rem;\n\n retro_binary!(RetroRem, B::float_remainder);\n\n impl<B: Backend> Backward<B, 2> for Rem {\n type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (lhs, rhs, broadcast) = ops.state;\n let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));\n let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n // remainder(x, y) = x - floor(x / y) * y\n // partial(x - floor(x / y) * y, x) = 1\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n // partial(x - floor(x / y) * y, y) = - floor(x / y)\n let rhs = rhs.unwrap();\n let lhs = lhs.unwrap();\n let value = B::float_neg(B::float_floor(B::float_div(lhs, rhs)));\n let grad = B::float_mul(grad, value);\n broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let lhs_tracked = lhs.is_tracked();\n let rhs_tracked = rhs.is_tracked();\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match Rem\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroRem::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));\n let rhs_state = (lhs_tracked || rhs_tracked).then(|| prep.checkpoint(&rhs));\n\n prep.finish(\n (lhs_state, 
rhs_state, broadcast),\n B::float_remainder(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_remainder(lhs.primitive, rhs.primitive))\n }\n }\n }\n\n fn float_remainder_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct RemainderScalar;\n\n retro_unary_scalar!(RetroRemainderScalar, B::float_remainder_scalar);\n\n impl<B: Backend> Backward<B, 1> for RemainderScalar {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| grad);\n }\n }\n\n RemainderScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroRemainderScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateless(B::float_remainder_scalar(lhs.primitive, rhs))\n }\n\n fn float_matmul(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Matmul;\n\n impl<B: Backend> Backward<B, 2> for Matmul {\n type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (lhs, rhs, broadcast) = ops.state;\n let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));\n let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let rhs = B::float_transpose(rhs.unwrap());\n let grad = B::float_matmul(grad, rhs);\n\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n let lhs = B::float_transpose(lhs.unwrap());\n let grad = B::float_matmul(lhs, grad);\n\n broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let lhs_tracked = lhs.is_tracked();\n let rhs_tracked = rhs.is_tracked();\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match Matmul\n .prepare::<C>([lhs.node.clone(), 
rhs.node.clone()])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));\n let rhs_state = lhs_tracked.then(|| prep.checkpoint(&rhs));\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n B::float_matmul(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_matmul(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_neg(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Neg;\n\n retro_unary!(RetroNeg, B::float_neg);\n\n impl<B: Backend> Backward<B, 1> for Neg {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| B::float_neg(grad));\n }\n }\n\n Neg.prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroNeg::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateless(B::float_neg(tensor.primitive))\n }\n\n fn float_recip(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Recip;\n\n retro_unary!(RetroRecip, B::float_recip);\n\n impl<B: Backend> Backward<B, 1> for Recip {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let tensor = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let tmp = B::float_powf_scalar(tensor, -2.0);\n let value = B::float_neg(tmp);\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Recip\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroRecip::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_recip(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_recip(tensor.primitive)),\n }\n }\n\n fn 
float_swap_dims(tensor: FloatTensor<Self>, dim1: usize, dim2: usize) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct SwapDim;\n\n #[derive(new, Debug)]\n struct RetroSwapDims<B: Backend> {\n input_id: NodeID,\n dim1: usize,\n dim2: usize,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroSwapDims<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_swap_dims(input, self.dim1, self.dim2);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for SwapDim {\n type State = (usize, usize);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim1, dim2) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_swap_dims(grad, dim2, dim1)\n });\n }\n }\n\n match SwapDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSwapDims::<B>::new(tensor.node.id, dim1, dim2))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (dim1, dim2),\n B::float_swap_dims(tensor.primitive, dim1, dim2),\n ),\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_swap_dims(tensor.primitive, dim1, dim2))\n }\n }\n }\n\n fn float_permute(tensor: FloatTensor<Self>, axes: &[usize]) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct PermuteDim;\n\n #[derive(new, Debug)]\n struct RetroPermuteDims<B: Backend> {\n input_id: NodeID,\n axes: Vec<usize>,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroPermuteDims<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_permute(input, &self.axes);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for PermuteDim {\n type State = Vec<usize>;\n\n fn backward(\n self,\n ops: 
Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let axes = ops.state;\n\n let mut inverse = vec![0usize; axes.len()];\n axes.iter()\n .enumerate()\n .for_each(|(i, &axis)| inverse[axis] = i);\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_permute(grad, &inverse)\n });\n }\n }\n\n match PermuteDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroPermuteDims::<B>::new(tensor.node.id, axes.to_vec()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n prep.finish(axes.to_vec(), B::float_permute(tensor.primitive, axes))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_permute(tensor.primitive, axes)),\n }\n }\n\n fn float_flip(tensor: FloatTensor<Self>, axes: &[usize]) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct FlipDim;\n\n #[derive(new, Debug)]\n struct RetroFlipDims<B: Backend> {\n input_id: NodeID,\n axes: Vec<usize>,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroFlipDims<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_flip(input, &self.axes);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for FlipDim {\n type State = Vec<usize>;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let axes = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_flip(grad, &axes)\n });\n }\n }\n\n match FlipDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroFlipDims::<B>::new(tensor.node.id, axes.to_vec()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n prep.finish(axes.to_vec(), B::float_flip(tensor.primitive, axes))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_flip(tensor.primitive, axes)),\n }\n }\n\n fn 
float_reshape(tensor: FloatTensor<Self>, shape: Shape) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct ReshapeDim;\n\n #[derive(new, Debug)]\n struct RetroReshape<B: Backend> {\n input_id: NodeID,\n shape: Shape,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroReshape<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_reshape(input, self.shape.clone());\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for ReshapeDim {\n type State = (Shape, Shape);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape_original, shape) = ops.state;\n let ndims_out = shape.num_dims();\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let shape_grad = grad.shape();\n let mut grad = grad;\n\n for i in 0..ndims_out {\n if shape.dims[i] == 1 && shape_grad.dims[i] != 1 {\n grad = B::float_sum_dim(grad, i);\n }\n }\n\n B::float_reshape(grad, shape_original)\n });\n }\n }\n\n match ReshapeDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroReshape::<B>::new(tensor.node.id, shape.clone()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (tensor.primitive.shape(), shape.clone()),\n B::float_reshape(tensor.primitive, shape),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_reshape(tensor.primitive, shape)),\n }\n }\n\n fn float_gather(\n dim: usize,\n tensor: FloatTensor<Self>,\n indices: IntTensor<B>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Gather;\n\n impl<B: Backend> Backward<B, 1> for Gather {\n type State = (usize, IntTensor<B>, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, indices, shape, device) = ops.state;\n\n unary::<B, _>(ops.parents, 
ops.node, grads, |grad| {\n let zeros = B::float_zeros(shape, &device);\n B::float_scatter(dim, zeros, indices, grad)\n });\n }\n }\n\n match Gather\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n dim,\n indices.clone(),\n tensor.primitive.shape(),\n B::float_device(&tensor.primitive),\n ),\n B::float_gather(dim, tensor.primitive, indices),\n ),\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_gather(dim, tensor.primitive, indices))\n }\n }\n }\n\n fn float_scatter(\n dim: usize,\n tensor: FloatTensor<Self>,\n indices: IntTensor<B>,\n value: FloatTensor<Self>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Scatter;\n\n impl<B: Backend> Backward<B, 2> for Scatter {\n type State = (usize, IntTensor<B>, Shape, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, indices, shape_lhs, shape_rhs, device) = ops.state;\n let [indices_4lhs, indices_4rhs] = duplicate(&ops.parents, Some(indices));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let zeros = B::float_zeros(shape_lhs, &device);\n B::float_scatter(dim, grad, indices_4lhs.unwrap(), zeros)\n },\n |grad| {\n let zeros = B::float_zeros(shape_rhs, &device);\n B::float_scatter(dim, zeros, indices_4rhs.unwrap(), grad)\n },\n );\n }\n }\n\n match Scatter\n .prepare::<C>([tensor.node, value.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n dim,\n indices.clone(),\n tensor.primitive.shape(),\n value.primitive.shape(),\n B::float_device(&value.primitive),\n ),\n B::float_scatter(dim, tensor.primitive, indices, value.primitive),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_scatter(\n dim,\n tensor.primitive,\n indices,\n value.primitive,\n )),\n }\n }\n\n fn float_select(\n tensor: FloatTensor<Self>,\n dim: usize,\n indices: IntTensor<B>,\n ) -> FloatTensor<Self> {\n 
#[derive(Debug)]\n struct Select;\n\n #[derive(new, Debug)]\n struct RetroSelect<B: Backend> {\n input_id: NodeID,\n dim: usize,\n indices: IntTensor<B>,\n }\n\n impl<B: Backend> RetroForward for RetroSelect<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_select(input, self.dim, self.indices.clone());\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for Select {\n type State = (usize, IntTensor<B>, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, indices, shape, device) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let zeros = B::float_zeros(shape, &device);\n B::float_select_assign(zeros, dim, indices, grad)\n });\n }\n }\n\n match Select\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSelect::<B>::new(tensor.node.id, dim, indices.clone()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n dim,\n indices.clone(),\n tensor.primitive.shape(),\n B::float_device(&tensor.primitive),\n ),\n B::float_select(tensor.primitive, dim, indices),\n ),\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_select(tensor.primitive, dim, indices))\n }\n }\n }\n\n fn float_select_assign(\n tensor: FloatTensor<Self>,\n dim: usize,\n indices: IntTensor<B>,\n value: FloatTensor<Self>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct IndexSelectDimAssign;\n\n #[derive(new, Debug)]\n struct RetroSelectAssign<B: Backend> {\n tensor_id: NodeID,\n dim: usize,\n indices: IntTensor<B>,\n value_id: NodeID,\n }\n\n impl<B: Backend> RetroForward for RetroSelectAssign<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);\n let value = 
states.get_state::<B::FloatTensorPrimitive>(&self.value_id);\n let out = B::float_select_assign(tensor, self.dim, self.indices.clone(), value);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 2> for IndexSelectDimAssign {\n type State = (usize, IntTensor<B>);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, indices) = ops.state;\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| grad,\n |grad| B::float_select(grad, dim, indices),\n );\n }\n }\n\n match IndexSelectDimAssign\n .prepare::<C>([tensor.node.clone(), value.node.clone()])\n .memory_bound()\n .retro_forward(RetroSelectAssign::<B>::new(\n tensor.node.id,\n dim,\n indices.clone(),\n value.node.id,\n ))\n .parents([&tensor, &value])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (dim, indices.clone()),\n B::float_select_assign(tensor.primitive, dim, indices, value.primitive),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_select_assign(\n tensor.primitive,\n dim,\n indices,\n value.primitive,\n )),\n }\n }\n\n fn float_slice(\n tensor: FloatTensor<Self>,\n ranges: &[core::ops::Range<usize>],\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Index;\n\n #[derive(new, Debug)]\n struct RetroSlice<B: Backend> {\n tensor_id: NodeID,\n ranges: Vec<core::ops::Range<usize>>,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroSlice<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);\n let out = B::float_slice(tensor, &self.ranges);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for Index {\n type State = (Vec<core::ops::Range<usize>>, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (ranges, shape, device) = ops.state;\n\n 
unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let zeros = B::float_zeros(shape, &device);\n B::float_slice_assign(zeros, &ranges, grad)\n });\n }\n }\n\n match Index\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSlice::<B>::new(tensor.node.id, ranges.to_vec()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n ranges.to_vec(),\n tensor.primitive.shape(),\n B::float_device(&tensor.primitive),\n ),\n B::float_slice(tensor.primitive, ranges),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_slice(tensor.primitive, ranges)),\n }\n }\n\n fn float_slice_assign(\n tensor: FloatTensor<Self>,\n ranges: &[core::ops::Range<usize>],\n value: FloatTensor<Self>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct SliceAssign;\n\n #[derive(new, Debug)]\n struct RetroSliceAssign<B: Backend> {\n tensor_id: NodeID,\n ranges: Vec<core::ops::Range<usize>>,\n value_id: NodeID,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroSliceAssign<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);\n let value = states.get_state::<B::FloatTensorPrimitive>(&self.value_id);\n let out = B::float_slice_assign(tensor, &self.ranges, value);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 2> for SliceAssign {\n type State = (Vec<core::ops::Range<usize>>, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (ranges, shape_rhs, device) = ops.state;\n let [ranges_4lhs, ranges_4rhs] = duplicate(&ops.parents, Some(ranges));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let zeros = B::float_zeros(shape_rhs, &device);\n B::float_slice_assign(grad, &ranges_4lhs.unwrap(), zeros)\n },\n |grad| B::float_slice(grad, &ranges_4rhs.unwrap()),\n );\n }\n }\n\n match 
SliceAssign\n .prepare::<C>([tensor.node.clone(), value.node.clone()])\n .memory_bound()\n .retro_forward(RetroSliceAssign::<B>::new(\n tensor.node.id,\n ranges.to_vec(),\n value.node.id,\n ))\n .parents([&tensor, &value])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n ranges.to_vec(),\n value.primitive.shape(),\n B::float_device(&value.primitive),\n ),\n B::float_slice_assign(tensor.primitive, ranges, value.primitive),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_slice_assign(\n tensor.primitive,\n ranges,\n value.primitive,\n )),\n }\n }\n\n fn float_mask_where(\n tensor: FloatTensor<Self>,\n mask: BoolTensor<Self>,\n source: FloatTensor<Self>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct MaskWhere;\n\n impl<B: Backend> Backward<B, 2> for MaskWhere {\n type State = (BoolTensor<B>, Shape, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (mask, shape_lhs, shape_rhs, device) = ops.state;\n let [mask_4lhs, mask_4rhs] = duplicate(&ops.parents, Some(mask));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let zeros = B::float_zeros(shape_lhs.clone(), &device);\n let grad = B::float_mask_where(grad, mask_4lhs.unwrap(), zeros);\n\n broadcast_shape::<B>(grad, &shape_lhs)\n },\n |grad| {\n let zeros = B::float_zeros(shape_rhs.clone(), &device);\n let grad = B::float_mask_where(zeros, mask_4rhs.unwrap(), grad);\n\n broadcast_shape::<B>(grad, &shape_rhs)\n },\n );\n }\n }\n\n match MaskWhere\n .prepare::<C>([tensor.node, source.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n mask.clone(),\n tensor.primitive.shape(),\n source.primitive.shape(),\n B::float_device(&source.primitive),\n ),\n B::float_mask_where(tensor.primitive, mask, source.primitive),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_mask_where(\n tensor.primitive,\n mask,\n source.primitive,\n )),\n }\n 
}\n\n fn float_mask_fill(\n tensor: FloatTensor<Self>,\n mask: BoolTensor<B>,\n value: FloatElem<B>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct MaskFill;\n\n impl<B: Backend> Backward<B, 1> for MaskFill {\n type State = BoolTensor<B>;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_mask_fill(grad, ops.state, 0.elem())\n });\n }\n }\n\n match MaskFill\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n mask.clone(),\n B::float_mask_fill(tensor.primitive, mask, value),\n ),\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_mask_fill(tensor.primitive, mask, value))\n }\n }\n }\n\n fn float_equal(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {\n B::float_equal(lhs.primitive, rhs.primitive)\n }\n\n fn float_equal_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {\n B::float_equal_elem(lhs.primitive, rhs)\n }\n\n fn float_greater(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {\n B::float_greater(lhs.primitive, rhs.primitive)\n }\n\n fn float_greater_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {\n B::float_greater_elem(lhs.primitive, rhs)\n }\n\n fn float_greater_equal(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {\n B::float_greater_equal(lhs.primitive, rhs.primitive)\n }\n\n fn float_greater_equal_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {\n B::float_greater_equal_elem(lhs.primitive, rhs)\n }\n\n fn float_lower(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {\n B::float_lower(lhs.primitive, rhs.primitive)\n }\n\n fn float_lower_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {\n B::float_lower_elem(lhs.primitive, rhs)\n }\n\n fn float_lower_equal(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> 
BoolTensor<B> {\n B::float_lower_equal(lhs.primitive, rhs.primitive)\n }\n\n fn float_lower_equal_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {\n B::float_lower_equal_elem(lhs.primitive, rhs)\n }\n\n fn float_detach(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n // When we detach a tensor, we remove it from the graph, but we still want to keep the\n // `require_grad` setting.\n let is_require_grad = Self::float_is_require_grad(&tensor);\n let tensor = AutodiffTensor::new(tensor.primitive);\n\n match is_require_grad {\n true => tensor.require_grad(),\n false => tensor,\n }\n }\n\n fn float_set_require_grad(tensor: FloatTensor<Self>, require_grad: bool) -> FloatTensor<Self> {\n if require_grad {\n return tensor.require_grad();\n }\n\n AutodiffTensor::new(tensor.primitive)\n }\n\n fn float_is_require_grad(tensor: &FloatTensor<Self>) -> bool {\n matches!(tensor.node.requirement, Requirement::Grad)\n }\n\n fn float_mean(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Mean;\n\n impl<B: Backend> Backward<B, 1> for Mean {\n type State = Shape;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let shape = ops.state;\n let val = 1_f64 / shape.num_elements() as f64;\n let ones = B::float_ones(shape, &B::float_device(&grad));\n let val = B::float_mul_scalar(ones, val.elem());\n\n let grad = unsqueeze_like::<B>(grad, val.shape());\n B::float_mul(val, grad)\n });\n }\n }\n\n match Mean.prepare::<C>([tensor.node]).compute_bound().stateful() {\n OpsKind::Tracked(prep) => {\n prep.finish(tensor.primitive.shape(), B::float_mean(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_mean(tensor.primitive)),\n }\n }\n\n fn float_sum(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sum;\n\n impl<B: Backend> Backward<B, 1> for Sum {\n type State = Shape;\n\n 
fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let val = B::float_ones(ops.state, &B::float_device(&grad));\n\n let grad = unsqueeze_like::<B>(grad, val.shape());\n B::float_mul(val, grad)\n });\n }\n }\n\n match Sum.prepare::<C>([tensor.node]).compute_bound().stateful() {\n OpsKind::Tracked(prep) => {\n prep.finish(tensor.primitive.shape(), B::float_sum(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_sum(tensor.primitive)),\n }\n }\n\n fn float_mean_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct MeanDim;\n\n impl<B: Backend> Backward<B, 1> for MeanDim {\n type State = (Shape, usize);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape, dim) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let val = 1_f64 / shape.dims[dim] as f64;\n let ones = B::float_ones(shape, &B::float_device(&grad));\n let val = B::float_mul_scalar(ones, B::FloatElem::from_elem(val));\n\n let grad = B::float_sum_dim(grad, dim);\n B::float_mul(val, grad)\n });\n }\n }\n\n match MeanDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (tensor.primitive.shape(), dim),\n B::float_mean_dim(tensor.primitive, dim),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_mean_dim(tensor.primitive, dim)),\n }\n }\n\n fn float_sum_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct SumDim;\n\n impl<B: Backend> Backward<B, 1> for SumDim {\n type State = (Shape, usize);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape, dim) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let ones = 
B::float_ones(shape, &B::float_device(&grad));\n let grad = B::float_sum_dim(grad, dim);\n\n B::float_mul(ones, grad)\n });\n }\n }\n\n match SumDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (tensor.primitive.shape(), dim),\n B::float_sum_dim(tensor.primitive, dim),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_sum_dim(tensor.primitive, dim)),\n }\n }\n\n fn float_argmax(tensor: FloatTensor<Self>, dim: usize) -> IntTensor<B> {\n B::float_argmax(tensor.primitive, dim)\n }\n\n fn float_argmin(tensor: FloatTensor<Self>, dim: usize) -> IntTensor<B> {\n B::float_argmin(tensor.primitive, dim)\n }\n\n fn float_exp(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Exp;\n\n retro_unary!(RetroExp, B::float_exp);\n\n impl<B: Backend> Backward<B, 1> for Exp {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n let output = B::float_exp(input);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_mul(grad, output)\n });\n }\n }\n\n match Exp\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroExp::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_exp(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_exp(tensor.primitive)),\n }\n }\n\n fn float_log(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Log;\n\n retro_unary!(RetroLog, B::float_log);\n\n impl<B: Backend> Backward<B, 1> for Log {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, 
ops.node, grads, |grad| {\n let value = B::float_powf_scalar(input, -1.0);\n B::float_mul(grad, value)\n });\n }\n }\n\n match Log\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroLog::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_log(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_log(tensor.primitive)),\n }\n }\n\n fn float_log1p(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Log1P;\n\n retro_unary!(RetroLog1P, B::float_log1p);\n\n impl<B: Backend> Backward<B, 1> for Log1P {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_add_scalar(input, 1.elem());\n let value = B::float_powf_scalar(value, -1.0);\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Log1P\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroLog1P::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_log1p(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_log1p(tensor.primitive)),\n }\n }\n\n fn float_powf_scalar(tensor: FloatTensor<Self>, value: f32) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct PowfScalar;\n\n #[derive(new, Debug)]\n struct RetroPowfScalar<B: Backend> {\n lhs_id: NodeID,\n rhs: f32,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroPowfScalar<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let lhs = states.get_state::<B::FloatTensorPrimitive>(&self.lhs_id);\n let out = B::float_powf_scalar(lhs, self.rhs);\n 
states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for PowfScalar {\n type State = (NodeID, f32);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (tensor_id, value) = ops.state;\n let tensor = checkpointer.retrieve_node_output(tensor_id);\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let tmp = B::float_powf_scalar(tensor, value - 1.0);\n let value = B::float_mul_scalar(tmp, value.elem());\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match PowfScalar\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroPowfScalar::<B>::new(tensor.node.id, value))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = (prep.checkpoint(&tensor), value);\n prep.finish(state, B::float_powf_scalar(tensor.primitive, value))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_powf_scalar(tensor.primitive, value)),\n }\n }\n\n fn float_sqrt(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sqrt;\n\n retro_unary!(RetroSqrt, B::float_sqrt);\n\n impl<B: Backend> Backward<B, 1> for Sqrt {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_div_scalar(B::float_powf_scalar(input, -0.5), 2.elem());\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Sqrt\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSqrt::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_sqrt(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_sqrt(tensor.primitive)),\n }\n }\n\n fn float_abs(tensor: FloatTensor<Self>) -> 
FloatTensor<Self> {\n #[derive(Debug)]\n struct Abs;\n\n retro_unary!(RetroAbs, B::float_abs);\n\n impl<B: Backend> Backward<B, 1> for Abs {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let tensor: B::FloatTensorPrimitive = checkpointer.retrieve_node_output(ops.state);\n let state = B::float_sign(tensor);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_mul(grad, state)\n });\n }\n }\n\n match Abs\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroAbs::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_abs(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_abs(tensor.primitive)),\n }\n }\n\n fn float_cos(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Cos;\n\n retro_unary!(RetroCos, B::float_cos);\n\n impl<B: Backend> Backward<B, 1> for Cos {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_neg(B::float_sin(input));\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Cos\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroCos::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_cos(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_cos(tensor.primitive)),\n }\n }\n\n fn float_sin(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sin;\n\n retro_unary!(RetroSin, B::float_sin);\n\n impl<B: Backend> Backward<B, 1> for Sin {\n type State = 
NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let state = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_cos(state);\n B::float_mul(grad, value)\n });\n }\n }\n\n match Sin\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSin::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_sin(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_sin(tensor.primitive)),\n }\n }\n\n fn float_tanh(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Tanh;\n\n retro_unary!(RetroTanh, B::float_tanh);\n\n impl<B: Backend> Backward<B, 1> for Tanh {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n let state = B::float_tanh(input);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_add_scalar(\n B::float_neg(B::float_powf_scalar(state, 2.0)),\n 1.elem(),\n );\n B::float_mul(grad, value)\n });\n }\n }\n\n match Tanh\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroTanh::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_tanh(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_tanh(tensor.primitive)),\n }\n }\n\n fn float_round(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Round;\n retro_unary!(RetroRound, B::float_round);\n\n impl<B: Backend> Backward<B, 1> for Round {\n type State = (Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n 
grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape, device) = ops.state;\n unary::<B, _>(ops.parents, ops.node, grads, |_grad| {\n B::float_zeros(shape, &device)\n })\n }\n }\n\n match Round\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroRound::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (tensor.primitive.shape(), B::float_device(&tensor.primitive)),\n B::float_round(tensor.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_round(tensor.primitive)),\n }\n }\n\n fn float_floor(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Floor;\n retro_unary!(RetroFloor, B::float_floor);\n\n impl<B: Backend> Backward<B, 1> for Floor {\n type State = (Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape, device) = ops.state;\n unary::<B, _>(ops.parents, ops.node, grads, |_grad| {\n B::float_zeros(shape, &device)\n })\n }\n }\n\n match Floor\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroFloor::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (tensor.primitive.shape(), B::float_device(&tensor.primitive)),\n B::float_floor(tensor.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_floor(tensor.primitive)),\n }\n }\n\n fn float_ceil(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Ceil;\n retro_unary!(RetroCeil, B::float_ceil);\n\n impl<B: Backend> Backward<B, 1> for Ceil {\n type State = (Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape, device) = ops.state;\n unary::<B, _>(ops.parents, ops.node, grads, |_grad| {\n B::float_zeros(shape, &device)\n })\n }\n }\n\n match Ceil\n 
.prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroCeil::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (tensor.primitive.shape(), B::float_device(&tensor.primitive)),\n B::float_floor(tensor.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_floor(tensor.primitive)),\n }\n }\n\n fn float_erf(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Erf;\n\n retro_unary!(RetroErf, B::float_erf);\n\n impl<B: Backend> Backward<B, 1> for Erf {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let ops = checkpointer.retrieve_node_output(ops.state);\n let exponent = B::float_neg(B::float_powf_scalar(ops, 2.0));\n let numerator = B::float_mul_scalar(B::float_exp(exponent), 2.0.elem());\n let denominator = core::f64::consts::PI.sqrt().elem();\n let value = B::float_div_scalar(numerator, denominator);\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Erf\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroErf::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_erf(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_erf(tensor.primitive)),\n }\n }\n\n fn float_cat(tensors: Vec<FloatTensor<Self>>, dim: usize) -> FloatTensor<Self> {\n #[derive(new, Debug)]\n struct CatStep<B: Backend> {\n nodes: Vec<Option<NodeRef>>,\n // The dimension of each tensor along the dim dimension.\n // This indicates the number of dimension concatenated for each tensor.\n dim_sizes: Vec<usize>,\n output: NodeRef,\n phantom: PhantomData<B>,\n dim: usize,\n }\n\n impl<B: Backend> Step for CatStep<B> {\n fn step(self: Box<Self>, grads: &mut Gradients, 
_checkpointer: &mut Checkpointer) {\n let grad = grads.consume::<B>(&self.output);\n let ranges: Vec<_> = grad.shape().dims.iter().map(|v| 0..*v).collect();\n\n let mut current_index = 0;\n\n self.nodes\n .into_iter()\n .zip(self.dim_sizes)\n .filter_map(|(node, dim_size)| node.map(|node| (node, dim_size)))\n .for_each(|(node, dim_size)| {\n let mut ranges = ranges.clone();\n ranges[self.dim] = current_index..dim_size + current_index;\n current_index += dim_size;\n grads.register::<B>(node.id, B::float_slice(grad.clone(), &ranges));\n });\n }\n\n fn node(&self) -> NodeID {\n self.output.id\n }\n\n fn parents(&self) -> Vec<NodeID> {\n self.nodes\n .iter()\n .filter_map(|node| node.clone())\n .map(|node| node.id)\n .collect()\n }\n fn depth(&self) -> usize {\n self.output.order\n }\n }\n\n let mut nodes = Vec::with_capacity(tensors.len());\n let mut primitives = Vec::with_capacity(tensors.len());\n let mut dim_sizes = Vec::with_capacity(tensors.len());\n\n tensors.into_iter().for_each(|tensor| {\n dim_sizes.push(tensor.primitive.shape().dims[dim]);\n nodes.push(tensor.node);\n primitives.push(tensor.primitive);\n });\n\n let requirement = Requirement::from_nodes(&nodes);\n\n // For simplicity, this operation does not checkpoint anything\n let cat_computing_property = ComputingProperty::Ambiguous;\n let checkpointer_builder = CheckpointerBuilder::default();\n\n let output = B::float_cat(primitives, dim);\n if requirement.is_none() {\n return AutodiffTensor::from_parents(\n output,\n &nodes,\n requirement,\n cat_computing_property,\n );\n }\n\n let output =\n AutodiffTensor::from_parents(output, &nodes, requirement, cat_computing_property);\n let nodes = nodes\n .into_iter()\n .map(|node| node.clone_if_require_grad())\n .collect::<Vec<_>>();\n\n let ops = CatStep::<B>::new(nodes, dim_sizes, output.node.clone(), dim);\n output.register_step(ops, checkpointer_builder)\n }\n\n fn float_max_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {\n match 
MaxMinDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let shape = tensor.primitive.shape();\n let (tensor, index) = B::float_max_dim_with_indices(tensor.primitive, dim);\n prep.finish((index, shape), tensor)\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_max_dim(tensor.primitive, dim)),\n }\n }\n fn float_max_dim_with_indices(\n tensor: FloatTensor<Self>,\n dim: usize,\n ) -> (FloatTensor<Self>, IntTensor<B>) {\n match MaxMinDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let shape = tensor.primitive.shape();\n let (tensor, index) = B::float_max_dim_with_indices(tensor.primitive, dim);\n let tensor = prep.finish((index.clone(), shape), tensor);\n\n (tensor, index)\n }\n OpsKind::UnTracked(prep) => {\n let (tensor, index) = B::float_max_dim_with_indices(tensor.primitive, dim);\n let tensor = prep.finish(tensor);\n\n (tensor, index)\n }\n }\n }\n fn float_min_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {\n match MaxMinDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let shape = tensor.primitive.shape();\n let (tensor, index) = B::float_min_dim_with_indices(tensor.primitive, dim);\n prep.finish((index, shape), tensor)\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_min_dim(tensor.primitive, dim)),\n }\n }\n fn float_min_dim_with_indices(\n tensor: FloatTensor<Self>,\n dim: usize,\n ) -> (FloatTensor<Self>, IntTensor<B>) {\n match MaxMinDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let shape = tensor.primitive.shape();\n let (tensor, index) = B::float_min_dim_with_indices(tensor.primitive, dim);\n let tensor = prep.finish((index.clone(), shape), tensor);\n\n (tensor, index)\n }\n OpsKind::UnTracked(prep) => {\n let (tensor, index) = B::float_min_dim_with_indices(tensor.primitive, dim);\n let tensor = 
prep.finish(tensor);\n\n (tensor, index)\n }\n }\n }\n\n fn float_into_int(tensor: FloatTensor<Self>) -> <Autodiff<B> as Backend>::IntTensorPrimitive {\n B::float_into_int(tensor.primitive)\n }\n\n fn float_powf(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct PowF;\n\n retro_binary!(RetroPowf, B::float_powf);\n\n impl<B: Backend> Backward<B, 2> for PowF {\n type State = (NodeID, NodeID, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (lhs_id, rhs_id, broadcast) = ops.state;\n let lhs: B::FloatTensorPrimitive = checkpointer.retrieve_node_output(lhs_id);\n let rhs: B::FloatTensorPrimitive = checkpointer.retrieve_node_output(rhs_id);\n\n // Both lhs and rhs are needed for both lhs and rhs gradients, but we clone them\n // the number of times required by the parents specification.\n let [rhs_4lhs, rhs_4rhs] = duplicate(&ops.parents, Some(rhs));\n let [lhs_4lhs, lhs_4rhs] = duplicate(&ops.parents, Some(lhs));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n //rhs*(lhs.val**(rhs-1))*grad\n let rhs1 = rhs_4lhs.unwrap();\n let rhs2 = rhs1.clone();\n let lhs = lhs_4lhs.unwrap();\n\n let tmp = B::float_powf(\n lhs,\n B::float_sub_scalar(rhs1, B::FloatElem::from_elem(1.0)),\n );\n let value = B::float_mul(tmp, rhs2);\n let grad = B::float_mul(grad, value);\n\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n //lhs**rhs * ln(lhs) * grad\n let rhs = rhs_4rhs.unwrap();\n let lhs1 = lhs_4rhs.unwrap();\n let lhs2 = lhs1.clone();\n let tmp = B::float_powf(lhs1, rhs);\n let value = B::float_mul(tmp, B::float_log(lhs2));\n let grad = B::float_mul(grad, value);\n\n broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match PowF\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n 
.retro_forward(RetroPowf::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = prep.checkpoint(&lhs);\n let rhs_state = prep.checkpoint(&rhs);\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n B::float_powf(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_powf(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_sign(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sign;\n\n retro_unary!(RetroSign, B::float_sign);\n\n impl<B: Backend> Backward<B, 1> for Sign {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad|\n // Always return 0 because the derivative of the sign function\n // does not contribute to gradient updates in a meaningful way.\n B::float_mul_scalar(grad, 0.elem()));\n }\n }\n\n Sign.prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSign::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateless(B::float_sign(tensor.primitive))\n }\n\n fn float_expand(tensor: FloatTensor<Self>, shape: Shape) -> FloatTensor<Self> {\n // D1: tensor, D2: shape\n #[derive(Debug)]\n struct ExpandDim;\n\n #[derive(new, Debug)]\n struct RetroExpand<B: Backend> {\n input_id: NodeID,\n shape: Shape,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroExpand<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_expand(input, self.shape.clone());\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for ExpandDim {\n type State = (Shape, Shape);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape_in, shape_out) = ops.state;\n let 
ndims_in = shape_in.num_dims();\n let ndims_out = shape_out.num_dims();\n\n let mut shape_expanded = vec![1; ndims_out];\n\n debug_assert!(ndims_out >= ndims_in);\n\n for i in 0..ndims_in {\n shape_expanded[i + (ndims_out - ndims_in)] = shape_in.dims[i];\n }\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let shape_grad = grad.shape();\n let mut grad = grad;\n\n #[allow(clippy::needless_range_loop)]\n for i in 0..ndims_out {\n if shape_expanded[i] == 1 && shape_grad.dims[i] != 1 {\n grad = B::float_sum_dim(grad, i);\n }\n }\n\n B::float_reshape(grad, shape_in)\n });\n }\n }\n\n match ExpandDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroExpand::<B>::new(tensor.node.id, shape.clone()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (tensor.primitive.shape(), shape.clone()),\n B::float_expand(tensor.primitive, shape),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_expand(tensor.primitive, shape)),\n }\n }\n\n fn float_sort(tensor: FloatTensor<Self>, dim: usize, descending: bool) -> FloatTensor<Self> {\n match super::sort::SortDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let shape = tensor.primitive.shape();\n let (tensor, indices) =\n B::float_sort_with_indices(tensor.primitive, dim, descending);\n prep.finish((indices, shape), tensor)\n }\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_sort(tensor.primitive, dim, descending))\n }\n }\n }\n\n fn float_sort_with_indices(\n tensor: FloatTensor<Self>,\n dim: usize,\n descending: bool,\n ) -> (FloatTensor<Self>, IntTensor<B>) {\n match super::sort::SortDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let shape = tensor.primitive.shape();\n let (tensor, indices) =\n B::float_sort_with_indices(tensor.primitive, dim, descending);\n let tensor = prep.finish((indices.clone(), shape), tensor);\n\n (tensor, indices)\n 
}\n OpsKind::UnTracked(prep) => {\n let (tensor, indices) =\n B::float_sort_with_indices(tensor.primitive, dim, descending);\n let tensor = prep.finish(tensor);\n\n (tensor, indices)\n }\n }\n }\n\n fn float_argsort(tensor: FloatTensor<Self>, dim: usize, descending: bool) -> IntTensor<B> {\n B::float_argsort(tensor.primitive, dim, descending)\n }\n\n fn float_repeat_dim(tensor: FloatTensor<Self>, dim: usize, times: usize) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Repeat;\n\n #[derive(new, Debug)]\n struct RetroRepeat<B: Backend> {\n tensor_id: NodeID,\n dim: usize,\n times: usize,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroRepeat<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);\n let out = B::float_repeat_dim(tensor, self.dim, self.times);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for Repeat {\n type State = (usize, usize);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, times) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let mut dims = grad.shape().dims;\n let orig_dim_size = dims[dim] / times;\n if orig_dim_size > 1 {\n dims[dim] = orig_dim_size;\n let orig_dims = dims.clone();\n dims.insert(dim + 1, times); // shape [..., orig_dim_size, times, ...]\n let grad = B::float_reshape(grad, Shape::from(dims));\n let grad = B::float_sum_dim(grad, dim + 1); // sum over repeat times\n B::float_reshape(grad, Shape::from(orig_dims))\n } else {\n B::float_sum_dim(grad, dim)\n }\n });\n }\n }\n\n match Repeat\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroRepeat::<B>::new(tensor.node.id, dim, times))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (dim, times),\n B::float_repeat_dim(tensor.primitive, dim, times),\n ),\n 
OpsKind::UnTracked(prep) => {\n prep.finish(B::float_repeat_dim(tensor.primitive, dim, times))\n }\n }\n }\n\n fn float_cast(tensor: FloatTensor<Self>, dtype: burn_tensor::FloatDType) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_cast(tensor.primitive, dtype))\n }\n\n // TODO: Implement float_prod and float_sum\n // https://github.com/tracel-ai/burn/issues/1458\n}",
"class_signature": "impl<B: Backend, C: CheckpointStrategy> FloatTensorOps<Self> for Autodiff<B, C>"
} |
float_mul | burn-main/crates/burn-autodiff/src/ops/tensor.rs | fn float_mul(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {
#[derive(Debug)]
struct Mul;
retro_binary!(RetroMul, B::float_mul);
impl<B: Backend> Backward<B, 2> for Mul {
type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);
fn backward(
self,
ops: Ops<Self::State, 2>,
grads: &mut Gradients,
checkpointer: &mut Checkpointer,
) {
let (lhs, rhs, broadcast) = ops.state;
let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));
let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));
binary::<B, _, _>(
ops.parents,
ops.node,
grads,
|grad| {
let grad = B::float_mul(grad, rhs.unwrap());
broadcast.backward_lhs::<B>(grad)
},
|grad| {
let grad = B::float_mul(grad, lhs.unwrap());
broadcast.backward_rhs::<B>(grad)
},
);
}
}
let lhs_tracked = lhs.is_tracked();
let rhs_tracked = rhs.is_tracked();
let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);
match Mul
.prepare::<C>([lhs.node.clone(), rhs.node.clone()])
.memory_bound()
.retro_forward(RetroMul::<B>::new(lhs.node.id, rhs.node.id))
.parents([&lhs, &rhs])
.stateful()
{
OpsKind::Tracked(mut prep) => {
let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));
let rhs_state = lhs_tracked.then(|| prep.checkpoint(&rhs));
prep.finish(
(lhs_state, rhs_state, broadcast),
B::float_mul(lhs.primitive, rhs.primitive),
)
}
OpsKind::UnTracked(prep) => prep.finish(B::float_mul(lhs.primitive, rhs.primitive)),
}
} | use alloc::{boxed::Box, vec, vec::Vec};
use core::marker::PhantomData;
#[cfg(not(feature = "std"))]
#[allow(unused_imports, reason = "required on aarch64, unused on x86_64")]
use num_traits::float::Float;
use crate::{
Autodiff,
checkpoint::{
base::Checkpointer, builder::CheckpointerBuilder, retro_forward::RetroForward,
state::BackwardStates, strategy::CheckpointStrategy,
},
grads::Gradients,
graph::{ComputingProperty, NodeID, NodeRef, Requirement, Step},
ops::{Backward, Ops, OpsKind, binary, broadcast_shape, unary},
retro_binary, retro_unary, retro_unary_scalar,
tensor::AutodiffTensor,
utils::duplicate,
};
use burn_tensor::{
Device, ElementConversion, Shape, TensorData, TensorMetadata,
backend::Backend,
ops::{BoolTensor, FloatElem, FloatTensor, FloatTensorOps, IntTensor},
};
use super::maxmin::MaxMinDim;
// Unsqueeze op on primitive.
// Reshape `tensor` so it has the same rank as `shape` by prepending
// singleton dimensions — the primitive-level equivalent of `unsqueeze`
// on the public tensor API.
fn unsqueeze_like<B: Backend>(
    tensor: B::FloatTensorPrimitive,
    shape: Shape,
) -> B::FloatTensorPrimitive {
    let target_rank = shape.num_dims();
    let current = tensor.shape();
    let leading_ones = target_rank - current.num_dims();

    // Prepend `leading_ones` singleton dims, then keep the original dims.
    let dims: Vec<usize> = core::iter::repeat(1)
        .take(leading_ones)
        .chain(current.dims.iter().copied())
        .collect();

    B::float_reshape(tensor, Shape::from(dims))
}
impl<B: Backend, C: CheckpointStrategy> FloatTensorOps<Self> for Autodiff<B, C> {
fn float_from_data(data: TensorData, device: &Device<Self>) -> FloatTensor<Self> {
AutodiffTensor::new(B::float_from_data(data, device))
}
fn float_random(
shape: Shape,
distribution: burn_tensor::Distribution,
device: &Device<Self>,
) -> FloatTensor<Self> {
AutodiffTensor::new(B::float_random(shape, distribution, device))
}
fn float_zeros(shape: Shape, device: &Device<Self>) -> FloatTensor<Self> {
AutodiffTensor::new(B::float_zeros(shape, device))
}
fn float_ones(shape: Shape, device: &Device<Self>) -> FloatTensor<Self> {
AutodiffTensor::new(B::float_ones(shape, device))
}
async fn float_into_data(tensor: FloatTensor<Self>) -> TensorData {
B::float_into_data(tensor.primitive).await
}
fn float_device(tensor: &FloatTensor<Self>) -> Device<Self> {
B::float_device(&tensor.primitive)
}
    // Moves the tensor to `device`. When tracked, the original device is
    // saved as state so the backward step can move the gradient back.
    fn float_to_device(tensor: FloatTensor<Self>, device: &Device<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct ToDevice;
        impl<B: Backend> Backward<B, 1> for ToDevice {
            // The device the input tensor lived on before the move.
            type State = B::Device;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    B::float_to_device(grad, &ops.state)
                });
            }
        }
        match ToDevice
            .prepare::<C>([tensor.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => {
                // Remember the source device so the gradient can be sent back.
                let device_old = B::float_device(&tensor.primitive);
                prep.finish(device_old, B::float_to_device(tensor.primitive, device))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_to_device(tensor.primitive, device)),
        }
    }
fn float_empty(shape: Shape, device: &Device<Self>) -> FloatTensor<Self> {
AutodiffTensor::new(B::float_empty(shape, device))
}
    // y = lhs + rhs. Backward passes the gradient through to both operands,
    // reducing it back to each operand's forward shape where broadcasting
    // occurred.
    fn float_add(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Add;
        retro_binary!(RetroAdd, B::float_add);
        impl<B: Backend> Backward<B, 2> for Add {
            // Forward shapes of (lhs, rhs), needed to undo broadcasting.
            type State = (Shape, Shape);
            fn backward(
                self,
                ops: Ops<Self::State, 2>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (shape_lhs, shape_rhs) = ops.state;
                binary::<B, _, _>(
                    ops.parents,
                    ops.node,
                    grads,
                    |grad| broadcast_shape::<B>(grad, &shape_lhs),
                    |grad| broadcast_shape::<B>(grad, &shape_rhs),
                );
            }
        }
        match Add
            .prepare::<C>([lhs.node.clone(), rhs.node.clone()])
            .memory_bound()
            .retro_forward(RetroAdd::<B>::new(lhs.node.id, rhs.node.id))
            .parents([&lhs, &rhs])
            .stateful()
        {
            OpsKind::Tracked(preps) => preps.finish(
                (lhs.primitive.shape(), rhs.primitive.shape()),
                B::float_add(lhs.primitive, rhs.primitive),
            ),
            OpsKind::UnTracked(preps) => preps.finish(B::float_add(lhs.primitive, rhs.primitive)),
        }
    }
    // y = lhs + c. The scalar contributes nothing to the gradient, so
    // backward is the identity and the op is stateless.
    fn float_add_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct AddScalar;
        retro_unary_scalar!(RetroAddScalar, B::float_add_scalar);
        impl<B: Backend> Backward<B, 1> for AddScalar {
            type State = ();
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                unary::<B, _>(ops.parents, ops.node, grads, |grad| grad);
            }
        }
        AddScalar
            .prepare::<C>([lhs.node.clone()])
            .memory_bound()
            .retro_forward(RetroAddScalar::<B>::new(lhs.node.id, rhs))
            .parents([&lhs])
            .stateless(B::float_add_scalar(lhs.primitive, rhs))
    }
    // y = lhs - rhs. Backward passes the gradient through to lhs and negates
    // it for rhs, reducing each back to its operand's forward shape.
    fn float_sub(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Sub;
        retro_binary!(RetroSub, B::float_sub);
        impl<B: Backend> Backward<B, 2> for Sub {
            // Forward shapes of (lhs, rhs), needed to undo broadcasting.
            type State = (Shape, Shape);
            fn backward(
                self,
                ops: Ops<Self::State, 2>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (shape_lhs, shape_rhs) = ops.state;
                binary::<B, _, _>(
                    ops.parents,
                    ops.node,
                    grads,
                    |grad| broadcast_shape::<B>(grad, &shape_lhs),
                    |grad| broadcast_shape::<B>(B::float_neg(grad), &shape_rhs),
                );
            }
        }
        match Sub
            .prepare::<C>([lhs.node.clone(), rhs.node.clone()])
            .memory_bound()
            .retro_forward(RetroSub::<B>::new(lhs.node.id, rhs.node.id))
            .parents([&lhs, &rhs])
            .stateful()
        {
            OpsKind::Tracked(preps) => preps.finish(
                (lhs.primitive.shape(), rhs.primitive.shape()),
                B::float_sub(lhs.primitive, rhs.primitive),
            ),
            OpsKind::UnTracked(preps) => preps.finish(B::float_sub(lhs.primitive, rhs.primitive)),
        }
    }
    // y = lhs - c. Like float_add_scalar, backward is the identity and the
    // op is stateless.
    fn float_sub_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct SubScalar;
        retro_unary_scalar!(RetroSubScalar, B::float_sub_scalar);
        impl<B: Backend> Backward<B, 1> for SubScalar {
            type State = ();
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                unary::<B, _>(ops.parents, ops.node, grads, |grad| grad);
            }
        }
        SubScalar
            .prepare::<C>([lhs.node.clone()])
            .memory_bound()
            .retro_forward(RetroSubScalar::<B>::new(lhs.node.id, rhs))
            .parents([&lhs])
            .stateless(B::float_sub_scalar(lhs.primitive, rhs))
    }
    // y = lhs * rhs. The backward closures multiply the gradient by the
    // *other* operand's checkpointed value, then undo broadcasting.
    fn float_mul(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Mul;
        retro_binary!(RetroMul, B::float_mul);
        impl<B: Backend> Backward<B, 2> for Mul {
            // Checkpointed node ids for (lhs, rhs) — `None` when the
            // corresponding gradient is not needed — plus broadcast info.
            type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);
            fn backward(
                self,
                ops: Ops<Self::State, 2>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                let (lhs, rhs, broadcast) = ops.state;
                let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));
                let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));
                binary::<B, _, _>(
                    ops.parents,
                    ops.node,
                    grads,
                    |grad| {
                        let grad = B::float_mul(grad, rhs.unwrap());
                        broadcast.backward_lhs::<B>(grad)
                    },
                    |grad| {
                        let grad = B::float_mul(grad, lhs.unwrap());
                        broadcast.backward_rhs::<B>(grad)
                    },
                );
            }
        }
        let lhs_tracked = lhs.is_tracked();
        let rhs_tracked = rhs.is_tracked();
        let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);
        match Mul
            .prepare::<C>([lhs.node.clone(), rhs.node.clone()])
            .memory_bound()
            .retro_forward(RetroMul::<B>::new(lhs.node.id, rhs.node.id))
            .parents([&lhs, &rhs])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                // The conditions are deliberately swapped: lhs's value is only
                // needed to compute rhs's gradient, and vice versa.
                let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));
                let rhs_state = lhs_tracked.then(|| prep.checkpoint(&rhs));
                prep.finish(
                    (lhs_state, rhs_state, broadcast),
                    B::float_mul(lhs.primitive, rhs.primitive),
                )
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_mul(lhs.primitive, rhs.primitive)),
        }
    }
    // y = lhs * c. Backward scales the gradient by the scalar, which is
    // saved as the op state when tracked.
    fn float_mul_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct MulScalar;
        retro_unary_scalar!(RetroMulScalar, B::float_mul_scalar);
        impl<B: Backend> Backward<B, 1> for MulScalar {
            // The scalar multiplier.
            type State = FloatElem<B>;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    B::float_mul_scalar(grad, ops.state)
                });
            }
        }
        match MulScalar
            .prepare::<C>([lhs.node.clone()])
            .memory_bound()
            .retro_forward(RetroMulScalar::<B>::new(lhs.node.id, rhs))
            .parents([&lhs])
            .stateful()
        {
            OpsKind::Tracked(prep) => prep.finish(rhs, B::float_mul_scalar(lhs.primitive, rhs)),
            OpsKind::UnTracked(prep) => prep.finish(B::float_mul_scalar(lhs.primitive, rhs)),
        }
    }
    // y = lhs / rhs. Backward computes grad * (1/rhs) for lhs and
    // grad * (-lhs/rhs^2) for rhs.
    fn float_div(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Div;
        retro_binary!(RetroDiv, B::float_div);
        impl<B: Backend> Backward<B, 2> for Div {
            // Checkpointed node ids for (lhs, rhs) plus broadcast info.
            type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);
            fn backward(
                self,
                ops: Ops<Self::State, 2>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                let (lhs, rhs, broadcast) = ops.state;
                let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));
                let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));
                // rhs is needed by both closures; clone it only as many times
                // as the tracked parents require.
                let [rhs_4lhs, rhs_4rhs] = duplicate(&ops.parents, rhs);
                binary::<B, _, _>(
                    ops.parents,
                    ops.node,
                    grads,
                    |grad| {
                        // d(lhs / rhs) / d(lhs) = 1 / rhs
                        let rhs = rhs_4lhs.unwrap();
                        let value = B::float_powf_scalar(rhs, -1.0);
                        let grad = B::float_mul(grad, value);
                        broadcast.backward_lhs::<B>(grad)
                    },
                    |grad| {
                        // d(lhs / rhs) / d(rhs) = -lhs / rhs^2
                        let rhs = rhs_4rhs.unwrap();
                        let lhs = lhs.unwrap();
                        let value = B::float_div(B::float_neg(lhs), B::float_powf_scalar(rhs, 2.0));
                        let grad = B::float_mul(grad, value);
                        broadcast.backward_rhs::<B>(grad)
                    },
                );
            }
        }
        let lhs_tracked = lhs.is_tracked();
        let rhs_tracked = rhs.is_tracked();
        let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);
        match Div
            .prepare::<C>([lhs.node.clone(), rhs.node.clone()])
            .memory_bound()
            .retro_forward(RetroDiv::<B>::new(lhs.node.id, rhs.node.id))
            .parents([&lhs, &rhs])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                // lhs's value is only needed for rhs's gradient, while rhs's
                // value is needed for both gradients.
                let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));
                let rhs_state = (lhs_tracked || rhs_tracked).then(|| prep.checkpoint(&rhs));
                prep.finish(
                    (lhs_state, rhs_state, broadcast),
                    B::float_div(lhs.primitive, rhs.primitive),
                )
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_div(lhs.primitive, rhs.primitive)),
        }
    }
    // y = lhs / c. Backward multiplies the gradient by 1/c; the scalar is
    // saved as the op state when tracked.
    fn float_div_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct DivScalar;
        retro_unary_scalar!(RetroDivScalar, B::float_div_scalar);
        impl<B: Backend> Backward<B, 1> for DivScalar {
            // The scalar divisor.
            type State = FloatElem<B>;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    // The reciprocal is computed in f32 before converting back
                    // to the backend element type.
                    let tmp = 1.0 / ops.state.elem::<f32>();
                    B::float_mul_scalar(grad, tmp.elem())
                });
            }
        }
        match DivScalar
            .prepare::<C>([lhs.node.clone()])
            .memory_bound()
            .retro_forward(RetroDivScalar::<B>::new(lhs.node.id, rhs))
            .parents([&lhs])
            .stateful()
        {
            OpsKind::Tracked(prep) => prep.finish(rhs, B::float_div_scalar(lhs.primitive, rhs)),
            OpsKind::UnTracked(prep) => prep.finish(B::float_div_scalar(lhs.primitive, rhs)),
        }
    }
    // y = remainder(lhs, rhs) = lhs - floor(lhs / rhs) * rhs.
    fn float_remainder(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Rem;
        retro_binary!(RetroRem, B::float_remainder);
        impl<B: Backend> Backward<B, 2> for Rem {
            // Checkpointed node ids for (lhs, rhs) plus broadcast info.
            type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);
            fn backward(
                self,
                ops: Ops<Self::State, 2>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                let (lhs, rhs, broadcast) = ops.state;
                let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));
                let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));
                binary::<B, _, _>(
                    ops.parents,
                    ops.node,
                    grads,
                    |grad| {
                        // remainder(x, y) = x - floor(x / y) * y
                        // partial(x - floor(x / y) * y, x) = 1
                        broadcast.backward_lhs::<B>(grad)
                    },
                    |grad| {
                        // partial(x - floor(x / y) * y, y) = - floor(x / y)
                        let rhs = rhs.unwrap();
                        let lhs = lhs.unwrap();
                        let value = B::float_neg(B::float_floor(B::float_div(lhs, rhs)));
                        let grad = B::float_mul(grad, value);
                        broadcast.backward_rhs::<B>(grad)
                    },
                );
            }
        }
        let lhs_tracked = lhs.is_tracked();
        let rhs_tracked = rhs.is_tracked();
        let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);
        match Rem
            .prepare::<C>([lhs.node.clone(), rhs.node.clone()])
            .memory_bound()
            .retro_forward(RetroRem::<B>::new(lhs.node.id, rhs.node.id))
            .parents([&lhs, &rhs])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                // NOTE(review): both operand values are only read by rhs's
                // gradient closure above; the `(lhs_tracked || rhs_tracked)`
                // condition mirrors float_div — confirm whether `rhs_tracked`
                // alone would suffice here.
                let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));
                let rhs_state = (lhs_tracked || rhs_tracked).then(|| prep.checkpoint(&rhs));
                prep.finish(
                    (lhs_state, rhs_state, broadcast),
                    B::float_remainder(lhs.primitive, rhs.primitive),
                )
            }
            OpsKind::UnTracked(prep) => {
                prep.finish(B::float_remainder(lhs.primitive, rhs.primitive))
            }
        }
    }
    // y = remainder(lhs, c). The partial derivative w.r.t. lhs is 1, so
    // backward is the identity and the op is stateless.
    fn float_remainder_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct RemainderScalar;
        retro_unary_scalar!(RetroRemainderScalar, B::float_remainder_scalar);
        impl<B: Backend> Backward<B, 1> for RemainderScalar {
            type State = ();
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                unary::<B, _>(ops.parents, ops.node, grads, |grad| grad);
            }
        }
        RemainderScalar
            .prepare::<C>([lhs.node.clone()])
            .memory_bound()
            .retro_forward(RetroRemainderScalar::<B>::new(lhs.node.id, rhs))
            .parents([&lhs])
            .stateless(B::float_remainder_scalar(lhs.primitive, rhs))
    }
    // y = lhs @ rhs. Backward: grad_lhs = grad @ rhs^T, grad_rhs = lhs^T @ grad.
    // No retro-forward is registered: the op is marked compute bound.
    fn float_matmul(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Matmul;
        impl<B: Backend> Backward<B, 2> for Matmul {
            // Checkpointed node ids for (lhs, rhs) plus broadcast info.
            type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);
            fn backward(
                self,
                ops: Ops<Self::State, 2>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                let (lhs, rhs, broadcast) = ops.state;
                let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));
                let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));
                binary::<B, _, _>(
                    ops.parents,
                    ops.node,
                    grads,
                    |grad| {
                        let rhs = B::float_transpose(rhs.unwrap());
                        let grad = B::float_matmul(grad, rhs);
                        broadcast.backward_lhs::<B>(grad)
                    },
                    |grad| {
                        let lhs = B::float_transpose(lhs.unwrap());
                        let grad = B::float_matmul(lhs, grad);
                        broadcast.backward_rhs::<B>(grad)
                    },
                );
            }
        }
        let lhs_tracked = lhs.is_tracked();
        let rhs_tracked = rhs.is_tracked();
        let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);
        match Matmul
            .prepare::<C>([lhs.node.clone(), rhs.node.clone()])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                // Each operand's value is needed only for the *other*
                // operand's gradient, hence the swapped tracking conditions.
                let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));
                let rhs_state = lhs_tracked.then(|| prep.checkpoint(&rhs));
                prep.finish(
                    (lhs_state, rhs_state, broadcast),
                    B::float_matmul(lhs.primitive, rhs.primitive),
                )
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_matmul(lhs.primitive, rhs.primitive)),
        }
    }
    // y = -x. Backward negates the gradient; the op is stateless.
    fn float_neg(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Neg;
        retro_unary!(RetroNeg, B::float_neg);
        impl<B: Backend> Backward<B, 1> for Neg {
            type State = ();
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                unary::<B, _>(ops.parents, ops.node, grads, |grad| B::float_neg(grad));
            }
        }
        Neg.prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroNeg::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateless(B::float_neg(tensor.primitive))
    }
    // y = 1/x. Backward multiplies the gradient by -x^-2; the input tensor is
    // checkpointed so it can be retrieved during the backward pass.
    fn float_recip(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Recip;
        retro_unary!(RetroRecip, B::float_recip);
        impl<B: Backend> Backward<B, 1> for Recip {
            // NodeID of the checkpointed input tensor.
            type State = NodeID;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                let tensor = checkpointer.retrieve_node_output(ops.state);
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    // d(1/x)/dx = -x^-2
                    let tmp = B::float_powf_scalar(tensor, -2.0);
                    let value = B::float_neg(tmp);
                    B::float_mul(grad, value)
                });
            }
        }
        match Recip
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroRecip::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                let state = prep.checkpoint(&tensor);
                prep.finish(state, B::float_recip(tensor.primitive))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_recip(tensor.primitive)),
        }
    }
    // Swaps two dimensions. Backward applies the same swap to the gradient
    // (the operation is its own inverse).
    fn float_swap_dims(tensor: FloatTensor<Self>, dim1: usize, dim2: usize) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct SwapDim;
        // Re-runs the forward swap from the saved input when the output must
        // be recomputed instead of stored.
        #[derive(new, Debug)]
        struct RetroSwapDims<B: Backend> {
            input_id: NodeID,
            dim1: usize,
            dim2: usize,
            _backend: PhantomData<B>,
        }
        impl<B: Backend> RetroForward for RetroSwapDims<B> {
            fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
                let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);
                let out = B::float_swap_dims(input, self.dim1, self.dim2);
                states.save(out_node, out)
            }
        }
        impl<B: Backend> Backward<B, 1> for SwapDim {
            // The (dim1, dim2) pair used in the forward pass.
            type State = (usize, usize);
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (dim1, dim2) = ops.state;
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    B::float_swap_dims(grad, dim2, dim1)
                });
            }
        }
        match SwapDim
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroSwapDims::<B>::new(tensor.node.id, dim1, dim2))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(prep) => prep.finish(
                (dim1, dim2),
                B::float_swap_dims(tensor.primitive, dim1, dim2),
            ),
            OpsKind::UnTracked(prep) => {
                prep.finish(B::float_swap_dims(tensor.primitive, dim1, dim2))
            }
        }
    }
    // Permutes dimensions according to `axes`. Backward permutes the gradient
    // with the inverse permutation.
    fn float_permute(tensor: FloatTensor<Self>, axes: &[usize]) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct PermuteDim;
        // Re-runs the forward permute from the saved input when needed.
        #[derive(new, Debug)]
        struct RetroPermuteDims<B: Backend> {
            input_id: NodeID,
            axes: Vec<usize>,
            _backend: PhantomData<B>,
        }
        impl<B: Backend> RetroForward for RetroPermuteDims<B> {
            fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
                let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);
                let out = B::float_permute(input, &self.axes);
                states.save(out_node, out)
            }
        }
        impl<B: Backend> Backward<B, 1> for PermuteDim {
            // The forward permutation.
            type State = Vec<usize>;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let axes = ops.state;
                // Build the inverse permutation: axis `axis` was moved to
                // position `i`, so the inverse maps position `i` back to `axis`.
                let mut inverse = vec![0usize; axes.len()];
                axes.iter()
                    .enumerate()
                    .for_each(|(i, &axis)| inverse[axis] = i);
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    B::float_permute(grad, &inverse)
                });
            }
        }
        match PermuteDim
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroPermuteDims::<B>::new(tensor.node.id, axes.to_vec()))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(prep) => {
                prep.finish(axes.to_vec(), B::float_permute(tensor.primitive, axes))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_permute(tensor.primitive, axes)),
        }
    }
    // Flips the given axes. Backward flips the gradient along the same axes
    // (flipping is its own inverse).
    fn float_flip(tensor: FloatTensor<Self>, axes: &[usize]) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct FlipDim;
        // Re-runs the forward flip from the saved input when needed.
        #[derive(new, Debug)]
        struct RetroFlipDims<B: Backend> {
            input_id: NodeID,
            axes: Vec<usize>,
            _backend: PhantomData<B>,
        }
        impl<B: Backend> RetroForward for RetroFlipDims<B> {
            fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
                let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);
                let out = B::float_flip(input, &self.axes);
                states.save(out_node, out)
            }
        }
        impl<B: Backend> Backward<B, 1> for FlipDim {
            // The axes flipped in the forward pass.
            type State = Vec<usize>;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let axes = ops.state;
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    B::float_flip(grad, &axes)
                });
            }
        }
        match FlipDim
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroFlipDims::<B>::new(tensor.node.id, axes.to_vec()))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(prep) => {
                prep.finish(axes.to_vec(), B::float_flip(tensor.primitive, axes))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_flip(tensor.primitive, axes)),
        }
    }
    // Reshapes to `shape`. Backward reshapes the gradient back to the input's
    // original shape, first summing out any dim that was 1 in the requested
    // shape but is larger in the incoming gradient.
    fn float_reshape(tensor: FloatTensor<Self>, shape: Shape) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct ReshapeDim;
        // Re-runs the forward reshape from the saved input when needed.
        #[derive(new, Debug)]
        struct RetroReshape<B: Backend> {
            input_id: NodeID,
            shape: Shape,
            _backend: PhantomData<B>,
        }
        impl<B: Backend> RetroForward for RetroReshape<B> {
            fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
                let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);
                let out = B::float_reshape(input, self.shape.clone());
                states.save(out_node, out)
            }
        }
        impl<B: Backend> Backward<B, 1> for ReshapeDim {
            // (original input shape, requested output shape)
            type State = (Shape, Shape);
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (shape_original, shape) = ops.state;
                let ndims_out = shape.num_dims();
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    let shape_grad = grad.shape();
                    let mut grad = grad;
                    // Reduce dims where the gradient grew past the requested
                    // size-1 dim before reshaping back.
                    for i in 0..ndims_out {
                        if shape.dims[i] == 1 && shape_grad.dims[i] != 1 {
                            grad = B::float_sum_dim(grad, i);
                        }
                    }
                    B::float_reshape(grad, shape_original)
                });
            }
        }
        match ReshapeDim
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroReshape::<B>::new(tensor.node.id, shape.clone()))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(prep) => prep.finish(
                (tensor.primitive.shape(), shape.clone()),
                B::float_reshape(tensor.primitive, shape),
            ),
            OpsKind::UnTracked(prep) => prep.finish(B::float_reshape(tensor.primitive, shape)),
        }
    }
    // Gathers values along `dim` at `indices`. Backward scatters the gradient
    // into a zero tensor of the input's shape at the same indices.
    fn float_gather(
        dim: usize,
        tensor: FloatTensor<Self>,
        indices: IntTensor<B>,
    ) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Gather;
        impl<B: Backend> Backward<B, 1> for Gather {
            // (dim, indices, input shape, input device)
            type State = (usize, IntTensor<B>, Shape, B::Device);
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (dim, indices, shape, device) = ops.state;
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    let zeros = B::float_zeros(shape, &device);
                    B::float_scatter(dim, zeros, indices, grad)
                });
            }
        }
        match Gather
            .prepare::<C>([tensor.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => prep.finish(
                (
                    dim,
                    indices.clone(),
                    tensor.primitive.shape(),
                    B::float_device(&tensor.primitive),
                ),
                B::float_gather(dim, tensor.primitive, indices),
            ),
            OpsKind::UnTracked(prep) => {
                prep.finish(B::float_gather(dim, tensor.primitive, indices))
            }
        }
    }
    // Scatters `value` into `tensor` along `dim` at `indices`. Backward routes
    // the gradient to both operands using the saved indices and shapes.
    fn float_scatter(
        dim: usize,
        tensor: FloatTensor<Self>,
        indices: IntTensor<B>,
        value: FloatTensor<Self>,
    ) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Scatter;
        impl<B: Backend> Backward<B, 2> for Scatter {
            // (dim, indices, tensor shape, value shape, device)
            type State = (usize, IntTensor<B>, Shape, Shape, B::Device);
            fn backward(
                self,
                ops: Ops<Self::State, 2>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (dim, indices, shape_lhs, shape_rhs, device) = ops.state;
                // Clone the indices only as many times as the tracked parents
                // require.
                let [indices_4lhs, indices_4rhs] = duplicate(&ops.parents, Some(indices));
                binary::<B, _, _>(
                    ops.parents,
                    ops.node,
                    grads,
                    |grad| {
                        // Gradient w.r.t. the destination tensor: scatter
                        // zeros into the incoming gradient at the indices.
                        let zeros = B::float_zeros(shape_lhs, &device);
                        B::float_scatter(dim, grad, indices_4lhs.unwrap(), zeros)
                    },
                    |grad| {
                        // Gradient w.r.t. the scattered values: scatter the
                        // incoming gradient into a zero tensor.
                        let zeros = B::float_zeros(shape_rhs, &device);
                        B::float_scatter(dim, zeros, indices_4rhs.unwrap(), grad)
                    },
                );
            }
        }
        match Scatter
            .prepare::<C>([tensor.node, value.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => prep.finish(
                (
                    dim,
                    indices.clone(),
                    tensor.primitive.shape(),
                    value.primitive.shape(),
                    B::float_device(&value.primitive),
                ),
                B::float_scatter(dim, tensor.primitive, indices, value.primitive),
            ),
            OpsKind::UnTracked(prep) => prep.finish(B::float_scatter(
                dim,
                tensor.primitive,
                indices,
                value.primitive,
            )),
        }
    }
    // Selects slices along `dim` at `indices`. Backward assigns the gradient
    // back into a zero tensor of the input's shape at the same indices.
    fn float_select(
        tensor: FloatTensor<Self>,
        dim: usize,
        indices: IntTensor<B>,
    ) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Select;
        // Re-runs the forward select from the saved input when needed.
        #[derive(new, Debug)]
        struct RetroSelect<B: Backend> {
            input_id: NodeID,
            dim: usize,
            indices: IntTensor<B>,
        }
        impl<B: Backend> RetroForward for RetroSelect<B> {
            fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
                let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);
                let out = B::float_select(input, self.dim, self.indices.clone());
                states.save(out_node, out)
            }
        }
        impl<B: Backend> Backward<B, 1> for Select {
            // (dim, indices, input shape, input device)
            type State = (usize, IntTensor<B>, Shape, B::Device);
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (dim, indices, shape, device) = ops.state;
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    let zeros = B::float_zeros(shape, &device);
                    B::float_select_assign(zeros, dim, indices, grad)
                });
            }
        }
        match Select
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroSelect::<B>::new(tensor.node.id, dim, indices.clone()))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(prep) => prep.finish(
                (
                    dim,
                    indices.clone(),
                    tensor.primitive.shape(),
                    B::float_device(&tensor.primitive),
                ),
                B::float_select(tensor.primitive, dim, indices),
            ),
            OpsKind::UnTracked(prep) => {
                prep.finish(B::float_select(tensor.primitive, dim, indices))
            }
        }
    }
    fn float_select_assign(
        tensor: FloatTensor<Self>,
        dim: usize,
        indices: IntTensor<B>,
        value: FloatTensor<Self>,
    ) -> FloatTensor<Self> {
        // Assign `value` into `tensor` at the selected `indices` along `dim`.
        #[derive(Debug)]
        struct IndexSelectDimAssign;
        #[derive(new, Debug)]
        struct RetroSelectAssign<B: Backend> {
            tensor_id: NodeID,
            dim: usize,
            indices: IntTensor<B>,
            value_id: NodeID,
        }
        impl<B: Backend> RetroForward for RetroSelectAssign<B> {
            // Recompute the forward output from checkpointed tensor/value states.
            fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
                let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);
                let value = states.get_state::<B::FloatTensorPrimitive>(&self.value_id);
                let out = B::float_select_assign(tensor, self.dim, self.indices.clone(), value);
                states.save(out_node, out)
            }
        }
        impl<B: Backend> Backward<B, 2> for IndexSelectDimAssign {
            type State = (usize, IntTensor<B>);
            fn backward(
                self,
                ops: Ops<Self::State, 2>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (dim, indices) = ops.state;
                binary::<B, _, _>(
                    ops.parents,
                    ops.node,
                    grads,
                    // Gradient w.r.t. `tensor` passes through unchanged.
                    |grad| grad,
                    // Gradient w.r.t. `value` is the grad at the assigned indices.
                    |grad| B::float_select(grad, dim, indices),
                );
            }
        }
        match IndexSelectDimAssign
            .prepare::<C>([tensor.node.clone(), value.node.clone()])
            .memory_bound()
            .retro_forward(RetroSelectAssign::<B>::new(
                tensor.node.id,
                dim,
                indices.clone(),
                value.node.id,
            ))
            .parents([&tensor, &value])
            .stateful()
        {
            OpsKind::Tracked(prep) => prep.finish(
                (dim, indices.clone()),
                B::float_select_assign(tensor.primitive, dim, indices, value.primitive),
            ),
            OpsKind::UnTracked(prep) => prep.finish(B::float_select_assign(
                tensor.primitive,
                dim,
                indices,
                value.primitive,
            )),
        }
    }
    fn float_slice(
        tensor: FloatTensor<Self>,
        ranges: &[core::ops::Range<usize>],
    ) -> FloatTensor<Self> {
        // Slice the tensor with one range per dimension.
        #[derive(Debug)]
        struct Index;
        #[derive(new, Debug)]
        struct RetroSlice<B: Backend> {
            tensor_id: NodeID,
            ranges: Vec<core::ops::Range<usize>>,
            _backend: PhantomData<B>,
        }
        impl<B: Backend> RetroForward for RetroSlice<B> {
            // Recompute the slice from the checkpointed input.
            fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
                let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);
                let out = B::float_slice(tensor, &self.ranges);
                states.save(out_node, out)
            }
        }
        impl<B: Backend> Backward<B, 1> for Index {
            // (ranges, input shape, device)
            type State = (Vec<core::ops::Range<usize>>, Shape, B::Device);
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (ranges, shape, device) = ops.state;
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    // Place the gradient back into the sliced region of a zero
                    // tensor shaped like the input.
                    let zeros = B::float_zeros(shape, &device);
                    B::float_slice_assign(zeros, &ranges, grad)
                });
            }
        }
        match Index
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroSlice::<B>::new(tensor.node.id, ranges.to_vec()))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(prep) => prep.finish(
                (
                    ranges.to_vec(),
                    tensor.primitive.shape(),
                    B::float_device(&tensor.primitive),
                ),
                B::float_slice(tensor.primitive, ranges),
            ),
            OpsKind::UnTracked(prep) => prep.finish(B::float_slice(tensor.primitive, ranges)),
        }
    }
    fn float_slice_assign(
        tensor: FloatTensor<Self>,
        ranges: &[core::ops::Range<usize>],
        value: FloatTensor<Self>,
    ) -> FloatTensor<Self> {
        // Overwrite the `ranges` region of `tensor` with `value`.
        #[derive(Debug)]
        struct SliceAssign;
        #[derive(new, Debug)]
        struct RetroSliceAssign<B: Backend> {
            tensor_id: NodeID,
            ranges: Vec<core::ops::Range<usize>>,
            value_id: NodeID,
            _backend: PhantomData<B>,
        }
        impl<B: Backend> RetroForward for RetroSliceAssign<B> {
            // Recompute the forward output from checkpointed tensor/value states.
            fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
                let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);
                let value = states.get_state::<B::FloatTensorPrimitive>(&self.value_id);
                let out = B::float_slice_assign(tensor, &self.ranges, value);
                states.save(out_node, out)
            }
        }
        impl<B: Backend> Backward<B, 2> for SliceAssign {
            // (ranges, value shape, device)
            type State = (Vec<core::ops::Range<usize>>, Shape, B::Device);
            fn backward(
                self,
                ops: Ops<Self::State, 2>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (ranges, shape_rhs, device) = ops.state;
                // Clone `ranges` only for the parents that are actually tracked.
                let [ranges_4lhs, ranges_4rhs] = duplicate(&ops.parents, Some(ranges));
                binary::<B, _, _>(
                    ops.parents,
                    ops.node,
                    grads,
                    |grad| {
                        // Gradient w.r.t. `tensor`: zero the overwritten region, since
                        // those inputs did not contribute to the output.
                        let zeros = B::float_zeros(shape_rhs, &device);
                        B::float_slice_assign(grad, &ranges_4lhs.unwrap(), zeros)
                    },
                    // Gradient w.r.t. `value`: the grad restricted to the region.
                    |grad| B::float_slice(grad, &ranges_4rhs.unwrap()),
                );
            }
        }
        match SliceAssign
            .prepare::<C>([tensor.node.clone(), value.node.clone()])
            .memory_bound()
            .retro_forward(RetroSliceAssign::<B>::new(
                tensor.node.id,
                ranges.to_vec(),
                value.node.id,
            ))
            .parents([&tensor, &value])
            .stateful()
        {
            OpsKind::Tracked(prep) => prep.finish(
                (
                    ranges.to_vec(),
                    value.primitive.shape(),
                    B::float_device(&value.primitive),
                ),
                B::float_slice_assign(tensor.primitive, ranges, value.primitive),
            ),
            OpsKind::UnTracked(prep) => prep.finish(B::float_slice_assign(
                tensor.primitive,
                ranges,
                value.primitive,
            )),
        }
    }
    fn float_mask_where(
        tensor: FloatTensor<Self>,
        mask: BoolTensor<Self>,
        source: FloatTensor<Self>,
    ) -> FloatTensor<Self> {
        // Element-wise select: output takes `source` where `mask` is true,
        // `tensor` elsewhere. Both float inputs may receive gradients.
        #[derive(Debug)]
        struct MaskWhere;
        impl<B: Backend> Backward<B, 2> for MaskWhere {
            // (mask, tensor shape, source shape, device)
            type State = (BoolTensor<B>, Shape, Shape, B::Device);
            fn backward(
                self,
                ops: Ops<Self::State, 2>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (mask, shape_lhs, shape_rhs, device) = ops.state;
                // Clone `mask` only for the parents that are actually tracked.
                let [mask_4lhs, mask_4rhs] = duplicate(&ops.parents, Some(mask));
                binary::<B, _, _>(
                    ops.parents,
                    ops.node,
                    grads,
                    |grad| {
                        // Gradient w.r.t. `tensor`: zero where the mask selected `source`,
                        // then reduce away broadcast dimensions.
                        let zeros = B::float_zeros(shape_lhs.clone(), &device);
                        let grad = B::float_mask_where(grad, mask_4lhs.unwrap(), zeros);
                        broadcast_shape::<B>(grad, &shape_lhs)
                    },
                    |grad| {
                        // Gradient w.r.t. `source`: keep only the masked positions,
                        // then reduce away broadcast dimensions.
                        let zeros = B::float_zeros(shape_rhs.clone(), &device);
                        let grad = B::float_mask_where(zeros, mask_4rhs.unwrap(), grad);
                        broadcast_shape::<B>(grad, &shape_rhs)
                    },
                );
            }
        }
        match MaskWhere
            .prepare::<C>([tensor.node, source.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => prep.finish(
                (
                    mask.clone(),
                    tensor.primitive.shape(),
                    source.primitive.shape(),
                    B::float_device(&source.primitive),
                ),
                B::float_mask_where(tensor.primitive, mask, source.primitive),
            ),
            OpsKind::UnTracked(prep) => prep.finish(B::float_mask_where(
                tensor.primitive,
                mask,
                source.primitive,
            )),
        }
    }
    fn float_mask_fill(
        tensor: FloatTensor<Self>,
        mask: BoolTensor<B>,
        value: FloatElem<B>,
    ) -> FloatTensor<Self> {
        // Fill masked positions with a constant; the constant carries no
        // gradient, so only `tensor` is tracked.
        #[derive(Debug)]
        struct MaskFill;
        impl<B: Backend> Backward<B, 1> for MaskFill {
            type State = BoolTensor<B>;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    // Filled positions contributed a constant, so their grad is zero.
                    B::float_mask_fill(grad, ops.state, 0.elem())
                });
            }
        }
        match MaskFill
            .prepare::<C>([tensor.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => prep.finish(
                mask.clone(),
                B::float_mask_fill(tensor.primitive, mask, value),
            ),
            OpsKind::UnTracked(prep) => {
                prep.finish(B::float_mask_fill(tensor.primitive, mask, value))
            }
        }
    }
    // Comparison ops return bool tensors and are not differentiable, so they
    // delegate straight to the inner backend without registering any backward.
    fn float_equal(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {
        B::float_equal(lhs.primitive, rhs.primitive)
    }
    fn float_equal_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {
        B::float_equal_elem(lhs.primitive, rhs)
    }
    fn float_greater(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {
        B::float_greater(lhs.primitive, rhs.primitive)
    }
    fn float_greater_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {
        B::float_greater_elem(lhs.primitive, rhs)
    }
    fn float_greater_equal(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {
        B::float_greater_equal(lhs.primitive, rhs.primitive)
    }
    fn float_greater_equal_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {
        B::float_greater_equal_elem(lhs.primitive, rhs)
    }
    fn float_lower(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {
        B::float_lower(lhs.primitive, rhs.primitive)
    }
    fn float_lower_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {
        B::float_lower_elem(lhs.primitive, rhs)
    }
    fn float_lower_equal(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {
        B::float_lower_equal(lhs.primitive, rhs.primitive)
    }
    fn float_lower_equal_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {
        B::float_lower_equal_elem(lhs.primitive, rhs)
    }
fn float_detach(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
// When we detach a tensor, we remove it from the graph, but we still want to keep the
// `require_grad` setting.
let is_require_grad = Self::float_is_require_grad(&tensor);
let tensor = AutodiffTensor::new(tensor.primitive);
match is_require_grad {
true => tensor.require_grad(),
false => tensor,
}
}
fn float_set_require_grad(tensor: FloatTensor<Self>, require_grad: bool) -> FloatTensor<Self> {
if require_grad {
return tensor.require_grad();
}
AutodiffTensor::new(tensor.primitive)
}
    fn float_is_require_grad(tensor: &FloatTensor<Self>) -> bool {
        // A tensor requires grad iff its graph node carries the `Grad` requirement.
        matches!(tensor.node.requirement, Requirement::Grad)
    }
    fn float_mean(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        // Mean over all elements. Backward spreads grad / num_elements to
        // every input position.
        #[derive(Debug)]
        struct Mean;
        impl<B: Backend> Backward<B, 1> for Mean {
            // Input shape, needed to rebuild a full-size gradient.
            type State = Shape;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    let shape = ops.state;
                    // d(mean)/dx_i = 1 / n for each element.
                    let val = 1_f64 / shape.num_elements() as f64;
                    let ones = B::float_ones(shape, &B::float_device(&grad));
                    let val = B::float_mul_scalar(ones, val.elem());
                    // Align the scalar grad's rank with the input before multiplying.
                    let grad = unsqueeze_like::<B>(grad, val.shape());
                    B::float_mul(val, grad)
                });
            }
        }
        match Mean.prepare::<C>([tensor.node]).compute_bound().stateful() {
            OpsKind::Tracked(prep) => {
                prep.finish(tensor.primitive.shape(), B::float_mean(tensor.primitive))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_mean(tensor.primitive)),
        }
    }
    fn float_sum(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        // Sum over all elements. Backward broadcasts the scalar grad to the
        // full input shape (d(sum)/dx_i = 1).
        #[derive(Debug)]
        struct Sum;
        impl<B: Backend> Backward<B, 1> for Sum {
            // Input shape, needed to rebuild a full-size gradient.
            type State = Shape;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    let val = B::float_ones(ops.state, &B::float_device(&grad));
                    // Align the scalar grad's rank with the input before multiplying.
                    let grad = unsqueeze_like::<B>(grad, val.shape());
                    B::float_mul(val, grad)
                });
            }
        }
        match Sum.prepare::<C>([tensor.node]).compute_bound().stateful() {
            OpsKind::Tracked(prep) => {
                prep.finish(tensor.primitive.shape(), B::float_sum(tensor.primitive))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_sum(tensor.primitive)),
        }
    }
    fn float_mean_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {
        // Mean along one dimension. Backward spreads grad / dim_size across
        // that dimension.
        #[derive(Debug)]
        struct MeanDim;
        impl<B: Backend> Backward<B, 1> for MeanDim {
            // (input shape, reduced dimension)
            type State = (Shape, usize);
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (shape, dim) = ops.state;
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    // Each input along `dim` contributed 1 / dim_size to the mean.
                    let val = 1_f64 / shape.dims[dim] as f64;
                    let ones = B::float_ones(shape, &B::float_device(&grad));
                    let val = B::float_mul_scalar(ones, B::FloatElem::from_elem(val));
                    // Collapse grad along `dim` so it broadcasts against `val`.
                    let grad = B::float_sum_dim(grad, dim);
                    B::float_mul(val, grad)
                });
            }
        }
        match MeanDim
            .prepare::<C>([tensor.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => prep.finish(
                (tensor.primitive.shape(), dim),
                B::float_mean_dim(tensor.primitive, dim),
            ),
            OpsKind::UnTracked(prep) => prep.finish(B::float_mean_dim(tensor.primitive, dim)),
        }
    }
    fn float_sum_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {
        // Sum along one dimension. Backward broadcasts the grad across that
        // dimension (each element has derivative 1).
        #[derive(Debug)]
        struct SumDim;
        impl<B: Backend> Backward<B, 1> for SumDim {
            // (input shape, reduced dimension)
            type State = (Shape, usize);
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (shape, dim) = ops.state;
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    let ones = B::float_ones(shape, &B::float_device(&grad));
                    // Collapse grad along `dim` so it broadcasts against `ones`.
                    let grad = B::float_sum_dim(grad, dim);
                    B::float_mul(ones, grad)
                });
            }
        }
        match SumDim
            .prepare::<C>([tensor.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => prep.finish(
                (tensor.primitive.shape(), dim),
                B::float_sum_dim(tensor.primitive, dim),
            ),
            OpsKind::UnTracked(prep) => prep.finish(B::float_sum_dim(tensor.primitive, dim)),
        }
    }
    // Arg-reductions return integer indices and are not differentiable, so
    // they delegate straight to the inner backend.
    fn float_argmax(tensor: FloatTensor<Self>, dim: usize) -> IntTensor<B> {
        B::float_argmax(tensor.primitive, dim)
    }
    fn float_argmin(tensor: FloatTensor<Self>, dim: usize) -> IntTensor<B> {
        B::float_argmin(tensor.primitive, dim)
    }
    fn float_exp(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        // Element-wise exponential. d/dx exp(x) = exp(x), so the backward
        // recomputes exp from the checkpointed input and multiplies by grad.
        #[derive(Debug)]
        struct Exp;
        retro_unary!(RetroExp, B::float_exp);
        impl<B: Backend> Backward<B, 1> for Exp {
            // NodeID of the checkpointed input.
            type State = NodeID;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                let input = checkpointer.retrieve_node_output(ops.state);
                let output = B::float_exp(input);
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    B::float_mul(grad, output)
                });
            }
        }
        match Exp
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroExp::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                let state = prep.checkpoint(&tensor);
                prep.finish(state, B::float_exp(tensor.primitive))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_exp(tensor.primitive)),
        }
    }
    fn float_log(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        // Element-wise natural logarithm. d/dx ln(x) = 1/x.
        #[derive(Debug)]
        struct Log;
        retro_unary!(RetroLog, B::float_log);
        impl<B: Backend> Backward<B, 1> for Log {
            // NodeID of the checkpointed input.
            type State = NodeID;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                let input = checkpointer.retrieve_node_output(ops.state);
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    // 1/x computed as x^(-1).
                    let value = B::float_powf_scalar(input, -1.0);
                    B::float_mul(grad, value)
                });
            }
        }
        match Log
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroLog::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                let state = prep.checkpoint(&tensor);
                prep.finish(state, B::float_log(tensor.primitive))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_log(tensor.primitive)),
        }
    }
    fn float_log1p(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        // Element-wise ln(1 + x). d/dx ln(1 + x) = 1 / (1 + x).
        #[derive(Debug)]
        struct Log1P;
        retro_unary!(RetroLog1P, B::float_log1p);
        impl<B: Backend> Backward<B, 1> for Log1P {
            // NodeID of the checkpointed input.
            type State = NodeID;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                let input = checkpointer.retrieve_node_output(ops.state);
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    // 1/(1+x) computed as (x + 1)^(-1).
                    let value = B::float_add_scalar(input, 1.elem());
                    let value = B::float_powf_scalar(value, -1.0);
                    B::float_mul(grad, value)
                });
            }
        }
        match Log1P
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroLog1P::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                let state = prep.checkpoint(&tensor);
                prep.finish(state, B::float_log1p(tensor.primitive))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_log1p(tensor.primitive)),
        }
    }
    fn float_powf_scalar(tensor: FloatTensor<Self>, value: f32) -> FloatTensor<Self> {
        // Element-wise x^value for a constant exponent.
        // d/dx x^v = v * x^(v-1).
        #[derive(Debug)]
        struct PowfScalar;
        #[derive(new, Debug)]
        struct RetroPowfScalar<B: Backend> {
            lhs_id: NodeID,
            rhs: f32,
            _backend: PhantomData<B>,
        }
        impl<B: Backend> RetroForward for RetroPowfScalar<B> {
            // Recompute the power from the checkpointed base.
            fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
                let lhs = states.get_state::<B::FloatTensorPrimitive>(&self.lhs_id);
                let out = B::float_powf_scalar(lhs, self.rhs);
                states.save(out_node, out)
            }
        }
        impl<B: Backend> Backward<B, 1> for PowfScalar {
            // (checkpointed input id, exponent)
            type State = (NodeID, f32);
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                let (tensor_id, value) = ops.state;
                let tensor = checkpointer.retrieve_node_output(tensor_id);
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    let tmp = B::float_powf_scalar(tensor, value - 1.0);
                    let value = B::float_mul_scalar(tmp, value.elem());
                    B::float_mul(grad, value)
                });
            }
        }
        match PowfScalar
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroPowfScalar::<B>::new(tensor.node.id, value))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                let state = (prep.checkpoint(&tensor), value);
                prep.finish(state, B::float_powf_scalar(tensor.primitive, value))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_powf_scalar(tensor.primitive, value)),
        }
    }
    fn float_sqrt(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        // Element-wise square root. d/dx sqrt(x) = x^(-1/2) / 2.
        #[derive(Debug)]
        struct Sqrt;
        retro_unary!(RetroSqrt, B::float_sqrt);
        impl<B: Backend> Backward<B, 1> for Sqrt {
            // NodeID of the checkpointed input.
            type State = NodeID;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                let input = checkpointer.retrieve_node_output(ops.state);
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    let value = B::float_div_scalar(B::float_powf_scalar(input, -0.5), 2.elem());
                    B::float_mul(grad, value)
                });
            }
        }
        match Sqrt
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroSqrt::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                let state = prep.checkpoint(&tensor);
                prep.finish(state, B::float_sqrt(tensor.primitive))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_sqrt(tensor.primitive)),
        }
    }
    fn float_abs(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        // Element-wise absolute value. d/dx |x| = sign(x).
        #[derive(Debug)]
        struct Abs;
        retro_unary!(RetroAbs, B::float_abs);
        impl<B: Backend> Backward<B, 1> for Abs {
            // NodeID of the checkpointed input.
            type State = NodeID;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                let tensor: B::FloatTensorPrimitive = checkpointer.retrieve_node_output(ops.state);
                let state = B::float_sign(tensor);
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    B::float_mul(grad, state)
                });
            }
        }
        match Abs
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroAbs::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                let state = prep.checkpoint(&tensor);
                prep.finish(state, B::float_abs(tensor.primitive))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_abs(tensor.primitive)),
        }
    }
    fn float_cos(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        // Element-wise cosine. d/dx cos(x) = -sin(x).
        #[derive(Debug)]
        struct Cos;
        retro_unary!(RetroCos, B::float_cos);
        impl<B: Backend> Backward<B, 1> for Cos {
            // NodeID of the checkpointed input.
            type State = NodeID;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                let input = checkpointer.retrieve_node_output(ops.state);
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    let value = B::float_neg(B::float_sin(input));
                    B::float_mul(grad, value)
                });
            }
        }
        match Cos
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroCos::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                let state = prep.checkpoint(&tensor);
                prep.finish(state, B::float_cos(tensor.primitive))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_cos(tensor.primitive)),
        }
    }
    fn float_sin(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        // Element-wise sine. d/dx sin(x) = cos(x).
        #[derive(Debug)]
        struct Sin;
        retro_unary!(RetroSin, B::float_sin);
        impl<B: Backend> Backward<B, 1> for Sin {
            // NodeID of the checkpointed input.
            type State = NodeID;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                let state = checkpointer.retrieve_node_output(ops.state);
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    let value = B::float_cos(state);
                    B::float_mul(grad, value)
                });
            }
        }
        match Sin
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroSin::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                let state = prep.checkpoint(&tensor);
                prep.finish(state, B::float_sin(tensor.primitive))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_sin(tensor.primitive)),
        }
    }
    fn float_tanh(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        // Element-wise hyperbolic tangent. d/dx tanh(x) = 1 - tanh(x)^2.
        #[derive(Debug)]
        struct Tanh;
        retro_unary!(RetroTanh, B::float_tanh);
        impl<B: Backend> Backward<B, 1> for Tanh {
            // NodeID of the checkpointed input.
            type State = NodeID;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                let input = checkpointer.retrieve_node_output(ops.state);
                // Recompute tanh(x) from the checkpointed input.
                let state = B::float_tanh(input);
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    let value = B::float_add_scalar(
                        B::float_neg(B::float_powf_scalar(state, 2.0)),
                        1.elem(),
                    );
                    B::float_mul(grad, value)
                });
            }
        }
        match Tanh
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroTanh::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                let state = prep.checkpoint(&tensor);
                prep.finish(state, B::float_tanh(tensor.primitive))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_tanh(tensor.primitive)),
        }
    }
    fn float_round(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        // Rounding is piecewise constant, so its gradient is zero everywhere.
        #[derive(Debug)]
        struct Round;
        retro_unary!(RetroRound, B::float_round);
        impl<B: Backend> Backward<B, 1> for Round {
            // (input shape, device) — enough to build a zero gradient.
            type State = (Shape, B::Device);
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (shape, device) = ops.state;
                unary::<B, _>(ops.parents, ops.node, grads, |_grad| {
                    B::float_zeros(shape, &device)
                })
            }
        }
        match Round
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroRound::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(preps) => preps.finish(
                (tensor.primitive.shape(), B::float_device(&tensor.primitive)),
                B::float_round(tensor.primitive),
            ),
            OpsKind::UnTracked(preps) => preps.finish(B::float_round(tensor.primitive)),
        }
    }
    fn float_floor(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        // Floor is piecewise constant, so its gradient is zero everywhere.
        #[derive(Debug)]
        struct Floor;
        retro_unary!(RetroFloor, B::float_floor);
        impl<B: Backend> Backward<B, 1> for Floor {
            // (input shape, device) — enough to build a zero gradient.
            type State = (Shape, B::Device);
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (shape, device) = ops.state;
                unary::<B, _>(ops.parents, ops.node, grads, |_grad| {
                    B::float_zeros(shape, &device)
                })
            }
        }
        match Floor
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroFloor::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(preps) => preps.finish(
                (tensor.primitive.shape(), B::float_device(&tensor.primitive)),
                B::float_floor(tensor.primitive),
            ),
            OpsKind::UnTracked(preps) => preps.finish(B::float_floor(tensor.primitive)),
        }
    }
fn float_ceil(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
#[derive(Debug)]
struct Ceil;
retro_unary!(RetroCeil, B::float_ceil);
impl<B: Backend> Backward<B, 1> for Ceil {
type State = (Shape, B::Device);
fn backward(
self,
ops: Ops<Self::State, 1>,
grads: &mut Gradients,
_checkpointer: &mut Checkpointer,
) {
let (shape, device) = ops.state;
unary::<B, _>(ops.parents, ops.node, grads, |_grad| {
B::float_zeros(shape, &device)
})
}
}
match Ceil
.prepare::<C>([tensor.node.clone()])
.memory_bound()
.retro_forward(RetroCeil::<B>::new(tensor.node.id))
.parents([&tensor])
.stateful()
{
OpsKind::Tracked(preps) => preps.finish(
(tensor.primitive.shape(), B::float_device(&tensor.primitive)),
B::float_floor(tensor.primitive),
),
OpsKind::UnTracked(preps) => preps.finish(B::float_floor(tensor.primitive)),
}
}
    fn float_erf(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        // Element-wise error function. d/dx erf(x) = 2/sqrt(pi) * exp(-x^2).
        #[derive(Debug)]
        struct Erf;
        retro_unary!(RetroErf, B::float_erf);
        impl<B: Backend> Backward<B, 1> for Erf {
            // NodeID of the checkpointed input.
            type State = NodeID;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    let ops = checkpointer.retrieve_node_output(ops.state);
                    let exponent = B::float_neg(B::float_powf_scalar(ops, 2.0));
                    let numerator = B::float_mul_scalar(B::float_exp(exponent), 2.0.elem());
                    let denominator = core::f64::consts::PI.sqrt().elem();
                    let value = B::float_div_scalar(numerator, denominator);
                    B::float_mul(grad, value)
                });
            }
        }
        match Erf
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroErf::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                let state = prep.checkpoint(&tensor);
                prep.finish(state, B::float_erf(tensor.primitive))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_erf(tensor.primitive)),
        }
    }
    fn float_cat(tensors: Vec<FloatTensor<Self>>, dim: usize) -> FloatTensor<Self> {
        // Concatenate along `dim`. Because the number of parents is dynamic,
        // this op registers a hand-written `Step` instead of using the usual
        // fixed-arity prepare/finish machinery.
        #[derive(new, Debug)]
        struct CatStep<B: Backend> {
            nodes: Vec<Option<NodeRef>>,
            // The dimension of each tensor along the dim dimension.
            // This indicates the number of dimension concatenated for each tensor.
            dim_sizes: Vec<usize>,
            output: NodeRef,
            phantom: PhantomData<B>,
            dim: usize,
        }
        impl<B: Backend> Step for CatStep<B> {
            fn step(self: Box<Self>, grads: &mut Gradients, _checkpointer: &mut Checkpointer) {
                let grad = grads.consume::<B>(&self.output);
                // Full ranges over every dimension; only `self.dim` is narrowed below.
                let ranges: Vec<_> = grad.shape().dims.iter().map(|v| 0..*v).collect();
                let mut current_index = 0;
                // Walk the concatenated axis, slicing out each tracked parent's
                // contribution from the output gradient.
                self.nodes
                    .into_iter()
                    .zip(self.dim_sizes)
                    .filter_map(|(node, dim_size)| node.map(|node| (node, dim_size)))
                    .for_each(|(node, dim_size)| {
                        let mut ranges = ranges.clone();
                        ranges[self.dim] = current_index..dim_size + current_index;
                        current_index += dim_size;
                        grads.register::<B>(node.id, B::float_slice(grad.clone(), &ranges));
                    });
            }
            fn node(&self) -> NodeID {
                self.output.id
            }
            fn parents(&self) -> Vec<NodeID> {
                self.nodes
                    .iter()
                    .filter_map(|node| node.clone())
                    .map(|node| node.id)
                    .collect()
            }
            fn depth(&self) -> usize {
                self.output.order
            }
        }
        let mut nodes = Vec::with_capacity(tensors.len());
        let mut primitives = Vec::with_capacity(tensors.len());
        let mut dim_sizes = Vec::with_capacity(tensors.len());
        tensors.into_iter().for_each(|tensor| {
            dim_sizes.push(tensor.primitive.shape().dims[dim]);
            nodes.push(tensor.node);
            primitives.push(tensor.primitive);
        });
        let requirement = Requirement::from_nodes(&nodes);
        // For simplicity, this operation does not checkpoint anything
        let cat_computing_property = ComputingProperty::Ambiguous;
        let checkpointer_builder = CheckpointerBuilder::default();
        let output = B::float_cat(primitives, dim);
        // No tracked parent: return an untracked output with no backward step.
        if requirement.is_none() {
            return AutodiffTensor::from_parents(
                output,
                &nodes,
                requirement,
                cat_computing_property,
            );
        }
        let output =
            AutodiffTensor::from_parents(output, &nodes, requirement, cat_computing_property);
        // Keep node refs only for parents that require grad.
        let nodes = nodes
            .into_iter()
            .map(|node| node.clone_if_require_grad())
            .collect::<Vec<_>>();
        let ops = CatStep::<B>::new(nodes, dim_sizes, output.node.clone(), dim);
        output.register_step(ops, checkpointer_builder)
    }
    fn float_max_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {
        // Max along `dim`. When tracked, use the with-indices variant so the
        // shared MaxMinDim backward can scatter the grad to the winning entries.
        match MaxMinDim
            .prepare::<C>([tensor.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => {
                let shape = tensor.primitive.shape();
                let (tensor, index) = B::float_max_dim_with_indices(tensor.primitive, dim);
                prep.finish((index, shape), tensor)
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_max_dim(tensor.primitive, dim)),
        }
    }
    fn float_max_dim_with_indices(
        tensor: FloatTensor<Self>,
        dim: usize,
    ) -> (FloatTensor<Self>, IntTensor<B>) {
        // Same as float_max_dim but also returns the (non-differentiable) indices.
        match MaxMinDim
            .prepare::<C>([tensor.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => {
                let shape = tensor.primitive.shape();
                let (tensor, index) = B::float_max_dim_with_indices(tensor.primitive, dim);
                let tensor = prep.finish((index.clone(), shape), tensor);
                (tensor, index)
            }
            OpsKind::UnTracked(prep) => {
                let (tensor, index) = B::float_max_dim_with_indices(tensor.primitive, dim);
                let tensor = prep.finish(tensor);
                (tensor, index)
            }
        }
    }
    fn float_min_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {
        // Min along `dim`; mirrors float_max_dim with the min primitives.
        match MaxMinDim
            .prepare::<C>([tensor.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => {
                let shape = tensor.primitive.shape();
                let (tensor, index) = B::float_min_dim_with_indices(tensor.primitive, dim);
                prep.finish((index, shape), tensor)
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_min_dim(tensor.primitive, dim)),
        }
    }
    fn float_min_dim_with_indices(
        tensor: FloatTensor<Self>,
        dim: usize,
    ) -> (FloatTensor<Self>, IntTensor<B>) {
        // Same as float_min_dim but also returns the (non-differentiable) indices.
        match MaxMinDim
            .prepare::<C>([tensor.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => {
                let shape = tensor.primitive.shape();
                let (tensor, index) = B::float_min_dim_with_indices(tensor.primitive, dim);
                let tensor = prep.finish((index.clone(), shape), tensor);
                (tensor, index)
            }
            OpsKind::UnTracked(prep) => {
                let (tensor, index) = B::float_min_dim_with_indices(tensor.primitive, dim);
                let tensor = prep.finish(tensor);
                (tensor, index)
            }
        }
    }
    fn float_into_int(tensor: FloatTensor<Self>) -> <Autodiff<B> as Backend>::IntTensorPrimitive {
        // Casting to int leaves the autodiff graph; delegate to the inner backend.
        B::float_into_int(tensor.primitive)
    }
    fn float_powf(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {
        // Element-wise lhs^rhs with broadcasting.
        // d/d(lhs) = rhs * lhs^(rhs-1); d/d(rhs) = lhs^rhs * ln(lhs).
        #[derive(Debug)]
        struct PowF;
        retro_binary!(RetroPowf, B::float_powf);
        impl<B: Backend> Backward<B, 2> for PowF {
            // (checkpointed lhs id, checkpointed rhs id, broadcast bookkeeping)
            type State = (NodeID, NodeID, BinaryOpsBroadcast);
            fn backward(
                self,
                ops: Ops<Self::State, 2>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                let (lhs_id, rhs_id, broadcast) = ops.state;
                let lhs: B::FloatTensorPrimitive = checkpointer.retrieve_node_output(lhs_id);
                let rhs: B::FloatTensorPrimitive = checkpointer.retrieve_node_output(rhs_id);
                // Both lhs and rhs are needed for both lhs and rhs gradients, but we clone them
                // the number of times required by the parents specification.
                let [rhs_4lhs, rhs_4rhs] = duplicate(&ops.parents, Some(rhs));
                let [lhs_4lhs, lhs_4rhs] = duplicate(&ops.parents, Some(lhs));
                binary::<B, _, _>(
                    ops.parents,
                    ops.node,
                    grads,
                    |grad| {
                        //rhs*(lhs.val**(rhs-1))*grad
                        let rhs1 = rhs_4lhs.unwrap();
                        let rhs2 = rhs1.clone();
                        let lhs = lhs_4lhs.unwrap();
                        let tmp = B::float_powf(
                            lhs,
                            B::float_sub_scalar(rhs1, B::FloatElem::from_elem(1.0)),
                        );
                        let value = B::float_mul(tmp, rhs2);
                        let grad = B::float_mul(grad, value);
                        // Reduce broadcast dims back to lhs's shape.
                        broadcast.backward_lhs::<B>(grad)
                    },
                    |grad| {
                        //lhs**rhs * ln(lhs) * grad
                        let rhs = rhs_4rhs.unwrap();
                        let lhs1 = lhs_4rhs.unwrap();
                        let lhs2 = lhs1.clone();
                        let tmp = B::float_powf(lhs1, rhs);
                        let value = B::float_mul(tmp, B::float_log(lhs2));
                        let grad = B::float_mul(grad, value);
                        // Reduce broadcast dims back to rhs's shape.
                        broadcast.backward_rhs::<B>(grad)
                    },
                );
            }
        }
        let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);
        match PowF
            .prepare::<C>([lhs.node.clone(), rhs.node.clone()])
            .memory_bound()
            .retro_forward(RetroPowf::<B>::new(lhs.node.id, rhs.node.id))
            .parents([&lhs, &rhs])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                let lhs_state = prep.checkpoint(&lhs);
                let rhs_state = prep.checkpoint(&rhs);
                prep.finish(
                    (lhs_state, rhs_state, broadcast),
                    B::float_powf(lhs.primitive, rhs.primitive),
                )
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_powf(lhs.primitive, rhs.primitive)),
        }
    }
    fn float_sign(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        // Element-wise sign. The op is piecewise constant, so the backward
        // contributes a zero gradient.
        #[derive(Debug)]
        struct Sign;
        retro_unary!(RetroSign, B::float_sign);
        impl<B: Backend> Backward<B, 1> for Sign {
            type State = ();
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                unary::<B, _>(ops.parents, ops.node, grads, |grad|
                    // Always return 0 because the derivative of the sign function
                    // does not contribute to gradient updates in a meaningful way.
                    B::float_mul_scalar(grad, 0.elem()));
            }
        }
        // Stateless: nothing is needed from the forward pass in backward.
        Sign.prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroSign::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateless(B::float_sign(tensor.primitive))
    }
    fn float_expand(tensor: FloatTensor<Self>, shape: Shape) -> FloatTensor<Self> {
        // D1: tensor, D2: shape
        // Broadcast the tensor to `shape`. Backward sums the gradient over the
        // dimensions that were expanded, then reshapes back to the input shape.
        #[derive(Debug)]
        struct ExpandDim;
        #[derive(new, Debug)]
        struct RetroExpand<B: Backend> {
            input_id: NodeID,
            shape: Shape,
            _backend: PhantomData<B>,
        }
        impl<B: Backend> RetroForward for RetroExpand<B> {
            // Recompute the expand from the checkpointed input.
            fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
                let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);
                let out = B::float_expand(input, self.shape.clone());
                states.save(out_node, out)
            }
        }
        impl<B: Backend> Backward<B, 1> for ExpandDim {
            // (input shape, output shape)
            type State = (Shape, Shape);
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (shape_in, shape_out) = ops.state;
                let ndims_in = shape_in.num_dims();
                let ndims_out = shape_out.num_dims();
                // Right-align the input shape with the output rank, padding
                // leading dims with 1 (standard broadcasting alignment).
                let mut shape_expanded = vec![1; ndims_out];
                debug_assert!(ndims_out >= ndims_in);
                for i in 0..ndims_in {
                    shape_expanded[i + (ndims_out - ndims_in)] = shape_in.dims[i];
                }
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    let shape_grad = grad.shape();
                    let mut grad = grad;
                    #[allow(clippy::needless_range_loop)]
                    for i in 0..ndims_out {
                        // A dim that was 1 in the input but not in the grad was
                        // broadcast: sum the gradient over it.
                        if shape_expanded[i] == 1 && shape_grad.dims[i] != 1 {
                            grad = B::float_sum_dim(grad, i);
                        }
                    }
                    B::float_reshape(grad, shape_in)
                });
            }
        }
        match ExpandDim
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroExpand::<B>::new(tensor.node.id, shape.clone()))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(prep) => prep.finish(
                (tensor.primitive.shape(), shape.clone()),
                B::float_expand(tensor.primitive, shape),
            ),
            OpsKind::UnTracked(prep) => prep.finish(B::float_expand(tensor.primitive, shape)),
        }
    }
fn float_sort(tensor: FloatTensor<Self>, dim: usize, descending: bool) -> FloatTensor<Self> {
match super::sort::SortDim
.prepare::<C>([tensor.node])
.compute_bound()
.stateful()
{
OpsKind::Tracked(prep) => {
let shape = tensor.primitive.shape();
let (tensor, indices) =
B::float_sort_with_indices(tensor.primitive, dim, descending);
prep.finish((indices, shape), tensor)
}
OpsKind::UnTracked(prep) => {
prep.finish(B::float_sort(tensor.primitive, dim, descending))
}
}
}
fn float_sort_with_indices(
tensor: FloatTensor<Self>,
dim: usize,
descending: bool,
) -> (FloatTensor<Self>, IntTensor<B>) {
match super::sort::SortDim
.prepare::<C>([tensor.node])
.compute_bound()
.stateful()
{
OpsKind::Tracked(prep) => {
let shape = tensor.primitive.shape();
let (tensor, indices) =
B::float_sort_with_indices(tensor.primitive, dim, descending);
let tensor = prep.finish((indices.clone(), shape), tensor);
(tensor, indices)
}
OpsKind::UnTracked(prep) => {
let (tensor, indices) =
B::float_sort_with_indices(tensor.primitive, dim, descending);
let tensor = prep.finish(tensor);
(tensor, indices)
}
}
}
    /// Argsort returns integer indices, which are not differentiable, so the
    /// result is taken straight from the inner backend without registering
    /// anything on the autodiff graph.
    fn float_argsort(tensor: FloatTensor<Self>, dim: usize, descending: bool) -> IntTensor<B> {
        B::float_argsort(tensor.primitive, dim, descending)
    }
fn float_repeat_dim(tensor: FloatTensor<Self>, dim: usize, times: usize) -> FloatTensor<Self> {
#[derive(Debug)]
struct Repeat;
#[derive(new, Debug)]
struct RetroRepeat<B: Backend> {
tensor_id: NodeID,
dim: usize,
times: usize,
_backend: PhantomData<B>,
}
impl<B: Backend> RetroForward for RetroRepeat<B> {
fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);
let out = B::float_repeat_dim(tensor, self.dim, self.times);
states.save(out_node, out)
}
}
impl<B: Backend> Backward<B, 1> for Repeat {
type State = (usize, usize);
fn backward(
self,
ops: Ops<Self::State, 1>,
grads: &mut Gradients,
_checkpointer: &mut Checkpointer,
) {
let (dim, times) = ops.state;
unary::<B, _>(ops.parents, ops.node, grads, |grad| {
let mut dims = grad.shape().dims;
let orig_dim_size = dims[dim] / times;
if orig_dim_size > 1 {
dims[dim] = orig_dim_size;
let orig_dims = dims.clone();
dims.insert(dim + 1, times); // shape [..., orig_dim_size, times, ...]
let grad = B::float_reshape(grad, Shape::from(dims));
let grad = B::float_sum_dim(grad, dim + 1); // sum over repeat times
B::float_reshape(grad, Shape::from(orig_dims))
} else {
B::float_sum_dim(grad, dim)
}
});
}
}
match Repeat
.prepare::<C>([tensor.node.clone()])
.memory_bound()
.retro_forward(RetroRepeat::<B>::new(tensor.node.id, dim, times))
.parents([&tensor])
.stateful()
{
OpsKind::Tracked(prep) => prep.finish(
(dim, times),
B::float_repeat_dim(tensor.primitive, dim, times),
),
OpsKind::UnTracked(prep) => {
prep.finish(B::float_repeat_dim(tensor.primitive, dim, times))
}
}
}
    fn float_cast(tensor: FloatTensor<Self>, dtype: burn_tensor::FloatDType) -> FloatTensor<Self> {
        // NOTE(review): the result is wrapped in a fresh `AutodiffTensor`, so
        // the cast output is detached from `tensor`'s autodiff graph and no
        // gradient flows through the cast — confirm this is intentional.
        AutodiffTensor::new(B::float_cast(tensor.primitive, dtype))
    }
// TODO: Implement float_prod and float_sum
// https://github.com/tracel-ai/burn/issues/1458
}
/// Records whether a binary operation's operands had mismatched shapes, so the
/// backward pass knows when a gradient must be reduced back to an operand shape.
#[derive(Debug, Clone)]
enum BinaryOpsBroadcast {
    /// The operands were broadcast; holds the original (lhs, rhs) shapes.
    Broadcasted(Shape, Shape),
    /// Shapes matched exactly; gradients pass through untouched.
    None,
}
impl BinaryOpsBroadcast {
    /// Inspects the operand shapes once during the forward pass.
    ///
    /// The full dimension vectors are compared directly. The previous version
    /// looped over `0..shape_lhs.num_dims()` and indexed `shape_rhs.dims[i]`,
    /// which panics when rhs has fewer dims and silently misses trailing axes
    /// when rhs has more; comparing the vectors is equivalent for equal ranks
    /// and robust when they differ.
    fn new<B: Backend>(lhs: &B::FloatTensorPrimitive, rhs: &B::FloatTensorPrimitive) -> Self {
        let shape_lhs = lhs.shape();
        let shape_rhs = rhs.shape();

        if shape_lhs.dims != shape_rhs.dims {
            return Self::Broadcasted(shape_lhs, shape_rhs);
        }

        Self::None
    }

    /// Reduces `grad` back to the lhs operand's shape when broadcasting occurred.
    fn backward_lhs<B: Backend>(&self, grad: B::FloatTensorPrimitive) -> B::FloatTensorPrimitive {
        match self {
            BinaryOpsBroadcast::Broadcasted(lhs, _rhs) => broadcast_shape::<B>(grad, lhs),
            BinaryOpsBroadcast::None => grad,
        }
    }

    /// Reduces `grad` back to the rhs operand's shape when broadcasting occurred.
    fn backward_rhs<B: Backend>(&self, grad: B::FloatTensorPrimitive) -> B::FloatTensorPrimitive {
        match self {
            BinaryOpsBroadcast::Broadcasted(_lhs, rhs) => broadcast_shape::<B>(grad, rhs),
            BinaryOpsBroadcast::None => grad,
        }
    }
}
| rust | {
"argument_definitions": [
{
"definitions": [
"impl<B: Backend, C: CheckpointStrategy> FloatTensorOps<Self> for Autodiff<B, C> {\n fn float_from_data(data: TensorData, device: &Device<Self>) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_from_data(data, device))\n }\n\n fn float_random(\n shape: Shape,\n distribution: burn_tensor::Distribution,\n device: &Device<Self>,\n ) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_random(shape, distribution, device))\n }\n\n fn float_zeros(shape: Shape, device: &Device<Self>) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_zeros(shape, device))\n }\n\n fn float_ones(shape: Shape, device: &Device<Self>) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_ones(shape, device))\n }\n\n async fn float_into_data(tensor: FloatTensor<Self>) -> TensorData {\n B::float_into_data(tensor.primitive).await\n }\n\n fn float_device(tensor: &FloatTensor<Self>) -> Device<Self> {\n B::float_device(&tensor.primitive)\n }\n\n fn float_to_device(tensor: FloatTensor<Self>, device: &Device<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct ToDevice;\n\n impl<B: Backend> Backward<B, 1> for ToDevice {\n type State = B::Device;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_to_device(grad, &ops.state)\n });\n }\n }\n\n match ToDevice\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let device_old = B::float_device(&tensor.primitive);\n prep.finish(device_old, B::float_to_device(tensor.primitive, device))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_to_device(tensor.primitive, device)),\n }\n }\n\n fn float_empty(shape: Shape, device: &Device<Self>) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_empty(shape, device))\n }\n\n fn float_add(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Add;\n\n retro_binary!(RetroAdd, 
B::float_add);\n\n impl<B: Backend> Backward<B, 2> for Add {\n type State = (Shape, Shape);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape_lhs, shape_rhs) = ops.state;\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| broadcast_shape::<B>(grad, &shape_lhs),\n |grad| broadcast_shape::<B>(grad, &shape_rhs),\n );\n }\n }\n\n match Add\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroAdd::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (lhs.primitive.shape(), rhs.primitive.shape()),\n B::float_add(lhs.primitive, rhs.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_add(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_add_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct AddScalar;\n\n retro_unary_scalar!(RetroAddScalar, B::float_add_scalar);\n\n impl<B: Backend> Backward<B, 1> for AddScalar {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| grad);\n }\n }\n\n AddScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroAddScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateless(B::float_add_scalar(lhs.primitive, rhs))\n }\n\n fn float_sub(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sub;\n\n retro_binary!(RetroSub, B::float_sub);\n\n impl<B: Backend> Backward<B, 2> for Sub {\n type State = (Shape, Shape);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape_lhs, shape_rhs) = ops.state;\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| 
broadcast_shape::<B>(grad, &shape_lhs),\n |grad| broadcast_shape::<B>(B::float_neg(grad), &shape_rhs),\n );\n }\n }\n\n match Sub\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroSub::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (lhs.primitive.shape(), rhs.primitive.shape()),\n B::float_sub(lhs.primitive, rhs.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_sub(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_sub_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct SubScalar;\n\n retro_unary_scalar!(RetroSubScalar, B::float_sub_scalar);\n\n impl<B: Backend> Backward<B, 1> for SubScalar {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| grad);\n }\n }\n\n SubScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroSubScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateless(B::float_sub_scalar(lhs.primitive, rhs))\n }\n\n fn float_mul(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Mul;\n\n retro_binary!(RetroMul, B::float_mul);\n\n impl<B: Backend> Backward<B, 2> for Mul {\n type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (lhs, rhs, broadcast) = ops.state;\n let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));\n let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let grad = B::float_mul(grad, rhs.unwrap());\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n let grad = B::float_mul(grad, lhs.unwrap());\n 
broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let lhs_tracked = lhs.is_tracked();\n let rhs_tracked = rhs.is_tracked();\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match Mul\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroMul::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));\n let rhs_state = lhs_tracked.then(|| prep.checkpoint(&rhs));\n\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n B::float_mul(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_mul(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_mul_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct MulScalar;\n\n retro_unary_scalar!(RetroMulScalar, B::float_mul_scalar);\n\n impl<B: Backend> Backward<B, 1> for MulScalar {\n type State = FloatElem<B>;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_mul_scalar(grad, ops.state)\n });\n }\n }\n\n match MulScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroMulScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(rhs, B::float_mul_scalar(lhs.primitive, rhs)),\n OpsKind::UnTracked(prep) => prep.finish(B::float_mul_scalar(lhs.primitive, rhs)),\n }\n }\n\n fn float_div(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Div;\n\n retro_binary!(RetroDiv, B::float_div);\n\n impl<B: Backend> Backward<B, 2> for Div {\n type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut 
Checkpointer,\n ) {\n let (lhs, rhs, broadcast) = ops.state;\n let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));\n let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));\n let [rhs_4lhs, rhs_4rhs] = duplicate(&ops.parents, rhs);\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let rhs = rhs_4lhs.unwrap();\n let value = B::float_powf_scalar(rhs, -1.0);\n let grad = B::float_mul(grad, value);\n\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n let rhs = rhs_4rhs.unwrap();\n let lhs = lhs.unwrap();\n let value = B::float_div(B::float_neg(lhs), B::float_powf_scalar(rhs, 2.0));\n let grad = B::float_mul(grad, value);\n\n broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let lhs_tracked = lhs.is_tracked();\n let rhs_tracked = rhs.is_tracked();\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match Div\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroDiv::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));\n let rhs_state = (lhs_tracked || rhs_tracked).then(|| prep.checkpoint(&rhs));\n\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n B::float_div(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_div(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_div_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct DivScalar;\n\n retro_unary_scalar!(RetroDivScalar, B::float_div_scalar);\n\n impl<B: Backend> Backward<B, 1> for DivScalar {\n type State = FloatElem<B>;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let tmp = 1.0 / ops.state.elem::<f32>();\n B::float_mul_scalar(grad, tmp.elem())\n });\n }\n 
}\n\n match DivScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroDivScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(rhs, B::float_div_scalar(lhs.primitive, rhs)),\n OpsKind::UnTracked(prep) => prep.finish(B::float_div_scalar(lhs.primitive, rhs)),\n }\n }\n\n fn float_remainder(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Rem;\n\n retro_binary!(RetroRem, B::float_remainder);\n\n impl<B: Backend> Backward<B, 2> for Rem {\n type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (lhs, rhs, broadcast) = ops.state;\n let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));\n let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n // remainder(x, y) = x - floor(x / y) * y\n // partial(x - floor(x / y) * y, x) = 1\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n // partial(x - floor(x / y) * y, y) = - floor(x / y)\n let rhs = rhs.unwrap();\n let lhs = lhs.unwrap();\n let value = B::float_neg(B::float_floor(B::float_div(lhs, rhs)));\n let grad = B::float_mul(grad, value);\n broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let lhs_tracked = lhs.is_tracked();\n let rhs_tracked = rhs.is_tracked();\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match Rem\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroRem::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));\n let rhs_state = (lhs_tracked || rhs_tracked).then(|| prep.checkpoint(&rhs));\n\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n 
B::float_remainder(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_remainder(lhs.primitive, rhs.primitive))\n }\n }\n }\n\n fn float_remainder_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct RemainderScalar;\n\n retro_unary_scalar!(RetroRemainderScalar, B::float_remainder_scalar);\n\n impl<B: Backend> Backward<B, 1> for RemainderScalar {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| grad);\n }\n }\n\n RemainderScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroRemainderScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateless(B::float_remainder_scalar(lhs.primitive, rhs))\n }\n\n fn float_matmul(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Matmul;\n\n impl<B: Backend> Backward<B, 2> for Matmul {\n type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (lhs, rhs, broadcast) = ops.state;\n let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));\n let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let rhs = B::float_transpose(rhs.unwrap());\n let grad = B::float_matmul(grad, rhs);\n\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n let lhs = B::float_transpose(lhs.unwrap());\n let grad = B::float_matmul(lhs, grad);\n\n broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let lhs_tracked = lhs.is_tracked();\n let rhs_tracked = rhs.is_tracked();\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match Matmul\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n 
.compute_bound()\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));\n let rhs_state = lhs_tracked.then(|| prep.checkpoint(&rhs));\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n B::float_matmul(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_matmul(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_neg(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Neg;\n\n retro_unary!(RetroNeg, B::float_neg);\n\n impl<B: Backend> Backward<B, 1> for Neg {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| B::float_neg(grad));\n }\n }\n\n Neg.prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroNeg::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateless(B::float_neg(tensor.primitive))\n }\n\n fn float_recip(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Recip;\n\n retro_unary!(RetroRecip, B::float_recip);\n\n impl<B: Backend> Backward<B, 1> for Recip {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let tensor = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let tmp = B::float_powf_scalar(tensor, -2.0);\n let value = B::float_neg(tmp);\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Recip\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroRecip::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_recip(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_recip(tensor.primitive)),\n }\n }\n\n fn float_swap_dims(tensor: 
FloatTensor<Self>, dim1: usize, dim2: usize) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct SwapDim;\n\n #[derive(new, Debug)]\n struct RetroSwapDims<B: Backend> {\n input_id: NodeID,\n dim1: usize,\n dim2: usize,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroSwapDims<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_swap_dims(input, self.dim1, self.dim2);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for SwapDim {\n type State = (usize, usize);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim1, dim2) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_swap_dims(grad, dim2, dim1)\n });\n }\n }\n\n match SwapDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSwapDims::<B>::new(tensor.node.id, dim1, dim2))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (dim1, dim2),\n B::float_swap_dims(tensor.primitive, dim1, dim2),\n ),\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_swap_dims(tensor.primitive, dim1, dim2))\n }\n }\n }\n\n fn float_permute(tensor: FloatTensor<Self>, axes: &[usize]) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct PermuteDim;\n\n #[derive(new, Debug)]\n struct RetroPermuteDims<B: Backend> {\n input_id: NodeID,\n axes: Vec<usize>,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroPermuteDims<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_permute(input, &self.axes);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for PermuteDim {\n type State = Vec<usize>;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: 
&mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let axes = ops.state;\n\n let mut inverse = vec![0usize; axes.len()];\n axes.iter()\n .enumerate()\n .for_each(|(i, &axis)| inverse[axis] = i);\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_permute(grad, &inverse)\n });\n }\n }\n\n match PermuteDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroPermuteDims::<B>::new(tensor.node.id, axes.to_vec()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n prep.finish(axes.to_vec(), B::float_permute(tensor.primitive, axes))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_permute(tensor.primitive, axes)),\n }\n }\n\n fn float_flip(tensor: FloatTensor<Self>, axes: &[usize]) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct FlipDim;\n\n #[derive(new, Debug)]\n struct RetroFlipDims<B: Backend> {\n input_id: NodeID,\n axes: Vec<usize>,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroFlipDims<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_flip(input, &self.axes);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for FlipDim {\n type State = Vec<usize>;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let axes = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_flip(grad, &axes)\n });\n }\n }\n\n match FlipDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroFlipDims::<B>::new(tensor.node.id, axes.to_vec()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n prep.finish(axes.to_vec(), B::float_flip(tensor.primitive, axes))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_flip(tensor.primitive, axes)),\n }\n }\n\n fn float_reshape(tensor: FloatTensor<Self>, shape: 
Shape) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct ReshapeDim;\n\n #[derive(new, Debug)]\n struct RetroReshape<B: Backend> {\n input_id: NodeID,\n shape: Shape,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroReshape<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_reshape(input, self.shape.clone());\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for ReshapeDim {\n type State = (Shape, Shape);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape_original, shape) = ops.state;\n let ndims_out = shape.num_dims();\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let shape_grad = grad.shape();\n let mut grad = grad;\n\n for i in 0..ndims_out {\n if shape.dims[i] == 1 && shape_grad.dims[i] != 1 {\n grad = B::float_sum_dim(grad, i);\n }\n }\n\n B::float_reshape(grad, shape_original)\n });\n }\n }\n\n match ReshapeDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroReshape::<B>::new(tensor.node.id, shape.clone()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (tensor.primitive.shape(), shape.clone()),\n B::float_reshape(tensor.primitive, shape),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_reshape(tensor.primitive, shape)),\n }\n }\n\n fn float_gather(\n dim: usize,\n tensor: FloatTensor<Self>,\n indices: IntTensor<B>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Gather;\n\n impl<B: Backend> Backward<B, 1> for Gather {\n type State = (usize, IntTensor<B>, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, indices, shape, device) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let zeros = 
B::float_zeros(shape, &device);\n B::float_scatter(dim, zeros, indices, grad)\n });\n }\n }\n\n match Gather\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n dim,\n indices.clone(),\n tensor.primitive.shape(),\n B::float_device(&tensor.primitive),\n ),\n B::float_gather(dim, tensor.primitive, indices),\n ),\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_gather(dim, tensor.primitive, indices))\n }\n }\n }\n\n fn float_scatter(\n dim: usize,\n tensor: FloatTensor<Self>,\n indices: IntTensor<B>,\n value: FloatTensor<Self>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Scatter;\n\n impl<B: Backend> Backward<B, 2> for Scatter {\n type State = (usize, IntTensor<B>, Shape, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, indices, shape_lhs, shape_rhs, device) = ops.state;\n let [indices_4lhs, indices_4rhs] = duplicate(&ops.parents, Some(indices));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let zeros = B::float_zeros(shape_lhs, &device);\n B::float_scatter(dim, grad, indices_4lhs.unwrap(), zeros)\n },\n |grad| {\n let zeros = B::float_zeros(shape_rhs, &device);\n B::float_scatter(dim, zeros, indices_4rhs.unwrap(), grad)\n },\n );\n }\n }\n\n match Scatter\n .prepare::<C>([tensor.node, value.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n dim,\n indices.clone(),\n tensor.primitive.shape(),\n value.primitive.shape(),\n B::float_device(&value.primitive),\n ),\n B::float_scatter(dim, tensor.primitive, indices, value.primitive),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_scatter(\n dim,\n tensor.primitive,\n indices,\n value.primitive,\n )),\n }\n }\n\n fn float_select(\n tensor: FloatTensor<Self>,\n dim: usize,\n indices: IntTensor<B>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Select;\n\n 
#[derive(new, Debug)]\n struct RetroSelect<B: Backend> {\n input_id: NodeID,\n dim: usize,\n indices: IntTensor<B>,\n }\n\n impl<B: Backend> RetroForward for RetroSelect<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_select(input, self.dim, self.indices.clone());\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for Select {\n type State = (usize, IntTensor<B>, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, indices, shape, device) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let zeros = B::float_zeros(shape, &device);\n B::float_select_assign(zeros, dim, indices, grad)\n });\n }\n }\n\n match Select\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSelect::<B>::new(tensor.node.id, dim, indices.clone()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n dim,\n indices.clone(),\n tensor.primitive.shape(),\n B::float_device(&tensor.primitive),\n ),\n B::float_select(tensor.primitive, dim, indices),\n ),\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_select(tensor.primitive, dim, indices))\n }\n }\n }\n\n fn float_select_assign(\n tensor: FloatTensor<Self>,\n dim: usize,\n indices: IntTensor<B>,\n value: FloatTensor<Self>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct IndexSelectDimAssign;\n\n #[derive(new, Debug)]\n struct RetroSelectAssign<B: Backend> {\n tensor_id: NodeID,\n dim: usize,\n indices: IntTensor<B>,\n value_id: NodeID,\n }\n\n impl<B: Backend> RetroForward for RetroSelectAssign<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);\n let value = states.get_state::<B::FloatTensorPrimitive>(&self.value_id);\n 
let out = B::float_select_assign(tensor, self.dim, self.indices.clone(), value);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 2> for IndexSelectDimAssign {\n type State = (usize, IntTensor<B>);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, indices) = ops.state;\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| grad,\n |grad| B::float_select(grad, dim, indices),\n );\n }\n }\n\n match IndexSelectDimAssign\n .prepare::<C>([tensor.node.clone(), value.node.clone()])\n .memory_bound()\n .retro_forward(RetroSelectAssign::<B>::new(\n tensor.node.id,\n dim,\n indices.clone(),\n value.node.id,\n ))\n .parents([&tensor, &value])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (dim, indices.clone()),\n B::float_select_assign(tensor.primitive, dim, indices, value.primitive),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_select_assign(\n tensor.primitive,\n dim,\n indices,\n value.primitive,\n )),\n }\n }\n\n fn float_slice(\n tensor: FloatTensor<Self>,\n ranges: &[core::ops::Range<usize>],\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Index;\n\n #[derive(new, Debug)]\n struct RetroSlice<B: Backend> {\n tensor_id: NodeID,\n ranges: Vec<core::ops::Range<usize>>,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroSlice<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);\n let out = B::float_slice(tensor, &self.ranges);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for Index {\n type State = (Vec<core::ops::Range<usize>>, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (ranges, shape, device) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let zeros = 
B::float_zeros(shape, &device);\n B::float_slice_assign(zeros, &ranges, grad)\n });\n }\n }\n\n match Index\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSlice::<B>::new(tensor.node.id, ranges.to_vec()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n ranges.to_vec(),\n tensor.primitive.shape(),\n B::float_device(&tensor.primitive),\n ),\n B::float_slice(tensor.primitive, ranges),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_slice(tensor.primitive, ranges)),\n }\n }\n\n fn float_slice_assign(\n tensor: FloatTensor<Self>,\n ranges: &[core::ops::Range<usize>],\n value: FloatTensor<Self>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct SliceAssign;\n\n #[derive(new, Debug)]\n struct RetroSliceAssign<B: Backend> {\n tensor_id: NodeID,\n ranges: Vec<core::ops::Range<usize>>,\n value_id: NodeID,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroSliceAssign<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);\n let value = states.get_state::<B::FloatTensorPrimitive>(&self.value_id);\n let out = B::float_slice_assign(tensor, &self.ranges, value);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 2> for SliceAssign {\n type State = (Vec<core::ops::Range<usize>>, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (ranges, shape_rhs, device) = ops.state;\n let [ranges_4lhs, ranges_4rhs] = duplicate(&ops.parents, Some(ranges));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let zeros = B::float_zeros(shape_rhs, &device);\n B::float_slice_assign(grad, &ranges_4lhs.unwrap(), zeros)\n },\n |grad| B::float_slice(grad, &ranges_4rhs.unwrap()),\n );\n }\n }\n\n match SliceAssign\n .prepare::<C>([tensor.node.clone(), value.node.clone()])\n 
.memory_bound()\n .retro_forward(RetroSliceAssign::<B>::new(\n tensor.node.id,\n ranges.to_vec(),\n value.node.id,\n ))\n .parents([&tensor, &value])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n ranges.to_vec(),\n value.primitive.shape(),\n B::float_device(&value.primitive),\n ),\n B::float_slice_assign(tensor.primitive, ranges, value.primitive),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_slice_assign(\n tensor.primitive,\n ranges,\n value.primitive,\n )),\n }\n }\n\n fn float_mask_where(\n tensor: FloatTensor<Self>,\n mask: BoolTensor<Self>,\n source: FloatTensor<Self>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct MaskWhere;\n\n impl<B: Backend> Backward<B, 2> for MaskWhere {\n type State = (BoolTensor<B>, Shape, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (mask, shape_lhs, shape_rhs, device) = ops.state;\n let [mask_4lhs, mask_4rhs] = duplicate(&ops.parents, Some(mask));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let zeros = B::float_zeros(shape_lhs.clone(), &device);\n let grad = B::float_mask_where(grad, mask_4lhs.unwrap(), zeros);\n\n broadcast_shape::<B>(grad, &shape_lhs)\n },\n |grad| {\n let zeros = B::float_zeros(shape_rhs.clone(), &device);\n let grad = B::float_mask_where(zeros, mask_4rhs.unwrap(), grad);\n\n broadcast_shape::<B>(grad, &shape_rhs)\n },\n );\n }\n }\n\n match MaskWhere\n .prepare::<C>([tensor.node, source.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n mask.clone(),\n tensor.primitive.shape(),\n source.primitive.shape(),\n B::float_device(&source.primitive),\n ),\n B::float_mask_where(tensor.primitive, mask, source.primitive),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_mask_where(\n tensor.primitive,\n mask,\n source.primitive,\n )),\n }\n }\n\n fn float_mask_fill(\n tensor: FloatTensor<Self>,\n mask: 
BoolTensor<B>,\n value: FloatElem<B>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct MaskFill;\n\n impl<B: Backend> Backward<B, 1> for MaskFill {\n type State = BoolTensor<B>;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_mask_fill(grad, ops.state, 0.elem())\n });\n }\n }\n\n match MaskFill\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n mask.clone(),\n B::float_mask_fill(tensor.primitive, mask, value),\n ),\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_mask_fill(tensor.primitive, mask, value))\n }\n }\n }\n\n fn float_equal(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {\n B::float_equal(lhs.primitive, rhs.primitive)\n }\n\n fn float_equal_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {\n B::float_equal_elem(lhs.primitive, rhs)\n }\n\n fn float_greater(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {\n B::float_greater(lhs.primitive, rhs.primitive)\n }\n\n fn float_greater_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {\n B::float_greater_elem(lhs.primitive, rhs)\n }\n\n fn float_greater_equal(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {\n B::float_greater_equal(lhs.primitive, rhs.primitive)\n }\n\n fn float_greater_equal_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {\n B::float_greater_equal_elem(lhs.primitive, rhs)\n }\n\n fn float_lower(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {\n B::float_lower(lhs.primitive, rhs.primitive)\n }\n\n fn float_lower_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {\n B::float_lower_elem(lhs.primitive, rhs)\n }\n\n fn float_lower_equal(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {\n B::float_lower_equal(lhs.primitive, rhs.primitive)\n 
    }

    /// Element-wise `lhs <= rhs` against a scalar; comparisons are not differentiable,
    /// so the call is forwarded directly to the inner backend.
    fn float_lower_equal_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {
        B::float_lower_equal_elem(lhs.primitive, rhs)
    }

    /// Detach the tensor from the autodiff graph while preserving its
    /// `require_grad` flag on the freshly created node.
    fn float_detach(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        // When we detach a tensor, we remove it from the graph, but we still want to keep the
        // `require_grad` setting.
        let is_require_grad = Self::float_is_require_grad(&tensor);
        // A new AutodiffTensor has no parents, which severs the graph link.
        let tensor = AutodiffTensor::new(tensor.primitive);

        match is_require_grad {
            true => tensor.require_grad(),
            false => tensor,
        }
    }

    /// Enable or disable gradient tracking. Disabling creates a fresh,
    /// parentless node (equivalent to a detach without the grad flag).
    fn float_set_require_grad(tensor: FloatTensor<Self>, require_grad: bool) -> FloatTensor<Self> {
        if require_grad {
            return tensor.require_grad();
        }

        AutodiffTensor::new(tensor.primitive)
    }

    /// Whether this tensor's node is marked as requiring gradients.
    fn float_is_require_grad(tensor: &FloatTensor<Self>) -> bool {
        matches!(tensor.node.requirement, Requirement::Grad)
    }

    /// Mean over all elements, with backward distributing `grad / num_elements`
    /// uniformly back to the input shape.
    fn float_mean(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Mean;

        impl<B: Backend> Backward<B, 1> for Mean {
            // Checkpointed state: the input shape, needed to rebuild the gradient.
            type State = Shape;

            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    let shape = ops.state;
                    // d(mean)/dx_i = 1/N for every element.
                    let val = 1_f64 / shape.num_elements() as f64;
                    let ones = B::float_ones(shape, &B::float_device(&grad));
                    let val = B::float_mul_scalar(ones, val.elem());

                    // Expand the (scalar-shaped) incoming grad to the input rank.
                    let grad = unsqueeze_like::<B>(grad, val.shape());
                    B::float_mul(val, grad)
                });
            }
        }

        match Mean.prepare::<C>([tensor.node]).compute_bound().stateful() {
            OpsKind::Tracked(prep) => {
                prep.finish(tensor.primitive.shape(), B::float_mean(tensor.primitive))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_mean(tensor.primitive)),
        }
    }

    /// Sum over all elements, with backward broadcasting the incoming gradient
    /// (ones * grad) back to the input shape.
    fn float_sum(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Sum;

        impl<B: Backend> Backward<B, 1> for Sum {
            // Checkpointed state: the input shape.
            type State = Shape;

            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut
Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let val = B::float_ones(ops.state, &B::float_device(&grad));\n\n let grad = unsqueeze_like::<B>(grad, val.shape());\n B::float_mul(val, grad)\n });\n }\n }\n\n match Sum.prepare::<C>([tensor.node]).compute_bound().stateful() {\n OpsKind::Tracked(prep) => {\n prep.finish(tensor.primitive.shape(), B::float_sum(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_sum(tensor.primitive)),\n }\n }\n\n fn float_mean_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct MeanDim;\n\n impl<B: Backend> Backward<B, 1> for MeanDim {\n type State = (Shape, usize);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape, dim) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let val = 1_f64 / shape.dims[dim] as f64;\n let ones = B::float_ones(shape, &B::float_device(&grad));\n let val = B::float_mul_scalar(ones, B::FloatElem::from_elem(val));\n\n let grad = B::float_sum_dim(grad, dim);\n B::float_mul(val, grad)\n });\n }\n }\n\n match MeanDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (tensor.primitive.shape(), dim),\n B::float_mean_dim(tensor.primitive, dim),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_mean_dim(tensor.primitive, dim)),\n }\n }\n\n fn float_sum_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct SumDim;\n\n impl<B: Backend> Backward<B, 1> for SumDim {\n type State = (Shape, usize);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape, dim) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let ones = B::float_ones(shape, &B::float_device(&grad));\n let grad = 
B::float_sum_dim(grad, dim);\n\n B::float_mul(ones, grad)\n });\n }\n }\n\n match SumDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (tensor.primitive.shape(), dim),\n B::float_sum_dim(tensor.primitive, dim),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_sum_dim(tensor.primitive, dim)),\n }\n }\n\n fn float_argmax(tensor: FloatTensor<Self>, dim: usize) -> IntTensor<B> {\n B::float_argmax(tensor.primitive, dim)\n }\n\n fn float_argmin(tensor: FloatTensor<Self>, dim: usize) -> IntTensor<B> {\n B::float_argmin(tensor.primitive, dim)\n }\n\n fn float_exp(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Exp;\n\n retro_unary!(RetroExp, B::float_exp);\n\n impl<B: Backend> Backward<B, 1> for Exp {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n let output = B::float_exp(input);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_mul(grad, output)\n });\n }\n }\n\n match Exp\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroExp::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_exp(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_exp(tensor.primitive)),\n }\n }\n\n fn float_log(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Log;\n\n retro_unary!(RetroLog, B::float_log);\n\n impl<B: Backend> Backward<B, 1> for Log {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = 
B::float_powf_scalar(input, -1.0);\n B::float_mul(grad, value)\n });\n }\n }\n\n match Log\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroLog::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_log(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_log(tensor.primitive)),\n }\n }\n\n fn float_log1p(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Log1P;\n\n retro_unary!(RetroLog1P, B::float_log1p);\n\n impl<B: Backend> Backward<B, 1> for Log1P {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_add_scalar(input, 1.elem());\n let value = B::float_powf_scalar(value, -1.0);\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Log1P\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroLog1P::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_log1p(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_log1p(tensor.primitive)),\n }\n }\n\n fn float_powf_scalar(tensor: FloatTensor<Self>, value: f32) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct PowfScalar;\n\n #[derive(new, Debug)]\n struct RetroPowfScalar<B: Backend> {\n lhs_id: NodeID,\n rhs: f32,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroPowfScalar<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let lhs = states.get_state::<B::FloatTensorPrimitive>(&self.lhs_id);\n let out = B::float_powf_scalar(lhs, self.rhs);\n states.save(out_node, out)\n }\n }\n\n impl<B: 
Backend> Backward<B, 1> for PowfScalar {\n type State = (NodeID, f32);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (tensor_id, value) = ops.state;\n let tensor = checkpointer.retrieve_node_output(tensor_id);\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let tmp = B::float_powf_scalar(tensor, value - 1.0);\n let value = B::float_mul_scalar(tmp, value.elem());\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match PowfScalar\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroPowfScalar::<B>::new(tensor.node.id, value))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = (prep.checkpoint(&tensor), value);\n prep.finish(state, B::float_powf_scalar(tensor.primitive, value))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_powf_scalar(tensor.primitive, value)),\n }\n }\n\n fn float_sqrt(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sqrt;\n\n retro_unary!(RetroSqrt, B::float_sqrt);\n\n impl<B: Backend> Backward<B, 1> for Sqrt {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_div_scalar(B::float_powf_scalar(input, -0.5), 2.elem());\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Sqrt\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSqrt::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_sqrt(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_sqrt(tensor.primitive)),\n }\n }\n\n fn float_abs(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Abs;\n\n 
retro_unary!(RetroAbs, B::float_abs);\n\n impl<B: Backend> Backward<B, 1> for Abs {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let tensor: B::FloatTensorPrimitive = checkpointer.retrieve_node_output(ops.state);\n let state = B::float_sign(tensor);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_mul(grad, state)\n });\n }\n }\n\n match Abs\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroAbs::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_abs(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_abs(tensor.primitive)),\n }\n }\n\n fn float_cos(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Cos;\n\n retro_unary!(RetroCos, B::float_cos);\n\n impl<B: Backend> Backward<B, 1> for Cos {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_neg(B::float_sin(input));\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Cos\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroCos::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_cos(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_cos(tensor.primitive)),\n }\n }\n\n fn float_sin(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sin;\n\n retro_unary!(RetroSin, B::float_sin);\n\n impl<B: Backend> Backward<B, 1> for Sin {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 
1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let state = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_cos(state);\n B::float_mul(grad, value)\n });\n }\n }\n\n match Sin\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSin::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_sin(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_sin(tensor.primitive)),\n }\n }\n\n fn float_tanh(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Tanh;\n\n retro_unary!(RetroTanh, B::float_tanh);\n\n impl<B: Backend> Backward<B, 1> for Tanh {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n let state = B::float_tanh(input);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_add_scalar(\n B::float_neg(B::float_powf_scalar(state, 2.0)),\n 1.elem(),\n );\n B::float_mul(grad, value)\n });\n }\n }\n\n match Tanh\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroTanh::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_tanh(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_tanh(tensor.primitive)),\n }\n }\n\n fn float_round(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Round;\n retro_unary!(RetroRound, B::float_round);\n\n impl<B: Backend> Backward<B, 1> for Round {\n type State = (Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n 
) {\n let (shape, device) = ops.state;\n unary::<B, _>(ops.parents, ops.node, grads, |_grad| {\n B::float_zeros(shape, &device)\n })\n }\n }\n\n match Round\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroRound::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (tensor.primitive.shape(), B::float_device(&tensor.primitive)),\n B::float_round(tensor.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_round(tensor.primitive)),\n }\n }\n\n fn float_floor(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Floor;\n retro_unary!(RetroFloor, B::float_floor);\n\n impl<B: Backend> Backward<B, 1> for Floor {\n type State = (Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape, device) = ops.state;\n unary::<B, _>(ops.parents, ops.node, grads, |_grad| {\n B::float_zeros(shape, &device)\n })\n }\n }\n\n match Floor\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroFloor::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (tensor.primitive.shape(), B::float_device(&tensor.primitive)),\n B::float_floor(tensor.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_floor(tensor.primitive)),\n }\n }\n\n fn float_ceil(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Ceil;\n retro_unary!(RetroCeil, B::float_ceil);\n\n impl<B: Backend> Backward<B, 1> for Ceil {\n type State = (Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape, device) = ops.state;\n unary::<B, _>(ops.parents, ops.node, grads, |_grad| {\n B::float_zeros(shape, &device)\n })\n }\n }\n\n match Ceil\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n 
.retro_forward(RetroCeil::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (tensor.primitive.shape(), B::float_device(&tensor.primitive)),\n B::float_floor(tensor.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_floor(tensor.primitive)),\n }\n }\n\n fn float_erf(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Erf;\n\n retro_unary!(RetroErf, B::float_erf);\n\n impl<B: Backend> Backward<B, 1> for Erf {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let ops = checkpointer.retrieve_node_output(ops.state);\n let exponent = B::float_neg(B::float_powf_scalar(ops, 2.0));\n let numerator = B::float_mul_scalar(B::float_exp(exponent), 2.0.elem());\n let denominator = core::f64::consts::PI.sqrt().elem();\n let value = B::float_div_scalar(numerator, denominator);\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Erf\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroErf::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_erf(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_erf(tensor.primitive)),\n }\n }\n\n fn float_cat(tensors: Vec<FloatTensor<Self>>, dim: usize) -> FloatTensor<Self> {\n #[derive(new, Debug)]\n struct CatStep<B: Backend> {\n nodes: Vec<Option<NodeRef>>,\n // The dimension of each tensor along the dim dimension.\n // This indicates the number of dimension concatenated for each tensor.\n dim_sizes: Vec<usize>,\n output: NodeRef,\n phantom: PhantomData<B>,\n dim: usize,\n }\n\n impl<B: Backend> Step for CatStep<B> {\n fn step(self: Box<Self>, grads: &mut Gradients, _checkpointer: &mut Checkpointer) {\n let grad = 
grads.consume::<B>(&self.output);\n let ranges: Vec<_> = grad.shape().dims.iter().map(|v| 0..*v).collect();\n\n let mut current_index = 0;\n\n self.nodes\n .into_iter()\n .zip(self.dim_sizes)\n .filter_map(|(node, dim_size)| node.map(|node| (node, dim_size)))\n .for_each(|(node, dim_size)| {\n let mut ranges = ranges.clone();\n ranges[self.dim] = current_index..dim_size + current_index;\n current_index += dim_size;\n grads.register::<B>(node.id, B::float_slice(grad.clone(), &ranges));\n });\n }\n\n fn node(&self) -> NodeID {\n self.output.id\n }\n\n fn parents(&self) -> Vec<NodeID> {\n self.nodes\n .iter()\n .filter_map(|node| node.clone())\n .map(|node| node.id)\n .collect()\n }\n fn depth(&self) -> usize {\n self.output.order\n }\n }\n\n let mut nodes = Vec::with_capacity(tensors.len());\n let mut primitives = Vec::with_capacity(tensors.len());\n let mut dim_sizes = Vec::with_capacity(tensors.len());\n\n tensors.into_iter().for_each(|tensor| {\n dim_sizes.push(tensor.primitive.shape().dims[dim]);\n nodes.push(tensor.node);\n primitives.push(tensor.primitive);\n });\n\n let requirement = Requirement::from_nodes(&nodes);\n\n // For simplicity, this operation does not checkpoint anything\n let cat_computing_property = ComputingProperty::Ambiguous;\n let checkpointer_builder = CheckpointerBuilder::default();\n\n let output = B::float_cat(primitives, dim);\n if requirement.is_none() {\n return AutodiffTensor::from_parents(\n output,\n &nodes,\n requirement,\n cat_computing_property,\n );\n }\n\n let output =\n AutodiffTensor::from_parents(output, &nodes, requirement, cat_computing_property);\n let nodes = nodes\n .into_iter()\n .map(|node| node.clone_if_require_grad())\n .collect::<Vec<_>>();\n\n let ops = CatStep::<B>::new(nodes, dim_sizes, output.node.clone(), dim);\n output.register_step(ops, checkpointer_builder)\n }\n\n fn float_max_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {\n match MaxMinDim\n .prepare::<C>([tensor.node])\n 
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => {
                // Use the with-indices variant so the backward (MaxMinDim,
                // defined elsewhere in this file) can scatter the gradient
                // back to the argmax positions.
                let shape = tensor.primitive.shape();
                let (tensor, index) = B::float_max_dim_with_indices(tensor.primitive, dim);
                prep.finish((index, shape), tensor)
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_max_dim(tensor.primitive, dim)),
        }
    }
    /// Max along `dim`, also returning the argmax indices.
    /// Same backward as `float_max_dim`; the indices are additionally handed
    /// back to the caller (cloned into the op state when tracked).
    fn float_max_dim_with_indices(
        tensor: FloatTensor<Self>,
        dim: usize,
    ) -> (FloatTensor<Self>, IntTensor<B>) {
        match MaxMinDim
            .prepare::<C>([tensor.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => {
                let shape = tensor.primitive.shape();
                let (tensor, index) = B::float_max_dim_with_indices(tensor.primitive, dim);
                let tensor = prep.finish((index.clone(), shape), tensor);

                (tensor, index)
            }
            OpsKind::UnTracked(prep) => {
                let (tensor, index) = B::float_max_dim_with_indices(tensor.primitive, dim);
                let tensor = prep.finish(tensor);

                (tensor, index)
            }
        }
    }
    /// Min along `dim`. Mirrors `float_max_dim`, sharing the same `MaxMinDim`
    /// backward implementation (gradient routed to the argmin positions).
    fn float_min_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {
        match MaxMinDim
            .prepare::<C>([tensor.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => {
                let shape = tensor.primitive.shape();
                let (tensor, index) = B::float_min_dim_with_indices(tensor.primitive, dim);
                prep.finish((index, shape), tensor)
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_min_dim(tensor.primitive, dim)),
        }
    }
    /// Min along `dim`, also returning the argmin indices.
    /// Mirrors `float_max_dim_with_indices`.
    fn float_min_dim_with_indices(
        tensor: FloatTensor<Self>,
        dim: usize,
    ) -> (FloatTensor<Self>, IntTensor<B>) {
        match MaxMinDim
            .prepare::<C>([tensor.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => {
                let shape = tensor.primitive.shape();
                let (tensor, index) = B::float_min_dim_with_indices(tensor.primitive, dim);
                let tensor = prep.finish((index.clone(), shape), tensor);

                (tensor, index)
            }
            OpsKind::UnTracked(prep) => {
                let (tensor, index) = B::float_min_dim_with_indices(tensor.primitive, dim);
                let tensor = prep.finish(tensor);

                (tensor, index)
            }
        }
    }

fn float_into_int(tensor: FloatTensor<Self>) -> <Autodiff<B> as Backend>::IntTensorPrimitive {\n B::float_into_int(tensor.primitive)\n }\n\n fn float_powf(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct PowF;\n\n retro_binary!(RetroPowf, B::float_powf);\n\n impl<B: Backend> Backward<B, 2> for PowF {\n type State = (NodeID, NodeID, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (lhs_id, rhs_id, broadcast) = ops.state;\n let lhs: B::FloatTensorPrimitive = checkpointer.retrieve_node_output(lhs_id);\n let rhs: B::FloatTensorPrimitive = checkpointer.retrieve_node_output(rhs_id);\n\n // Both lhs and rhs are needed for both lhs and rhs gradients, but we clone them\n // the number of times required by the parents specification.\n let [rhs_4lhs, rhs_4rhs] = duplicate(&ops.parents, Some(rhs));\n let [lhs_4lhs, lhs_4rhs] = duplicate(&ops.parents, Some(lhs));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n //rhs*(lhs.val**(rhs-1))*grad\n let rhs1 = rhs_4lhs.unwrap();\n let rhs2 = rhs1.clone();\n let lhs = lhs_4lhs.unwrap();\n\n let tmp = B::float_powf(\n lhs,\n B::float_sub_scalar(rhs1, B::FloatElem::from_elem(1.0)),\n );\n let value = B::float_mul(tmp, rhs2);\n let grad = B::float_mul(grad, value);\n\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n //lhs**rhs * ln(lhs) * grad\n let rhs = rhs_4rhs.unwrap();\n let lhs1 = lhs_4rhs.unwrap();\n let lhs2 = lhs1.clone();\n let tmp = B::float_powf(lhs1, rhs);\n let value = B::float_mul(tmp, B::float_log(lhs2));\n let grad = B::float_mul(grad, value);\n\n broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match PowF\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroPowf::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, 
&rhs])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = prep.checkpoint(&lhs);\n let rhs_state = prep.checkpoint(&rhs);\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n B::float_powf(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_powf(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_sign(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sign;\n\n retro_unary!(RetroSign, B::float_sign);\n\n impl<B: Backend> Backward<B, 1> for Sign {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad|\n // Always return 0 because the derivative of the sign function\n // does not contribute to gradient updates in a meaningful way.\n B::float_mul_scalar(grad, 0.elem()));\n }\n }\n\n Sign.prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSign::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateless(B::float_sign(tensor.primitive))\n }\n\n fn float_expand(tensor: FloatTensor<Self>, shape: Shape) -> FloatTensor<Self> {\n // D1: tensor, D2: shape\n #[derive(Debug)]\n struct ExpandDim;\n\n #[derive(new, Debug)]\n struct RetroExpand<B: Backend> {\n input_id: NodeID,\n shape: Shape,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroExpand<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_expand(input, self.shape.clone());\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for ExpandDim {\n type State = (Shape, Shape);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape_in, shape_out) = ops.state;\n let ndims_in = shape_in.num_dims();\n let ndims_out = shape_out.num_dims();\n\n let 
mut shape_expanded = vec![1; ndims_out];\n\n debug_assert!(ndims_out >= ndims_in);\n\n for i in 0..ndims_in {\n shape_expanded[i + (ndims_out - ndims_in)] = shape_in.dims[i];\n }\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let shape_grad = grad.shape();\n let mut grad = grad;\n\n #[allow(clippy::needless_range_loop)]\n for i in 0..ndims_out {\n if shape_expanded[i] == 1 && shape_grad.dims[i] != 1 {\n grad = B::float_sum_dim(grad, i);\n }\n }\n\n B::float_reshape(grad, shape_in)\n });\n }\n }\n\n match ExpandDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroExpand::<B>::new(tensor.node.id, shape.clone()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (tensor.primitive.shape(), shape.clone()),\n B::float_expand(tensor.primitive, shape),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_expand(tensor.primitive, shape)),\n }\n }\n\n fn float_sort(tensor: FloatTensor<Self>, dim: usize, descending: bool) -> FloatTensor<Self> {\n match super::sort::SortDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let shape = tensor.primitive.shape();\n let (tensor, indices) =\n B::float_sort_with_indices(tensor.primitive, dim, descending);\n prep.finish((indices, shape), tensor)\n }\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_sort(tensor.primitive, dim, descending))\n }\n }\n }\n\n fn float_sort_with_indices(\n tensor: FloatTensor<Self>,\n dim: usize,\n descending: bool,\n ) -> (FloatTensor<Self>, IntTensor<B>) {\n match super::sort::SortDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let shape = tensor.primitive.shape();\n let (tensor, indices) =\n B::float_sort_with_indices(tensor.primitive, dim, descending);\n let tensor = prep.finish((indices.clone(), shape), tensor);\n\n (tensor, indices)\n }\n OpsKind::UnTracked(prep) => {\n let (tensor, indices) =\n 
B::float_sort_with_indices(tensor.primitive, dim, descending);\n let tensor = prep.finish(tensor);\n\n (tensor, indices)\n }\n }\n }\n\n fn float_argsort(tensor: FloatTensor<Self>, dim: usize, descending: bool) -> IntTensor<B> {\n B::float_argsort(tensor.primitive, dim, descending)\n }\n\n fn float_repeat_dim(tensor: FloatTensor<Self>, dim: usize, times: usize) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Repeat;\n\n #[derive(new, Debug)]\n struct RetroRepeat<B: Backend> {\n tensor_id: NodeID,\n dim: usize,\n times: usize,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroRepeat<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);\n let out = B::float_repeat_dim(tensor, self.dim, self.times);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for Repeat {\n type State = (usize, usize);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, times) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let mut dims = grad.shape().dims;\n let orig_dim_size = dims[dim] / times;\n if orig_dim_size > 1 {\n dims[dim] = orig_dim_size;\n let orig_dims = dims.clone();\n dims.insert(dim + 1, times); // shape [..., orig_dim_size, times, ...]\n let grad = B::float_reshape(grad, Shape::from(dims));\n let grad = B::float_sum_dim(grad, dim + 1); // sum over repeat times\n B::float_reshape(grad, Shape::from(orig_dims))\n } else {\n B::float_sum_dim(grad, dim)\n }\n });\n }\n }\n\n match Repeat\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroRepeat::<B>::new(tensor.node.id, dim, times))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (dim, times),\n B::float_repeat_dim(tensor.primitive, dim, times),\n ),\n OpsKind::UnTracked(prep) => {\n 
prep.finish(B::float_repeat_dim(tensor.primitive, dim, times))\n }\n }\n }\n\n fn float_cast(tensor: FloatTensor<Self>, dtype: burn_tensor::FloatDType) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_cast(tensor.primitive, dtype))\n }\n\n // TODO: Implement float_prod and float_sum\n // https://github.com/tracel-ai/burn/issues/1458\n}"
],
"name": "lhs",
"type": "FloatTensor<Self>"
},
{
"definitions": [
"impl<B: Backend, C: CheckpointStrategy> FloatTensorOps<Self> for Autodiff<B, C> {\n fn float_from_data(data: TensorData, device: &Device<Self>) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_from_data(data, device))\n }\n\n fn float_random(\n shape: Shape,\n distribution: burn_tensor::Distribution,\n device: &Device<Self>,\n ) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_random(shape, distribution, device))\n }\n\n fn float_zeros(shape: Shape, device: &Device<Self>) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_zeros(shape, device))\n }\n\n fn float_ones(shape: Shape, device: &Device<Self>) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_ones(shape, device))\n }\n\n async fn float_into_data(tensor: FloatTensor<Self>) -> TensorData {\n B::float_into_data(tensor.primitive).await\n }\n\n fn float_device(tensor: &FloatTensor<Self>) -> Device<Self> {\n B::float_device(&tensor.primitive)\n }\n\n fn float_to_device(tensor: FloatTensor<Self>, device: &Device<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct ToDevice;\n\n impl<B: Backend> Backward<B, 1> for ToDevice {\n type State = B::Device;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_to_device(grad, &ops.state)\n });\n }\n }\n\n match ToDevice\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let device_old = B::float_device(&tensor.primitive);\n prep.finish(device_old, B::float_to_device(tensor.primitive, device))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_to_device(tensor.primitive, device)),\n }\n }\n\n fn float_empty(shape: Shape, device: &Device<Self>) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_empty(shape, device))\n }\n\n fn float_add(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Add;\n\n retro_binary!(RetroAdd, 
B::float_add);\n\n impl<B: Backend> Backward<B, 2> for Add {\n type State = (Shape, Shape);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape_lhs, shape_rhs) = ops.state;\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| broadcast_shape::<B>(grad, &shape_lhs),\n |grad| broadcast_shape::<B>(grad, &shape_rhs),\n );\n }\n }\n\n match Add\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroAdd::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (lhs.primitive.shape(), rhs.primitive.shape()),\n B::float_add(lhs.primitive, rhs.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_add(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_add_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct AddScalar;\n\n retro_unary_scalar!(RetroAddScalar, B::float_add_scalar);\n\n impl<B: Backend> Backward<B, 1> for AddScalar {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| grad);\n }\n }\n\n AddScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroAddScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateless(B::float_add_scalar(lhs.primitive, rhs))\n }\n\n fn float_sub(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sub;\n\n retro_binary!(RetroSub, B::float_sub);\n\n impl<B: Backend> Backward<B, 2> for Sub {\n type State = (Shape, Shape);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape_lhs, shape_rhs) = ops.state;\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| 
broadcast_shape::<B>(grad, &shape_lhs),\n |grad| broadcast_shape::<B>(B::float_neg(grad), &shape_rhs),\n );\n }\n }\n\n match Sub\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroSub::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (lhs.primitive.shape(), rhs.primitive.shape()),\n B::float_sub(lhs.primitive, rhs.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_sub(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_sub_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct SubScalar;\n\n retro_unary_scalar!(RetroSubScalar, B::float_sub_scalar);\n\n impl<B: Backend> Backward<B, 1> for SubScalar {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| grad);\n }\n }\n\n SubScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroSubScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateless(B::float_sub_scalar(lhs.primitive, rhs))\n }\n\n fn float_mul(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Mul;\n\n retro_binary!(RetroMul, B::float_mul);\n\n impl<B: Backend> Backward<B, 2> for Mul {\n type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (lhs, rhs, broadcast) = ops.state;\n let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));\n let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let grad = B::float_mul(grad, rhs.unwrap());\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n let grad = B::float_mul(grad, lhs.unwrap());\n 
broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let lhs_tracked = lhs.is_tracked();\n let rhs_tracked = rhs.is_tracked();\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match Mul\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroMul::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));\n let rhs_state = lhs_tracked.then(|| prep.checkpoint(&rhs));\n\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n B::float_mul(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_mul(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_mul_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct MulScalar;\n\n retro_unary_scalar!(RetroMulScalar, B::float_mul_scalar);\n\n impl<B: Backend> Backward<B, 1> for MulScalar {\n type State = FloatElem<B>;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_mul_scalar(grad, ops.state)\n });\n }\n }\n\n match MulScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroMulScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(rhs, B::float_mul_scalar(lhs.primitive, rhs)),\n OpsKind::UnTracked(prep) => prep.finish(B::float_mul_scalar(lhs.primitive, rhs)),\n }\n }\n\n fn float_div(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Div;\n\n retro_binary!(RetroDiv, B::float_div);\n\n impl<B: Backend> Backward<B, 2> for Div {\n type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut 
Checkpointer,\n ) {\n let (lhs, rhs, broadcast) = ops.state;\n let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));\n let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));\n let [rhs_4lhs, rhs_4rhs] = duplicate(&ops.parents, rhs);\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let rhs = rhs_4lhs.unwrap();\n let value = B::float_powf_scalar(rhs, -1.0);\n let grad = B::float_mul(grad, value);\n\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n let rhs = rhs_4rhs.unwrap();\n let lhs = lhs.unwrap();\n let value = B::float_div(B::float_neg(lhs), B::float_powf_scalar(rhs, 2.0));\n let grad = B::float_mul(grad, value);\n\n broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let lhs_tracked = lhs.is_tracked();\n let rhs_tracked = rhs.is_tracked();\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match Div\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroDiv::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));\n let rhs_state = (lhs_tracked || rhs_tracked).then(|| prep.checkpoint(&rhs));\n\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n B::float_div(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_div(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_div_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct DivScalar;\n\n retro_unary_scalar!(RetroDivScalar, B::float_div_scalar);\n\n impl<B: Backend> Backward<B, 1> for DivScalar {\n type State = FloatElem<B>;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let tmp = 1.0 / ops.state.elem::<f32>();\n B::float_mul_scalar(grad, tmp.elem())\n });\n }\n 
}\n\n match DivScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroDivScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(rhs, B::float_div_scalar(lhs.primitive, rhs)),\n OpsKind::UnTracked(prep) => prep.finish(B::float_div_scalar(lhs.primitive, rhs)),\n }\n }\n\n fn float_remainder(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Rem;\n\n retro_binary!(RetroRem, B::float_remainder);\n\n impl<B: Backend> Backward<B, 2> for Rem {\n type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (lhs, rhs, broadcast) = ops.state;\n let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));\n let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n // remainder(x, y) = x - floor(x / y) * y\n // partial(x - floor(x / y) * y, x) = 1\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n // partial(x - floor(x / y) * y, y) = - floor(x / y)\n let rhs = rhs.unwrap();\n let lhs = lhs.unwrap();\n let value = B::float_neg(B::float_floor(B::float_div(lhs, rhs)));\n let grad = B::float_mul(grad, value);\n broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let lhs_tracked = lhs.is_tracked();\n let rhs_tracked = rhs.is_tracked();\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match Rem\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroRem::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));\n let rhs_state = (lhs_tracked || rhs_tracked).then(|| prep.checkpoint(&rhs));\n\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n 
B::float_remainder(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_remainder(lhs.primitive, rhs.primitive))\n }\n }\n }\n\n fn float_remainder_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct RemainderScalar;\n\n retro_unary_scalar!(RetroRemainderScalar, B::float_remainder_scalar);\n\n impl<B: Backend> Backward<B, 1> for RemainderScalar {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| grad);\n }\n }\n\n RemainderScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroRemainderScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateless(B::float_remainder_scalar(lhs.primitive, rhs))\n }\n\n fn float_matmul(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Matmul;\n\n impl<B: Backend> Backward<B, 2> for Matmul {\n type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (lhs, rhs, broadcast) = ops.state;\n let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));\n let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let rhs = B::float_transpose(rhs.unwrap());\n let grad = B::float_matmul(grad, rhs);\n\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n let lhs = B::float_transpose(lhs.unwrap());\n let grad = B::float_matmul(lhs, grad);\n\n broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let lhs_tracked = lhs.is_tracked();\n let rhs_tracked = rhs.is_tracked();\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match Matmul\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n 
.compute_bound()\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));\n let rhs_state = lhs_tracked.then(|| prep.checkpoint(&rhs));\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n B::float_matmul(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_matmul(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_neg(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Neg;\n\n retro_unary!(RetroNeg, B::float_neg);\n\n impl<B: Backend> Backward<B, 1> for Neg {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| B::float_neg(grad));\n }\n }\n\n Neg.prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroNeg::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateless(B::float_neg(tensor.primitive))\n }\n\n fn float_recip(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Recip;\n\n retro_unary!(RetroRecip, B::float_recip);\n\n impl<B: Backend> Backward<B, 1> for Recip {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let tensor = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let tmp = B::float_powf_scalar(tensor, -2.0);\n let value = B::float_neg(tmp);\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Recip\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroRecip::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_recip(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_recip(tensor.primitive)),\n }\n }\n\n fn float_swap_dims(tensor: 
FloatTensor<Self>, dim1: usize, dim2: usize) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct SwapDim;\n\n #[derive(new, Debug)]\n struct RetroSwapDims<B: Backend> {\n input_id: NodeID,\n dim1: usize,\n dim2: usize,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroSwapDims<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_swap_dims(input, self.dim1, self.dim2);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for SwapDim {\n type State = (usize, usize);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim1, dim2) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_swap_dims(grad, dim2, dim1)\n });\n }\n }\n\n match SwapDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSwapDims::<B>::new(tensor.node.id, dim1, dim2))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (dim1, dim2),\n B::float_swap_dims(tensor.primitive, dim1, dim2),\n ),\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_swap_dims(tensor.primitive, dim1, dim2))\n }\n }\n }\n\n fn float_permute(tensor: FloatTensor<Self>, axes: &[usize]) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct PermuteDim;\n\n #[derive(new, Debug)]\n struct RetroPermuteDims<B: Backend> {\n input_id: NodeID,\n axes: Vec<usize>,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroPermuteDims<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_permute(input, &self.axes);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for PermuteDim {\n type State = Vec<usize>;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: 
&mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let axes = ops.state;\n\n let mut inverse = vec![0usize; axes.len()];\n axes.iter()\n .enumerate()\n .for_each(|(i, &axis)| inverse[axis] = i);\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_permute(grad, &inverse)\n });\n }\n }\n\n match PermuteDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroPermuteDims::<B>::new(tensor.node.id, axes.to_vec()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n prep.finish(axes.to_vec(), B::float_permute(tensor.primitive, axes))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_permute(tensor.primitive, axes)),\n }\n }\n\n fn float_flip(tensor: FloatTensor<Self>, axes: &[usize]) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct FlipDim;\n\n #[derive(new, Debug)]\n struct RetroFlipDims<B: Backend> {\n input_id: NodeID,\n axes: Vec<usize>,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroFlipDims<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_flip(input, &self.axes);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for FlipDim {\n type State = Vec<usize>;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let axes = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_flip(grad, &axes)\n });\n }\n }\n\n match FlipDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroFlipDims::<B>::new(tensor.node.id, axes.to_vec()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n prep.finish(axes.to_vec(), B::float_flip(tensor.primitive, axes))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_flip(tensor.primitive, axes)),\n }\n }\n\n fn float_reshape(tensor: FloatTensor<Self>, shape: 
Shape) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct ReshapeDim;\n\n #[derive(new, Debug)]\n struct RetroReshape<B: Backend> {\n input_id: NodeID,\n shape: Shape,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroReshape<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_reshape(input, self.shape.clone());\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for ReshapeDim {\n type State = (Shape, Shape);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape_original, shape) = ops.state;\n let ndims_out = shape.num_dims();\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let shape_grad = grad.shape();\n let mut grad = grad;\n\n for i in 0..ndims_out {\n if shape.dims[i] == 1 && shape_grad.dims[i] != 1 {\n grad = B::float_sum_dim(grad, i);\n }\n }\n\n B::float_reshape(grad, shape_original)\n });\n }\n }\n\n match ReshapeDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroReshape::<B>::new(tensor.node.id, shape.clone()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (tensor.primitive.shape(), shape.clone()),\n B::float_reshape(tensor.primitive, shape),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_reshape(tensor.primitive, shape)),\n }\n }\n\n fn float_gather(\n dim: usize,\n tensor: FloatTensor<Self>,\n indices: IntTensor<B>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Gather;\n\n impl<B: Backend> Backward<B, 1> for Gather {\n type State = (usize, IntTensor<B>, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, indices, shape, device) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let zeros = 
B::float_zeros(shape, &device);\n B::float_scatter(dim, zeros, indices, grad)\n });\n }\n }\n\n match Gather\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n dim,\n indices.clone(),\n tensor.primitive.shape(),\n B::float_device(&tensor.primitive),\n ),\n B::float_gather(dim, tensor.primitive, indices),\n ),\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_gather(dim, tensor.primitive, indices))\n }\n }\n }\n\n fn float_scatter(\n dim: usize,\n tensor: FloatTensor<Self>,\n indices: IntTensor<B>,\n value: FloatTensor<Self>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Scatter;\n\n impl<B: Backend> Backward<B, 2> for Scatter {\n type State = (usize, IntTensor<B>, Shape, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, indices, shape_lhs, shape_rhs, device) = ops.state;\n let [indices_4lhs, indices_4rhs] = duplicate(&ops.parents, Some(indices));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let zeros = B::float_zeros(shape_lhs, &device);\n B::float_scatter(dim, grad, indices_4lhs.unwrap(), zeros)\n },\n |grad| {\n let zeros = B::float_zeros(shape_rhs, &device);\n B::float_scatter(dim, zeros, indices_4rhs.unwrap(), grad)\n },\n );\n }\n }\n\n match Scatter\n .prepare::<C>([tensor.node, value.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n dim,\n indices.clone(),\n tensor.primitive.shape(),\n value.primitive.shape(),\n B::float_device(&value.primitive),\n ),\n B::float_scatter(dim, tensor.primitive, indices, value.primitive),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_scatter(\n dim,\n tensor.primitive,\n indices,\n value.primitive,\n )),\n }\n }\n\n fn float_select(\n tensor: FloatTensor<Self>,\n dim: usize,\n indices: IntTensor<B>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Select;\n\n 
#[derive(new, Debug)]\n struct RetroSelect<B: Backend> {\n input_id: NodeID,\n dim: usize,\n indices: IntTensor<B>,\n }\n\n impl<B: Backend> RetroForward for RetroSelect<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_select(input, self.dim, self.indices.clone());\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for Select {\n type State = (usize, IntTensor<B>, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, indices, shape, device) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let zeros = B::float_zeros(shape, &device);\n B::float_select_assign(zeros, dim, indices, grad)\n });\n }\n }\n\n match Select\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSelect::<B>::new(tensor.node.id, dim, indices.clone()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n dim,\n indices.clone(),\n tensor.primitive.shape(),\n B::float_device(&tensor.primitive),\n ),\n B::float_select(tensor.primitive, dim, indices),\n ),\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_select(tensor.primitive, dim, indices))\n }\n }\n }\n\n fn float_select_assign(\n tensor: FloatTensor<Self>,\n dim: usize,\n indices: IntTensor<B>,\n value: FloatTensor<Self>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct IndexSelectDimAssign;\n\n #[derive(new, Debug)]\n struct RetroSelectAssign<B: Backend> {\n tensor_id: NodeID,\n dim: usize,\n indices: IntTensor<B>,\n value_id: NodeID,\n }\n\n impl<B: Backend> RetroForward for RetroSelectAssign<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);\n let value = states.get_state::<B::FloatTensorPrimitive>(&self.value_id);\n 
let out = B::float_select_assign(tensor, self.dim, self.indices.clone(), value);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 2> for IndexSelectDimAssign {\n type State = (usize, IntTensor<B>);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, indices) = ops.state;\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| grad,\n |grad| B::float_select(grad, dim, indices),\n );\n }\n }\n\n match IndexSelectDimAssign\n .prepare::<C>([tensor.node.clone(), value.node.clone()])\n .memory_bound()\n .retro_forward(RetroSelectAssign::<B>::new(\n tensor.node.id,\n dim,\n indices.clone(),\n value.node.id,\n ))\n .parents([&tensor, &value])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (dim, indices.clone()),\n B::float_select_assign(tensor.primitive, dim, indices, value.primitive),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_select_assign(\n tensor.primitive,\n dim,\n indices,\n value.primitive,\n )),\n }\n }\n\n fn float_slice(\n tensor: FloatTensor<Self>,\n ranges: &[core::ops::Range<usize>],\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Index;\n\n #[derive(new, Debug)]\n struct RetroSlice<B: Backend> {\n tensor_id: NodeID,\n ranges: Vec<core::ops::Range<usize>>,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroSlice<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);\n let out = B::float_slice(tensor, &self.ranges);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for Index {\n type State = (Vec<core::ops::Range<usize>>, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (ranges, shape, device) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let zeros = 
B::float_zeros(shape, &device);\n B::float_slice_assign(zeros, &ranges, grad)\n });\n }\n }\n\n match Index\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSlice::<B>::new(tensor.node.id, ranges.to_vec()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n ranges.to_vec(),\n tensor.primitive.shape(),\n B::float_device(&tensor.primitive),\n ),\n B::float_slice(tensor.primitive, ranges),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_slice(tensor.primitive, ranges)),\n }\n }\n\n fn float_slice_assign(\n tensor: FloatTensor<Self>,\n ranges: &[core::ops::Range<usize>],\n value: FloatTensor<Self>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct SliceAssign;\n\n #[derive(new, Debug)]\n struct RetroSliceAssign<B: Backend> {\n tensor_id: NodeID,\n ranges: Vec<core::ops::Range<usize>>,\n value_id: NodeID,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroSliceAssign<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);\n let value = states.get_state::<B::FloatTensorPrimitive>(&self.value_id);\n let out = B::float_slice_assign(tensor, &self.ranges, value);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 2> for SliceAssign {\n type State = (Vec<core::ops::Range<usize>>, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (ranges, shape_rhs, device) = ops.state;\n let [ranges_4lhs, ranges_4rhs] = duplicate(&ops.parents, Some(ranges));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let zeros = B::float_zeros(shape_rhs, &device);\n B::float_slice_assign(grad, &ranges_4lhs.unwrap(), zeros)\n },\n |grad| B::float_slice(grad, &ranges_4rhs.unwrap()),\n );\n }\n }\n\n match SliceAssign\n .prepare::<C>([tensor.node.clone(), value.node.clone()])\n 
.memory_bound()\n .retro_forward(RetroSliceAssign::<B>::new(\n tensor.node.id,\n ranges.to_vec(),\n value.node.id,\n ))\n .parents([&tensor, &value])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n ranges.to_vec(),\n value.primitive.shape(),\n B::float_device(&value.primitive),\n ),\n B::float_slice_assign(tensor.primitive, ranges, value.primitive),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_slice_assign(\n tensor.primitive,\n ranges,\n value.primitive,\n )),\n }\n }\n\n fn float_mask_where(\n tensor: FloatTensor<Self>,\n mask: BoolTensor<Self>,\n source: FloatTensor<Self>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct MaskWhere;\n\n impl<B: Backend> Backward<B, 2> for MaskWhere {\n type State = (BoolTensor<B>, Shape, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (mask, shape_lhs, shape_rhs, device) = ops.state;\n let [mask_4lhs, mask_4rhs] = duplicate(&ops.parents, Some(mask));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let zeros = B::float_zeros(shape_lhs.clone(), &device);\n let grad = B::float_mask_where(grad, mask_4lhs.unwrap(), zeros);\n\n broadcast_shape::<B>(grad, &shape_lhs)\n },\n |grad| {\n let zeros = B::float_zeros(shape_rhs.clone(), &device);\n let grad = B::float_mask_where(zeros, mask_4rhs.unwrap(), grad);\n\n broadcast_shape::<B>(grad, &shape_rhs)\n },\n );\n }\n }\n\n match MaskWhere\n .prepare::<C>([tensor.node, source.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n mask.clone(),\n tensor.primitive.shape(),\n source.primitive.shape(),\n B::float_device(&source.primitive),\n ),\n B::float_mask_where(tensor.primitive, mask, source.primitive),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_mask_where(\n tensor.primitive,\n mask,\n source.primitive,\n )),\n }\n }\n\n fn float_mask_fill(\n tensor: FloatTensor<Self>,\n mask: 
BoolTensor<B>,\n value: FloatElem<B>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct MaskFill;\n\n impl<B: Backend> Backward<B, 1> for MaskFill {\n type State = BoolTensor<B>;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_mask_fill(grad, ops.state, 0.elem())\n });\n }\n }\n\n match MaskFill\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n mask.clone(),\n B::float_mask_fill(tensor.primitive, mask, value),\n ),\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_mask_fill(tensor.primitive, mask, value))\n }\n }\n }\n\n fn float_equal(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {\n B::float_equal(lhs.primitive, rhs.primitive)\n }\n\n fn float_equal_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {\n B::float_equal_elem(lhs.primitive, rhs)\n }\n\n fn float_greater(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {\n B::float_greater(lhs.primitive, rhs.primitive)\n }\n\n fn float_greater_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {\n B::float_greater_elem(lhs.primitive, rhs)\n }\n\n fn float_greater_equal(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {\n B::float_greater_equal(lhs.primitive, rhs.primitive)\n }\n\n fn float_greater_equal_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {\n B::float_greater_equal_elem(lhs.primitive, rhs)\n }\n\n fn float_lower(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {\n B::float_lower(lhs.primitive, rhs.primitive)\n }\n\n fn float_lower_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {\n B::float_lower_elem(lhs.primitive, rhs)\n }\n\n fn float_lower_equal(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {\n B::float_lower_equal(lhs.primitive, rhs.primitive)\n 
}\n\n fn float_lower_equal_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {\n B::float_lower_equal_elem(lhs.primitive, rhs)\n }\n\n fn float_detach(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n // When we detach a tensor, we remove it from the graph, but we still want to keep the\n // `require_grad` setting.\n let is_require_grad = Self::float_is_require_grad(&tensor);\n let tensor = AutodiffTensor::new(tensor.primitive);\n\n match is_require_grad {\n true => tensor.require_grad(),\n false => tensor,\n }\n }\n\n fn float_set_require_grad(tensor: FloatTensor<Self>, require_grad: bool) -> FloatTensor<Self> {\n if require_grad {\n return tensor.require_grad();\n }\n\n AutodiffTensor::new(tensor.primitive)\n }\n\n fn float_is_require_grad(tensor: &FloatTensor<Self>) -> bool {\n matches!(tensor.node.requirement, Requirement::Grad)\n }\n\n fn float_mean(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Mean;\n\n impl<B: Backend> Backward<B, 1> for Mean {\n type State = Shape;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let shape = ops.state;\n let val = 1_f64 / shape.num_elements() as f64;\n let ones = B::float_ones(shape, &B::float_device(&grad));\n let val = B::float_mul_scalar(ones, val.elem());\n\n let grad = unsqueeze_like::<B>(grad, val.shape());\n B::float_mul(val, grad)\n });\n }\n }\n\n match Mean.prepare::<C>([tensor.node]).compute_bound().stateful() {\n OpsKind::Tracked(prep) => {\n prep.finish(tensor.primitive.shape(), B::float_mean(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_mean(tensor.primitive)),\n }\n }\n\n fn float_sum(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sum;\n\n impl<B: Backend> Backward<B, 1> for Sum {\n type State = Shape;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut 
Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let val = B::float_ones(ops.state, &B::float_device(&grad));\n\n let grad = unsqueeze_like::<B>(grad, val.shape());\n B::float_mul(val, grad)\n });\n }\n }\n\n match Sum.prepare::<C>([tensor.node]).compute_bound().stateful() {\n OpsKind::Tracked(prep) => {\n prep.finish(tensor.primitive.shape(), B::float_sum(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_sum(tensor.primitive)),\n }\n }\n\n fn float_mean_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct MeanDim;\n\n impl<B: Backend> Backward<B, 1> for MeanDim {\n type State = (Shape, usize);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape, dim) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let val = 1_f64 / shape.dims[dim] as f64;\n let ones = B::float_ones(shape, &B::float_device(&grad));\n let val = B::float_mul_scalar(ones, B::FloatElem::from_elem(val));\n\n let grad = B::float_sum_dim(grad, dim);\n B::float_mul(val, grad)\n });\n }\n }\n\n match MeanDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (tensor.primitive.shape(), dim),\n B::float_mean_dim(tensor.primitive, dim),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_mean_dim(tensor.primitive, dim)),\n }\n }\n\n fn float_sum_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct SumDim;\n\n impl<B: Backend> Backward<B, 1> for SumDim {\n type State = (Shape, usize);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape, dim) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let ones = B::float_ones(shape, &B::float_device(&grad));\n let grad = 
B::float_sum_dim(grad, dim);\n\n B::float_mul(ones, grad)\n });\n }\n }\n\n match SumDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (tensor.primitive.shape(), dim),\n B::float_sum_dim(tensor.primitive, dim),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_sum_dim(tensor.primitive, dim)),\n }\n }\n\n fn float_argmax(tensor: FloatTensor<Self>, dim: usize) -> IntTensor<B> {\n B::float_argmax(tensor.primitive, dim)\n }\n\n fn float_argmin(tensor: FloatTensor<Self>, dim: usize) -> IntTensor<B> {\n B::float_argmin(tensor.primitive, dim)\n }\n\n fn float_exp(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Exp;\n\n retro_unary!(RetroExp, B::float_exp);\n\n impl<B: Backend> Backward<B, 1> for Exp {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n let output = B::float_exp(input);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_mul(grad, output)\n });\n }\n }\n\n match Exp\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroExp::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_exp(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_exp(tensor.primitive)),\n }\n }\n\n fn float_log(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Log;\n\n retro_unary!(RetroLog, B::float_log);\n\n impl<B: Backend> Backward<B, 1> for Log {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = 
B::float_powf_scalar(input, -1.0);\n B::float_mul(grad, value)\n });\n }\n }\n\n match Log\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroLog::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_log(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_log(tensor.primitive)),\n }\n }\n\n fn float_log1p(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Log1P;\n\n retro_unary!(RetroLog1P, B::float_log1p);\n\n impl<B: Backend> Backward<B, 1> for Log1P {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_add_scalar(input, 1.elem());\n let value = B::float_powf_scalar(value, -1.0);\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Log1P\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroLog1P::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_log1p(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_log1p(tensor.primitive)),\n }\n }\n\n fn float_powf_scalar(tensor: FloatTensor<Self>, value: f32) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct PowfScalar;\n\n #[derive(new, Debug)]\n struct RetroPowfScalar<B: Backend> {\n lhs_id: NodeID,\n rhs: f32,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroPowfScalar<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let lhs = states.get_state::<B::FloatTensorPrimitive>(&self.lhs_id);\n let out = B::float_powf_scalar(lhs, self.rhs);\n states.save(out_node, out)\n }\n }\n\n impl<B: 
Backend> Backward<B, 1> for PowfScalar {\n type State = (NodeID, f32);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (tensor_id, value) = ops.state;\n let tensor = checkpointer.retrieve_node_output(tensor_id);\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let tmp = B::float_powf_scalar(tensor, value - 1.0);\n let value = B::float_mul_scalar(tmp, value.elem());\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match PowfScalar\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroPowfScalar::<B>::new(tensor.node.id, value))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = (prep.checkpoint(&tensor), value);\n prep.finish(state, B::float_powf_scalar(tensor.primitive, value))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_powf_scalar(tensor.primitive, value)),\n }\n }\n\n fn float_sqrt(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sqrt;\n\n retro_unary!(RetroSqrt, B::float_sqrt);\n\n impl<B: Backend> Backward<B, 1> for Sqrt {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_div_scalar(B::float_powf_scalar(input, -0.5), 2.elem());\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Sqrt\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSqrt::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_sqrt(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_sqrt(tensor.primitive)),\n }\n }\n\n fn float_abs(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Abs;\n\n 
retro_unary!(RetroAbs, B::float_abs);\n\n impl<B: Backend> Backward<B, 1> for Abs {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let tensor: B::FloatTensorPrimitive = checkpointer.retrieve_node_output(ops.state);\n let state = B::float_sign(tensor);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_mul(grad, state)\n });\n }\n }\n\n match Abs\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroAbs::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_abs(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_abs(tensor.primitive)),\n }\n }\n\n fn float_cos(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Cos;\n\n retro_unary!(RetroCos, B::float_cos);\n\n impl<B: Backend> Backward<B, 1> for Cos {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_neg(B::float_sin(input));\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Cos\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroCos::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_cos(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_cos(tensor.primitive)),\n }\n }\n\n fn float_sin(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sin;\n\n retro_unary!(RetroSin, B::float_sin);\n\n impl<B: Backend> Backward<B, 1> for Sin {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 
1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let state = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_cos(state);\n B::float_mul(grad, value)\n });\n }\n }\n\n match Sin\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSin::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_sin(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_sin(tensor.primitive)),\n }\n }\n\n fn float_tanh(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Tanh;\n\n retro_unary!(RetroTanh, B::float_tanh);\n\n impl<B: Backend> Backward<B, 1> for Tanh {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n let state = B::float_tanh(input);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_add_scalar(\n B::float_neg(B::float_powf_scalar(state, 2.0)),\n 1.elem(),\n );\n B::float_mul(grad, value)\n });\n }\n }\n\n match Tanh\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroTanh::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_tanh(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_tanh(tensor.primitive)),\n }\n }\n\n fn float_round(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Round;\n retro_unary!(RetroRound, B::float_round);\n\n impl<B: Backend> Backward<B, 1> for Round {\n type State = (Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n 
            ) {
                let (shape, device) = ops.state;
                // round() is piecewise constant, so its derivative is zero almost
                // everywhere; the gradient is simply a zero tensor with the
                // forward input's shape, allocated on the same device.
                unary::<B, _>(ops.parents, ops.node, grads, |_grad| {
                    B::float_zeros(shape, &device)
                })
            }
        }

        match Round
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroRound::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            // Tracked: save (shape, device) so backward can build the zero gradient.
            OpsKind::Tracked(preps) => preps.finish(
                (tensor.primitive.shape(), B::float_device(&tensor.primitive)),
                B::float_round(tensor.primitive),
            ),
            OpsKind::UnTracked(preps) => preps.finish(B::float_round(tensor.primitive)),
        }
    }

    /// Element-wise floor with autodiff support.
    ///
    /// floor() is piecewise constant, so the registered backward step produces a
    /// zero gradient of the input's shape (see `Floor::backward` below).
    fn float_floor(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Floor;
        retro_unary!(RetroFloor, B::float_floor);

        impl<B: Backend> Backward<B, 1> for Floor {
            // (shape, device) of the forward input, needed to build the zero gradient.
            type State = (Shape, B::Device);

            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (shape, device) = ops.state;
                // Derivative of floor is zero almost everywhere; ignore the
                // incoming gradient and emit zeros.
                unary::<B, _>(ops.parents, ops.node, grads, |_grad| {
                    B::float_zeros(shape, &device)
                })
            }
        }

        match Floor
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroFloor::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(preps) => preps.finish(
                (tensor.primitive.shape(), B::float_device(&tensor.primitive)),
                B::float_floor(tensor.primitive),
            ),
            OpsKind::UnTracked(preps) => preps.finish(B::float_floor(tensor.primitive)),
        }
    }

    /// Element-wise ceil with autodiff support.
    ///
    /// Like round/floor, ceil() has a zero derivative almost everywhere, so the
    /// backward step returns a zero gradient of the input's shape.
    fn float_ceil(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Ceil;
        retro_unary!(RetroCeil, B::float_ceil);

        impl<B: Backend> Backward<B, 1> for Ceil {
            // (shape, device) of the forward input, needed to build the zero gradient.
            type State = (Shape, B::Device);

            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (shape, device) = ops.state;
                unary::<B, _>(ops.parents, ops.node, grads, |_grad| {
                    B::float_zeros(shape, &device)
                })
            }
        }

        match Ceil
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
.retro_forward(RetroCeil::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (tensor.primitive.shape(), B::float_device(&tensor.primitive)),\n B::float_floor(tensor.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_floor(tensor.primitive)),\n }\n }\n\n fn float_erf(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Erf;\n\n retro_unary!(RetroErf, B::float_erf);\n\n impl<B: Backend> Backward<B, 1> for Erf {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let ops = checkpointer.retrieve_node_output(ops.state);\n let exponent = B::float_neg(B::float_powf_scalar(ops, 2.0));\n let numerator = B::float_mul_scalar(B::float_exp(exponent), 2.0.elem());\n let denominator = core::f64::consts::PI.sqrt().elem();\n let value = B::float_div_scalar(numerator, denominator);\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Erf\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroErf::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_erf(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_erf(tensor.primitive)),\n }\n }\n\n fn float_cat(tensors: Vec<FloatTensor<Self>>, dim: usize) -> FloatTensor<Self> {\n #[derive(new, Debug)]\n struct CatStep<B: Backend> {\n nodes: Vec<Option<NodeRef>>,\n // The dimension of each tensor along the dim dimension.\n // This indicates the number of dimension concatenated for each tensor.\n dim_sizes: Vec<usize>,\n output: NodeRef,\n phantom: PhantomData<B>,\n dim: usize,\n }\n\n impl<B: Backend> Step for CatStep<B> {\n fn step(self: Box<Self>, grads: &mut Gradients, _checkpointer: &mut Checkpointer) {\n let grad = 
grads.consume::<B>(&self.output);\n let ranges: Vec<_> = grad.shape().dims.iter().map(|v| 0..*v).collect();\n\n let mut current_index = 0;\n\n self.nodes\n .into_iter()\n .zip(self.dim_sizes)\n .filter_map(|(node, dim_size)| node.map(|node| (node, dim_size)))\n .for_each(|(node, dim_size)| {\n let mut ranges = ranges.clone();\n ranges[self.dim] = current_index..dim_size + current_index;\n current_index += dim_size;\n grads.register::<B>(node.id, B::float_slice(grad.clone(), &ranges));\n });\n }\n\n fn node(&self) -> NodeID {\n self.output.id\n }\n\n fn parents(&self) -> Vec<NodeID> {\n self.nodes\n .iter()\n .filter_map(|node| node.clone())\n .map(|node| node.id)\n .collect()\n }\n fn depth(&self) -> usize {\n self.output.order\n }\n }\n\n let mut nodes = Vec::with_capacity(tensors.len());\n let mut primitives = Vec::with_capacity(tensors.len());\n let mut dim_sizes = Vec::with_capacity(tensors.len());\n\n tensors.into_iter().for_each(|tensor| {\n dim_sizes.push(tensor.primitive.shape().dims[dim]);\n nodes.push(tensor.node);\n primitives.push(tensor.primitive);\n });\n\n let requirement = Requirement::from_nodes(&nodes);\n\n // For simplicity, this operation does not checkpoint anything\n let cat_computing_property = ComputingProperty::Ambiguous;\n let checkpointer_builder = CheckpointerBuilder::default();\n\n let output = B::float_cat(primitives, dim);\n if requirement.is_none() {\n return AutodiffTensor::from_parents(\n output,\n &nodes,\n requirement,\n cat_computing_property,\n );\n }\n\n let output =\n AutodiffTensor::from_parents(output, &nodes, requirement, cat_computing_property);\n let nodes = nodes\n .into_iter()\n .map(|node| node.clone_if_require_grad())\n .collect::<Vec<_>>();\n\n let ops = CatStep::<B>::new(nodes, dim_sizes, output.node.clone(), dim);\n output.register_step(ops, checkpointer_builder)\n }\n\n fn float_max_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {\n match MaxMinDim\n .prepare::<C>([tensor.node])\n 
            .compute_bound()
            .stateful()
        {
            // Tracked: use the with-indices forward so the backward state holds
            // (indices, input shape) — presumably consumed by `MaxMinDim`'s
            // backward (defined elsewhere) to route gradients; TODO confirm.
            OpsKind::Tracked(prep) => {
                let shape = tensor.primitive.shape();
                let (tensor, index) = B::float_max_dim_with_indices(tensor.primitive, dim);
                prep.finish((index, shape), tensor)
            }
            // Untracked: no gradient state needed, so skip computing indices.
            OpsKind::UnTracked(prep) => prep.finish(B::float_max_dim(tensor.primitive, dim)),
        }
    }
    /// Max along `dim`, also returning the argmax indices.
    ///
    /// Shares gradient handling with `float_max_dim` via `MaxMinDim`.
    fn float_max_dim_with_indices(
        tensor: FloatTensor<Self>,
        dim: usize,
    ) -> (FloatTensor<Self>, IntTensor<B>) {
        match MaxMinDim
            .prepare::<C>([tensor.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => {
                let shape = tensor.primitive.shape();
                let (tensor, index) = B::float_max_dim_with_indices(tensor.primitive, dim);
                // Indices serve double duty: stored in the backward state and
                // returned to the caller.
                let tensor = prep.finish((index.clone(), shape), tensor);

                (tensor, index)
            }
            OpsKind::UnTracked(prep) => {
                let (tensor, index) = B::float_max_dim_with_indices(tensor.primitive, dim);
                let tensor = prep.finish(tensor);

                (tensor, index)
            }
        }
    }
    /// Min along `dim`; mirrors `float_max_dim`.
    fn float_min_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {
        match MaxMinDim
            .prepare::<C>([tensor.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => {
                let shape = tensor.primitive.shape();
                let (tensor, index) = B::float_min_dim_with_indices(tensor.primitive, dim);
                prep.finish((index, shape), tensor)
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_min_dim(tensor.primitive, dim)),
        }
    }
    /// Min along `dim`, also returning the argmin indices; mirrors
    /// `float_max_dim_with_indices`.
    fn float_min_dim_with_indices(
        tensor: FloatTensor<Self>,
        dim: usize,
    ) -> (FloatTensor<Self>, IntTensor<B>) {
        match MaxMinDim
            .prepare::<C>([tensor.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => {
                let shape = tensor.primitive.shape();
                let (tensor, index) = B::float_min_dim_with_indices(tensor.primitive, dim);
                let tensor = prep.finish((index.clone(), shape), tensor);

                (tensor, index)
            }
            OpsKind::UnTracked(prep) => {
                let (tensor, index) = B::float_min_dim_with_indices(tensor.primitive, dim);
                let tensor = prep.finish(tensor);

                (tensor, index)
            }
        }
    }

fn float_into_int(tensor: FloatTensor<Self>) -> <Autodiff<B> as Backend>::IntTensorPrimitive {\n B::float_into_int(tensor.primitive)\n }\n\n fn float_powf(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct PowF;\n\n retro_binary!(RetroPowf, B::float_powf);\n\n impl<B: Backend> Backward<B, 2> for PowF {\n type State = (NodeID, NodeID, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (lhs_id, rhs_id, broadcast) = ops.state;\n let lhs: B::FloatTensorPrimitive = checkpointer.retrieve_node_output(lhs_id);\n let rhs: B::FloatTensorPrimitive = checkpointer.retrieve_node_output(rhs_id);\n\n // Both lhs and rhs are needed for both lhs and rhs gradients, but we clone them\n // the number of times required by the parents specification.\n let [rhs_4lhs, rhs_4rhs] = duplicate(&ops.parents, Some(rhs));\n let [lhs_4lhs, lhs_4rhs] = duplicate(&ops.parents, Some(lhs));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n //rhs*(lhs.val**(rhs-1))*grad\n let rhs1 = rhs_4lhs.unwrap();\n let rhs2 = rhs1.clone();\n let lhs = lhs_4lhs.unwrap();\n\n let tmp = B::float_powf(\n lhs,\n B::float_sub_scalar(rhs1, B::FloatElem::from_elem(1.0)),\n );\n let value = B::float_mul(tmp, rhs2);\n let grad = B::float_mul(grad, value);\n\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n //lhs**rhs * ln(lhs) * grad\n let rhs = rhs_4rhs.unwrap();\n let lhs1 = lhs_4rhs.unwrap();\n let lhs2 = lhs1.clone();\n let tmp = B::float_powf(lhs1, rhs);\n let value = B::float_mul(tmp, B::float_log(lhs2));\n let grad = B::float_mul(grad, value);\n\n broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match PowF\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroPowf::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, 
&rhs])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = prep.checkpoint(&lhs);\n let rhs_state = prep.checkpoint(&rhs);\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n B::float_powf(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_powf(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_sign(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sign;\n\n retro_unary!(RetroSign, B::float_sign);\n\n impl<B: Backend> Backward<B, 1> for Sign {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad|\n // Always return 0 because the derivative of the sign function\n // does not contribute to gradient updates in a meaningful way.\n B::float_mul_scalar(grad, 0.elem()));\n }\n }\n\n Sign.prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSign::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateless(B::float_sign(tensor.primitive))\n }\n\n fn float_expand(tensor: FloatTensor<Self>, shape: Shape) -> FloatTensor<Self> {\n // D1: tensor, D2: shape\n #[derive(Debug)]\n struct ExpandDim;\n\n #[derive(new, Debug)]\n struct RetroExpand<B: Backend> {\n input_id: NodeID,\n shape: Shape,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroExpand<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_expand(input, self.shape.clone());\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for ExpandDim {\n type State = (Shape, Shape);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape_in, shape_out) = ops.state;\n let ndims_in = shape_in.num_dims();\n let ndims_out = shape_out.num_dims();\n\n let 
mut shape_expanded = vec![1; ndims_out];\n\n debug_assert!(ndims_out >= ndims_in);\n\n for i in 0..ndims_in {\n shape_expanded[i + (ndims_out - ndims_in)] = shape_in.dims[i];\n }\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let shape_grad = grad.shape();\n let mut grad = grad;\n\n #[allow(clippy::needless_range_loop)]\n for i in 0..ndims_out {\n if shape_expanded[i] == 1 && shape_grad.dims[i] != 1 {\n grad = B::float_sum_dim(grad, i);\n }\n }\n\n B::float_reshape(grad, shape_in)\n });\n }\n }\n\n match ExpandDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroExpand::<B>::new(tensor.node.id, shape.clone()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (tensor.primitive.shape(), shape.clone()),\n B::float_expand(tensor.primitive, shape),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_expand(tensor.primitive, shape)),\n }\n }\n\n fn float_sort(tensor: FloatTensor<Self>, dim: usize, descending: bool) -> FloatTensor<Self> {\n match super::sort::SortDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let shape = tensor.primitive.shape();\n let (tensor, indices) =\n B::float_sort_with_indices(tensor.primitive, dim, descending);\n prep.finish((indices, shape), tensor)\n }\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_sort(tensor.primitive, dim, descending))\n }\n }\n }\n\n fn float_sort_with_indices(\n tensor: FloatTensor<Self>,\n dim: usize,\n descending: bool,\n ) -> (FloatTensor<Self>, IntTensor<B>) {\n match super::sort::SortDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let shape = tensor.primitive.shape();\n let (tensor, indices) =\n B::float_sort_with_indices(tensor.primitive, dim, descending);\n let tensor = prep.finish((indices.clone(), shape), tensor);\n\n (tensor, indices)\n }\n OpsKind::UnTracked(prep) => {\n let (tensor, indices) =\n 
B::float_sort_with_indices(tensor.primitive, dim, descending);\n let tensor = prep.finish(tensor);\n\n (tensor, indices)\n }\n }\n }\n\n fn float_argsort(tensor: FloatTensor<Self>, dim: usize, descending: bool) -> IntTensor<B> {\n B::float_argsort(tensor.primitive, dim, descending)\n }\n\n fn float_repeat_dim(tensor: FloatTensor<Self>, dim: usize, times: usize) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Repeat;\n\n #[derive(new, Debug)]\n struct RetroRepeat<B: Backend> {\n tensor_id: NodeID,\n dim: usize,\n times: usize,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroRepeat<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);\n let out = B::float_repeat_dim(tensor, self.dim, self.times);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for Repeat {\n type State = (usize, usize);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, times) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let mut dims = grad.shape().dims;\n let orig_dim_size = dims[dim] / times;\n if orig_dim_size > 1 {\n dims[dim] = orig_dim_size;\n let orig_dims = dims.clone();\n dims.insert(dim + 1, times); // shape [..., orig_dim_size, times, ...]\n let grad = B::float_reshape(grad, Shape::from(dims));\n let grad = B::float_sum_dim(grad, dim + 1); // sum over repeat times\n B::float_reshape(grad, Shape::from(orig_dims))\n } else {\n B::float_sum_dim(grad, dim)\n }\n });\n }\n }\n\n match Repeat\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroRepeat::<B>::new(tensor.node.id, dim, times))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (dim, times),\n B::float_repeat_dim(tensor.primitive, dim, times),\n ),\n OpsKind::UnTracked(prep) => {\n 
prep.finish(B::float_repeat_dim(tensor.primitive, dim, times))\n }\n }\n }\n\n fn float_cast(tensor: FloatTensor<Self>, dtype: burn_tensor::FloatDType) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_cast(tensor.primitive, dtype))\n }\n\n // TODO: Implement float_prod and float_sum\n // https://github.com/tracel-ai/burn/issues/1458\n}"
],
"name": "rhs",
"type": "FloatTensor<Self>"
}
],
"end_line": 317,
"name": "float_mul",
"signature": "fn float_mul(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self>",
"start_line": 260
} | {
"class_name": "impl<B: Backend, C: CheckpointStrategy> FloatTensorOps<Self> for Autodiff<B, C> {\n fn float_from_data(data: TensorData, device: &Device<Self>) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_from_data(data, device))\n }\n\n fn float_random(\n shape: Shape,\n distribution: burn_tensor::Distribution,\n device: &Device<Self>,\n ) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_random(shape, distribution, device))\n }\n\n fn float_zeros(shape: Shape, device: &Device<Self>) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_zeros(shape, device))\n }\n\n fn float_ones(shape: Shape, device: &Device<Self>) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_ones(shape, device))\n }\n\n async fn float_into_data(tensor: FloatTensor<Self>) -> TensorData {\n B::float_into_data(tensor.primitive).await\n }\n\n fn float_device(tensor: &FloatTensor<Self>) -> Device<Self> {\n B::float_device(&tensor.primitive)\n }\n\n fn float_to_device(tensor: FloatTensor<Self>, device: &Device<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct ToDevice;\n\n impl<B: Backend> Backward<B, 1> for ToDevice {\n type State = B::Device;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_to_device(grad, &ops.state)\n });\n }\n }\n\n match ToDevice\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let device_old = B::float_device(&tensor.primitive);\n prep.finish(device_old, B::float_to_device(tensor.primitive, device))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_to_device(tensor.primitive, device)),\n }\n }\n\n fn float_empty(shape: Shape, device: &Device<Self>) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_empty(shape, device))\n }\n\n fn float_add(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Add;\n\n 
retro_binary!(RetroAdd, B::float_add);\n\n impl<B: Backend> Backward<B, 2> for Add {\n type State = (Shape, Shape);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape_lhs, shape_rhs) = ops.state;\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| broadcast_shape::<B>(grad, &shape_lhs),\n |grad| broadcast_shape::<B>(grad, &shape_rhs),\n );\n }\n }\n\n match Add\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroAdd::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (lhs.primitive.shape(), rhs.primitive.shape()),\n B::float_add(lhs.primitive, rhs.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_add(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_add_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct AddScalar;\n\n retro_unary_scalar!(RetroAddScalar, B::float_add_scalar);\n\n impl<B: Backend> Backward<B, 1> for AddScalar {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| grad);\n }\n }\n\n AddScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroAddScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateless(B::float_add_scalar(lhs.primitive, rhs))\n }\n\n fn float_sub(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sub;\n\n retro_binary!(RetroSub, B::float_sub);\n\n impl<B: Backend> Backward<B, 2> for Sub {\n type State = (Shape, Shape);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape_lhs, shape_rhs) = ops.state;\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n 
grads,\n |grad| broadcast_shape::<B>(grad, &shape_lhs),\n |grad| broadcast_shape::<B>(B::float_neg(grad), &shape_rhs),\n );\n }\n }\n\n match Sub\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroSub::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (lhs.primitive.shape(), rhs.primitive.shape()),\n B::float_sub(lhs.primitive, rhs.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_sub(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_sub_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct SubScalar;\n\n retro_unary_scalar!(RetroSubScalar, B::float_sub_scalar);\n\n impl<B: Backend> Backward<B, 1> for SubScalar {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| grad);\n }\n }\n\n SubScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroSubScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateless(B::float_sub_scalar(lhs.primitive, rhs))\n }\n\n fn float_mul(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Mul;\n\n retro_binary!(RetroMul, B::float_mul);\n\n impl<B: Backend> Backward<B, 2> for Mul {\n type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (lhs, rhs, broadcast) = ops.state;\n let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));\n let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let grad = B::float_mul(grad, rhs.unwrap());\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n let grad = B::float_mul(grad, 
lhs.unwrap());\n broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let lhs_tracked = lhs.is_tracked();\n let rhs_tracked = rhs.is_tracked();\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match Mul\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroMul::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));\n let rhs_state = lhs_tracked.then(|| prep.checkpoint(&rhs));\n\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n B::float_mul(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_mul(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_mul_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct MulScalar;\n\n retro_unary_scalar!(RetroMulScalar, B::float_mul_scalar);\n\n impl<B: Backend> Backward<B, 1> for MulScalar {\n type State = FloatElem<B>;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_mul_scalar(grad, ops.state)\n });\n }\n }\n\n match MulScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroMulScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(rhs, B::float_mul_scalar(lhs.primitive, rhs)),\n OpsKind::UnTracked(prep) => prep.finish(B::float_mul_scalar(lhs.primitive, rhs)),\n }\n }\n\n fn float_div(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Div;\n\n retro_binary!(RetroDiv, B::float_div);\n\n impl<B: Backend> Backward<B, 2> for Div {\n type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n 
checkpointer: &mut Checkpointer,\n ) {\n let (lhs, rhs, broadcast) = ops.state;\n let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));\n let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));\n let [rhs_4lhs, rhs_4rhs] = duplicate(&ops.parents, rhs);\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let rhs = rhs_4lhs.unwrap();\n let value = B::float_powf_scalar(rhs, -1.0);\n let grad = B::float_mul(grad, value);\n\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n let rhs = rhs_4rhs.unwrap();\n let lhs = lhs.unwrap();\n let value = B::float_div(B::float_neg(lhs), B::float_powf_scalar(rhs, 2.0));\n let grad = B::float_mul(grad, value);\n\n broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let lhs_tracked = lhs.is_tracked();\n let rhs_tracked = rhs.is_tracked();\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match Div\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroDiv::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));\n let rhs_state = (lhs_tracked || rhs_tracked).then(|| prep.checkpoint(&rhs));\n\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n B::float_div(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_div(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_div_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct DivScalar;\n\n retro_unary_scalar!(RetroDivScalar, B::float_div_scalar);\n\n impl<B: Backend> Backward<B, 1> for DivScalar {\n type State = FloatElem<B>;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let tmp = 1.0 / ops.state.elem::<f32>();\n B::float_mul_scalar(grad, 
tmp.elem())\n });\n }\n }\n\n match DivScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroDivScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(rhs, B::float_div_scalar(lhs.primitive, rhs)),\n OpsKind::UnTracked(prep) => prep.finish(B::float_div_scalar(lhs.primitive, rhs)),\n }\n }\n\n fn float_remainder(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Rem;\n\n retro_binary!(RetroRem, B::float_remainder);\n\n impl<B: Backend> Backward<B, 2> for Rem {\n type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (lhs, rhs, broadcast) = ops.state;\n let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));\n let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n // remainder(x, y) = x - floor(x / y) * y\n // partial(x - floor(x / y) * y, x) = 1\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n // partial(x - floor(x / y) * y, y) = - floor(x / y)\n let rhs = rhs.unwrap();\n let lhs = lhs.unwrap();\n let value = B::float_neg(B::float_floor(B::float_div(lhs, rhs)));\n let grad = B::float_mul(grad, value);\n broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let lhs_tracked = lhs.is_tracked();\n let rhs_tracked = rhs.is_tracked();\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match Rem\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroRem::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));\n let rhs_state = (lhs_tracked || rhs_tracked).then(|| prep.checkpoint(&rhs));\n\n prep.finish(\n (lhs_state, 
rhs_state, broadcast),\n B::float_remainder(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_remainder(lhs.primitive, rhs.primitive))\n }\n }\n }\n\n fn float_remainder_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct RemainderScalar;\n\n retro_unary_scalar!(RetroRemainderScalar, B::float_remainder_scalar);\n\n impl<B: Backend> Backward<B, 1> for RemainderScalar {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| grad);\n }\n }\n\n RemainderScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroRemainderScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateless(B::float_remainder_scalar(lhs.primitive, rhs))\n }\n\n fn float_matmul(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Matmul;\n\n impl<B: Backend> Backward<B, 2> for Matmul {\n type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (lhs, rhs, broadcast) = ops.state;\n let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));\n let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let rhs = B::float_transpose(rhs.unwrap());\n let grad = B::float_matmul(grad, rhs);\n\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n let lhs = B::float_transpose(lhs.unwrap());\n let grad = B::float_matmul(lhs, grad);\n\n broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let lhs_tracked = lhs.is_tracked();\n let rhs_tracked = rhs.is_tracked();\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match Matmul\n .prepare::<C>([lhs.node.clone(), 
rhs.node.clone()])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));\n let rhs_state = lhs_tracked.then(|| prep.checkpoint(&rhs));\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n B::float_matmul(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_matmul(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_neg(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Neg;\n\n retro_unary!(RetroNeg, B::float_neg);\n\n impl<B: Backend> Backward<B, 1> for Neg {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| B::float_neg(grad));\n }\n }\n\n Neg.prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroNeg::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateless(B::float_neg(tensor.primitive))\n }\n\n fn float_recip(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Recip;\n\n retro_unary!(RetroRecip, B::float_recip);\n\n impl<B: Backend> Backward<B, 1> for Recip {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let tensor = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let tmp = B::float_powf_scalar(tensor, -2.0);\n let value = B::float_neg(tmp);\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Recip\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroRecip::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_recip(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_recip(tensor.primitive)),\n }\n }\n\n fn 
float_swap_dims(tensor: FloatTensor<Self>, dim1: usize, dim2: usize) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct SwapDim;\n\n #[derive(new, Debug)]\n struct RetroSwapDims<B: Backend> {\n input_id: NodeID,\n dim1: usize,\n dim2: usize,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroSwapDims<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_swap_dims(input, self.dim1, self.dim2);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for SwapDim {\n type State = (usize, usize);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim1, dim2) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_swap_dims(grad, dim2, dim1)\n });\n }\n }\n\n match SwapDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSwapDims::<B>::new(tensor.node.id, dim1, dim2))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (dim1, dim2),\n B::float_swap_dims(tensor.primitive, dim1, dim2),\n ),\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_swap_dims(tensor.primitive, dim1, dim2))\n }\n }\n }\n\n fn float_permute(tensor: FloatTensor<Self>, axes: &[usize]) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct PermuteDim;\n\n #[derive(new, Debug)]\n struct RetroPermuteDims<B: Backend> {\n input_id: NodeID,\n axes: Vec<usize>,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroPermuteDims<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_permute(input, &self.axes);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for PermuteDim {\n type State = Vec<usize>;\n\n fn backward(\n self,\n ops: 
Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let axes = ops.state;\n\n let mut inverse = vec![0usize; axes.len()];\n axes.iter()\n .enumerate()\n .for_each(|(i, &axis)| inverse[axis] = i);\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_permute(grad, &inverse)\n });\n }\n }\n\n match PermuteDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroPermuteDims::<B>::new(tensor.node.id, axes.to_vec()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n prep.finish(axes.to_vec(), B::float_permute(tensor.primitive, axes))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_permute(tensor.primitive, axes)),\n }\n }\n\n fn float_flip(tensor: FloatTensor<Self>, axes: &[usize]) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct FlipDim;\n\n #[derive(new, Debug)]\n struct RetroFlipDims<B: Backend> {\n input_id: NodeID,\n axes: Vec<usize>,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroFlipDims<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_flip(input, &self.axes);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for FlipDim {\n type State = Vec<usize>;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let axes = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_flip(grad, &axes)\n });\n }\n }\n\n match FlipDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroFlipDims::<B>::new(tensor.node.id, axes.to_vec()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n prep.finish(axes.to_vec(), B::float_flip(tensor.primitive, axes))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_flip(tensor.primitive, axes)),\n }\n }\n\n fn 
float_reshape(tensor: FloatTensor<Self>, shape: Shape) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct ReshapeDim;\n\n #[derive(new, Debug)]\n struct RetroReshape<B: Backend> {\n input_id: NodeID,\n shape: Shape,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroReshape<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_reshape(input, self.shape.clone());\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for ReshapeDim {\n type State = (Shape, Shape);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape_original, shape) = ops.state;\n let ndims_out = shape.num_dims();\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let shape_grad = grad.shape();\n let mut grad = grad;\n\n for i in 0..ndims_out {\n if shape.dims[i] == 1 && shape_grad.dims[i] != 1 {\n grad = B::float_sum_dim(grad, i);\n }\n }\n\n B::float_reshape(grad, shape_original)\n });\n }\n }\n\n match ReshapeDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroReshape::<B>::new(tensor.node.id, shape.clone()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (tensor.primitive.shape(), shape.clone()),\n B::float_reshape(tensor.primitive, shape),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_reshape(tensor.primitive, shape)),\n }\n }\n\n fn float_gather(\n dim: usize,\n tensor: FloatTensor<Self>,\n indices: IntTensor<B>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Gather;\n\n impl<B: Backend> Backward<B, 1> for Gather {\n type State = (usize, IntTensor<B>, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, indices, shape, device) = ops.state;\n\n unary::<B, _>(ops.parents, 
ops.node, grads, |grad| {\n let zeros = B::float_zeros(shape, &device);\n B::float_scatter(dim, zeros, indices, grad)\n });\n }\n }\n\n match Gather\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n dim,\n indices.clone(),\n tensor.primitive.shape(),\n B::float_device(&tensor.primitive),\n ),\n B::float_gather(dim, tensor.primitive, indices),\n ),\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_gather(dim, tensor.primitive, indices))\n }\n }\n }\n\n fn float_scatter(\n dim: usize,\n tensor: FloatTensor<Self>,\n indices: IntTensor<B>,\n value: FloatTensor<Self>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Scatter;\n\n impl<B: Backend> Backward<B, 2> for Scatter {\n type State = (usize, IntTensor<B>, Shape, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, indices, shape_lhs, shape_rhs, device) = ops.state;\n let [indices_4lhs, indices_4rhs] = duplicate(&ops.parents, Some(indices));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let zeros = B::float_zeros(shape_lhs, &device);\n B::float_scatter(dim, grad, indices_4lhs.unwrap(), zeros)\n },\n |grad| {\n let zeros = B::float_zeros(shape_rhs, &device);\n B::float_scatter(dim, zeros, indices_4rhs.unwrap(), grad)\n },\n );\n }\n }\n\n match Scatter\n .prepare::<C>([tensor.node, value.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n dim,\n indices.clone(),\n tensor.primitive.shape(),\n value.primitive.shape(),\n B::float_device(&value.primitive),\n ),\n B::float_scatter(dim, tensor.primitive, indices, value.primitive),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_scatter(\n dim,\n tensor.primitive,\n indices,\n value.primitive,\n )),\n }\n }\n\n fn float_select(\n tensor: FloatTensor<Self>,\n dim: usize,\n indices: IntTensor<B>,\n ) -> FloatTensor<Self> {\n 
#[derive(Debug)]\n struct Select;\n\n #[derive(new, Debug)]\n struct RetroSelect<B: Backend> {\n input_id: NodeID,\n dim: usize,\n indices: IntTensor<B>,\n }\n\n impl<B: Backend> RetroForward for RetroSelect<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_select(input, self.dim, self.indices.clone());\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for Select {\n type State = (usize, IntTensor<B>, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, indices, shape, device) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let zeros = B::float_zeros(shape, &device);\n B::float_select_assign(zeros, dim, indices, grad)\n });\n }\n }\n\n match Select\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSelect::<B>::new(tensor.node.id, dim, indices.clone()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n dim,\n indices.clone(),\n tensor.primitive.shape(),\n B::float_device(&tensor.primitive),\n ),\n B::float_select(tensor.primitive, dim, indices),\n ),\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_select(tensor.primitive, dim, indices))\n }\n }\n }\n\n fn float_select_assign(\n tensor: FloatTensor<Self>,\n dim: usize,\n indices: IntTensor<B>,\n value: FloatTensor<Self>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct IndexSelectDimAssign;\n\n #[derive(new, Debug)]\n struct RetroSelectAssign<B: Backend> {\n tensor_id: NodeID,\n dim: usize,\n indices: IntTensor<B>,\n value_id: NodeID,\n }\n\n impl<B: Backend> RetroForward for RetroSelectAssign<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);\n let value = 
states.get_state::<B::FloatTensorPrimitive>(&self.value_id);\n let out = B::float_select_assign(tensor, self.dim, self.indices.clone(), value);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 2> for IndexSelectDimAssign {\n type State = (usize, IntTensor<B>);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, indices) = ops.state;\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| grad,\n |grad| B::float_select(grad, dim, indices),\n );\n }\n }\n\n match IndexSelectDimAssign\n .prepare::<C>([tensor.node.clone(), value.node.clone()])\n .memory_bound()\n .retro_forward(RetroSelectAssign::<B>::new(\n tensor.node.id,\n dim,\n indices.clone(),\n value.node.id,\n ))\n .parents([&tensor, &value])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (dim, indices.clone()),\n B::float_select_assign(tensor.primitive, dim, indices, value.primitive),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_select_assign(\n tensor.primitive,\n dim,\n indices,\n value.primitive,\n )),\n }\n }\n\n fn float_slice(\n tensor: FloatTensor<Self>,\n ranges: &[core::ops::Range<usize>],\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Index;\n\n #[derive(new, Debug)]\n struct RetroSlice<B: Backend> {\n tensor_id: NodeID,\n ranges: Vec<core::ops::Range<usize>>,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroSlice<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);\n let out = B::float_slice(tensor, &self.ranges);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for Index {\n type State = (Vec<core::ops::Range<usize>>, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (ranges, shape, device) = ops.state;\n\n 
unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let zeros = B::float_zeros(shape, &device);\n B::float_slice_assign(zeros, &ranges, grad)\n });\n }\n }\n\n match Index\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSlice::<B>::new(tensor.node.id, ranges.to_vec()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n ranges.to_vec(),\n tensor.primitive.shape(),\n B::float_device(&tensor.primitive),\n ),\n B::float_slice(tensor.primitive, ranges),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_slice(tensor.primitive, ranges)),\n }\n }\n\n fn float_slice_assign(\n tensor: FloatTensor<Self>,\n ranges: &[core::ops::Range<usize>],\n value: FloatTensor<Self>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct SliceAssign;\n\n #[derive(new, Debug)]\n struct RetroSliceAssign<B: Backend> {\n tensor_id: NodeID,\n ranges: Vec<core::ops::Range<usize>>,\n value_id: NodeID,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroSliceAssign<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);\n let value = states.get_state::<B::FloatTensorPrimitive>(&self.value_id);\n let out = B::float_slice_assign(tensor, &self.ranges, value);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 2> for SliceAssign {\n type State = (Vec<core::ops::Range<usize>>, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (ranges, shape_rhs, device) = ops.state;\n let [ranges_4lhs, ranges_4rhs] = duplicate(&ops.parents, Some(ranges));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let zeros = B::float_zeros(shape_rhs, &device);\n B::float_slice_assign(grad, &ranges_4lhs.unwrap(), zeros)\n },\n |grad| B::float_slice(grad, &ranges_4rhs.unwrap()),\n );\n }\n }\n\n match 
SliceAssign\n .prepare::<C>([tensor.node.clone(), value.node.clone()])\n .memory_bound()\n .retro_forward(RetroSliceAssign::<B>::new(\n tensor.node.id,\n ranges.to_vec(),\n value.node.id,\n ))\n .parents([&tensor, &value])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n ranges.to_vec(),\n value.primitive.shape(),\n B::float_device(&value.primitive),\n ),\n B::float_slice_assign(tensor.primitive, ranges, value.primitive),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_slice_assign(\n tensor.primitive,\n ranges,\n value.primitive,\n )),\n }\n }\n\n fn float_mask_where(\n tensor: FloatTensor<Self>,\n mask: BoolTensor<Self>,\n source: FloatTensor<Self>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct MaskWhere;\n\n impl<B: Backend> Backward<B, 2> for MaskWhere {\n type State = (BoolTensor<B>, Shape, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (mask, shape_lhs, shape_rhs, device) = ops.state;\n let [mask_4lhs, mask_4rhs] = duplicate(&ops.parents, Some(mask));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let zeros = B::float_zeros(shape_lhs.clone(), &device);\n let grad = B::float_mask_where(grad, mask_4lhs.unwrap(), zeros);\n\n broadcast_shape::<B>(grad, &shape_lhs)\n },\n |grad| {\n let zeros = B::float_zeros(shape_rhs.clone(), &device);\n let grad = B::float_mask_where(zeros, mask_4rhs.unwrap(), grad);\n\n broadcast_shape::<B>(grad, &shape_rhs)\n },\n );\n }\n }\n\n match MaskWhere\n .prepare::<C>([tensor.node, source.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n mask.clone(),\n tensor.primitive.shape(),\n source.primitive.shape(),\n B::float_device(&source.primitive),\n ),\n B::float_mask_where(tensor.primitive, mask, source.primitive),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_mask_where(\n tensor.primitive,\n mask,\n source.primitive,\n )),\n }\n 
}\n\n fn float_mask_fill(\n tensor: FloatTensor<Self>,\n mask: BoolTensor<B>,\n value: FloatElem<B>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct MaskFill;\n\n impl<B: Backend> Backward<B, 1> for MaskFill {\n type State = BoolTensor<B>;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_mask_fill(grad, ops.state, 0.elem())\n });\n }\n }\n\n match MaskFill\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n mask.clone(),\n B::float_mask_fill(tensor.primitive, mask, value),\n ),\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_mask_fill(tensor.primitive, mask, value))\n }\n }\n }\n\n fn float_equal(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {\n B::float_equal(lhs.primitive, rhs.primitive)\n }\n\n fn float_equal_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {\n B::float_equal_elem(lhs.primitive, rhs)\n }\n\n fn float_greater(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {\n B::float_greater(lhs.primitive, rhs.primitive)\n }\n\n fn float_greater_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {\n B::float_greater_elem(lhs.primitive, rhs)\n }\n\n fn float_greater_equal(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {\n B::float_greater_equal(lhs.primitive, rhs.primitive)\n }\n\n fn float_greater_equal_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {\n B::float_greater_equal_elem(lhs.primitive, rhs)\n }\n\n fn float_lower(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {\n B::float_lower(lhs.primitive, rhs.primitive)\n }\n\n fn float_lower_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {\n B::float_lower_elem(lhs.primitive, rhs)\n }\n\n fn float_lower_equal(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> 
BoolTensor<B> {\n B::float_lower_equal(lhs.primitive, rhs.primitive)\n }\n\n fn float_lower_equal_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {\n B::float_lower_equal_elem(lhs.primitive, rhs)\n }\n\n fn float_detach(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n // When we detach a tensor, we remove it from the graph, but we still want to keep the\n // `require_grad` setting.\n let is_require_grad = Self::float_is_require_grad(&tensor);\n let tensor = AutodiffTensor::new(tensor.primitive);\n\n match is_require_grad {\n true => tensor.require_grad(),\n false => tensor,\n }\n }\n\n fn float_set_require_grad(tensor: FloatTensor<Self>, require_grad: bool) -> FloatTensor<Self> {\n if require_grad {\n return tensor.require_grad();\n }\n\n AutodiffTensor::new(tensor.primitive)\n }\n\n fn float_is_require_grad(tensor: &FloatTensor<Self>) -> bool {\n matches!(tensor.node.requirement, Requirement::Grad)\n }\n\n fn float_mean(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Mean;\n\n impl<B: Backend> Backward<B, 1> for Mean {\n type State = Shape;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let shape = ops.state;\n let val = 1_f64 / shape.num_elements() as f64;\n let ones = B::float_ones(shape, &B::float_device(&grad));\n let val = B::float_mul_scalar(ones, val.elem());\n\n let grad = unsqueeze_like::<B>(grad, val.shape());\n B::float_mul(val, grad)\n });\n }\n }\n\n match Mean.prepare::<C>([tensor.node]).compute_bound().stateful() {\n OpsKind::Tracked(prep) => {\n prep.finish(tensor.primitive.shape(), B::float_mean(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_mean(tensor.primitive)),\n }\n }\n\n fn float_sum(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sum;\n\n impl<B: Backend> Backward<B, 1> for Sum {\n type State = Shape;\n\n 
fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let val = B::float_ones(ops.state, &B::float_device(&grad));\n\n let grad = unsqueeze_like::<B>(grad, val.shape());\n B::float_mul(val, grad)\n });\n }\n }\n\n match Sum.prepare::<C>([tensor.node]).compute_bound().stateful() {\n OpsKind::Tracked(prep) => {\n prep.finish(tensor.primitive.shape(), B::float_sum(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_sum(tensor.primitive)),\n }\n }\n\n fn float_mean_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct MeanDim;\n\n impl<B: Backend> Backward<B, 1> for MeanDim {\n type State = (Shape, usize);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape, dim) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let val = 1_f64 / shape.dims[dim] as f64;\n let ones = B::float_ones(shape, &B::float_device(&grad));\n let val = B::float_mul_scalar(ones, B::FloatElem::from_elem(val));\n\n let grad = B::float_sum_dim(grad, dim);\n B::float_mul(val, grad)\n });\n }\n }\n\n match MeanDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (tensor.primitive.shape(), dim),\n B::float_mean_dim(tensor.primitive, dim),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_mean_dim(tensor.primitive, dim)),\n }\n }\n\n fn float_sum_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct SumDim;\n\n impl<B: Backend> Backward<B, 1> for SumDim {\n type State = (Shape, usize);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape, dim) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let ones = 
B::float_ones(shape, &B::float_device(&grad));\n let grad = B::float_sum_dim(grad, dim);\n\n B::float_mul(ones, grad)\n });\n }\n }\n\n match SumDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (tensor.primitive.shape(), dim),\n B::float_sum_dim(tensor.primitive, dim),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_sum_dim(tensor.primitive, dim)),\n }\n }\n\n fn float_argmax(tensor: FloatTensor<Self>, dim: usize) -> IntTensor<B> {\n B::float_argmax(tensor.primitive, dim)\n }\n\n fn float_argmin(tensor: FloatTensor<Self>, dim: usize) -> IntTensor<B> {\n B::float_argmin(tensor.primitive, dim)\n }\n\n fn float_exp(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Exp;\n\n retro_unary!(RetroExp, B::float_exp);\n\n impl<B: Backend> Backward<B, 1> for Exp {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n let output = B::float_exp(input);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_mul(grad, output)\n });\n }\n }\n\n match Exp\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroExp::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_exp(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_exp(tensor.primitive)),\n }\n }\n\n fn float_log(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Log;\n\n retro_unary!(RetroLog, B::float_log);\n\n impl<B: Backend> Backward<B, 1> for Log {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, 
ops.node, grads, |grad| {\n let value = B::float_powf_scalar(input, -1.0);\n B::float_mul(grad, value)\n });\n }\n }\n\n match Log\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroLog::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_log(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_log(tensor.primitive)),\n }\n }\n\n fn float_log1p(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Log1P;\n\n retro_unary!(RetroLog1P, B::float_log1p);\n\n impl<B: Backend> Backward<B, 1> for Log1P {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_add_scalar(input, 1.elem());\n let value = B::float_powf_scalar(value, -1.0);\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Log1P\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroLog1P::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_log1p(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_log1p(tensor.primitive)),\n }\n }\n\n fn float_powf_scalar(tensor: FloatTensor<Self>, value: f32) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct PowfScalar;\n\n #[derive(new, Debug)]\n struct RetroPowfScalar<B: Backend> {\n lhs_id: NodeID,\n rhs: f32,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroPowfScalar<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let lhs = states.get_state::<B::FloatTensorPrimitive>(&self.lhs_id);\n let out = B::float_powf_scalar(lhs, self.rhs);\n 
states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for PowfScalar {\n type State = (NodeID, f32);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (tensor_id, value) = ops.state;\n let tensor = checkpointer.retrieve_node_output(tensor_id);\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let tmp = B::float_powf_scalar(tensor, value - 1.0);\n let value = B::float_mul_scalar(tmp, value.elem());\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match PowfScalar\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroPowfScalar::<B>::new(tensor.node.id, value))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = (prep.checkpoint(&tensor), value);\n prep.finish(state, B::float_powf_scalar(tensor.primitive, value))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_powf_scalar(tensor.primitive, value)),\n }\n }\n\n fn float_sqrt(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sqrt;\n\n retro_unary!(RetroSqrt, B::float_sqrt);\n\n impl<B: Backend> Backward<B, 1> for Sqrt {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_div_scalar(B::float_powf_scalar(input, -0.5), 2.elem());\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Sqrt\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSqrt::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_sqrt(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_sqrt(tensor.primitive)),\n }\n }\n\n fn float_abs(tensor: FloatTensor<Self>) -> 
FloatTensor<Self> {\n #[derive(Debug)]\n struct Abs;\n\n retro_unary!(RetroAbs, B::float_abs);\n\n impl<B: Backend> Backward<B, 1> for Abs {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let tensor: B::FloatTensorPrimitive = checkpointer.retrieve_node_output(ops.state);\n let state = B::float_sign(tensor);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_mul(grad, state)\n });\n }\n }\n\n match Abs\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroAbs::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_abs(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_abs(tensor.primitive)),\n }\n }\n\n fn float_cos(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Cos;\n\n retro_unary!(RetroCos, B::float_cos);\n\n impl<B: Backend> Backward<B, 1> for Cos {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_neg(B::float_sin(input));\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Cos\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroCos::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_cos(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_cos(tensor.primitive)),\n }\n }\n\n fn float_sin(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sin;\n\n retro_unary!(RetroSin, B::float_sin);\n\n impl<B: Backend> Backward<B, 1> for Sin {\n type State = 
NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let state = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_cos(state);\n B::float_mul(grad, value)\n });\n }\n }\n\n match Sin\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSin::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_sin(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_sin(tensor.primitive)),\n }\n }\n\n fn float_tanh(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Tanh;\n\n retro_unary!(RetroTanh, B::float_tanh);\n\n impl<B: Backend> Backward<B, 1> for Tanh {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n let state = B::float_tanh(input);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_add_scalar(\n B::float_neg(B::float_powf_scalar(state, 2.0)),\n 1.elem(),\n );\n B::float_mul(grad, value)\n });\n }\n }\n\n match Tanh\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroTanh::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_tanh(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_tanh(tensor.primitive)),\n }\n }\n\n fn float_round(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Round;\n retro_unary!(RetroRound, B::float_round);\n\n impl<B: Backend> Backward<B, 1> for Round {\n type State = (Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n 
grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape, device) = ops.state;\n unary::<B, _>(ops.parents, ops.node, grads, |_grad| {\n B::float_zeros(shape, &device)\n })\n }\n }\n\n match Round\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroRound::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (tensor.primitive.shape(), B::float_device(&tensor.primitive)),\n B::float_round(tensor.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_round(tensor.primitive)),\n }\n }\n\n fn float_floor(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Floor;\n retro_unary!(RetroFloor, B::float_floor);\n\n impl<B: Backend> Backward<B, 1> for Floor {\n type State = (Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape, device) = ops.state;\n unary::<B, _>(ops.parents, ops.node, grads, |_grad| {\n B::float_zeros(shape, &device)\n })\n }\n }\n\n match Floor\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroFloor::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (tensor.primitive.shape(), B::float_device(&tensor.primitive)),\n B::float_floor(tensor.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_floor(tensor.primitive)),\n }\n }\n\n fn float_ceil(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Ceil;\n retro_unary!(RetroCeil, B::float_ceil);\n\n impl<B: Backend> Backward<B, 1> for Ceil {\n type State = (Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape, device) = ops.state;\n unary::<B, _>(ops.parents, ops.node, grads, |_grad| {\n B::float_zeros(shape, &device)\n })\n }\n }\n\n match Ceil\n 
.prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroCeil::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (tensor.primitive.shape(), B::float_device(&tensor.primitive)),\n B::float_floor(tensor.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_floor(tensor.primitive)),\n }\n }\n\n fn float_erf(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Erf;\n\n retro_unary!(RetroErf, B::float_erf);\n\n impl<B: Backend> Backward<B, 1> for Erf {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let ops = checkpointer.retrieve_node_output(ops.state);\n let exponent = B::float_neg(B::float_powf_scalar(ops, 2.0));\n let numerator = B::float_mul_scalar(B::float_exp(exponent), 2.0.elem());\n let denominator = core::f64::consts::PI.sqrt().elem();\n let value = B::float_div_scalar(numerator, denominator);\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Erf\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroErf::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_erf(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_erf(tensor.primitive)),\n }\n }\n\n fn float_cat(tensors: Vec<FloatTensor<Self>>, dim: usize) -> FloatTensor<Self> {\n #[derive(new, Debug)]\n struct CatStep<B: Backend> {\n nodes: Vec<Option<NodeRef>>,\n // The dimension of each tensor along the dim dimension.\n // This indicates the number of dimension concatenated for each tensor.\n dim_sizes: Vec<usize>,\n output: NodeRef,\n phantom: PhantomData<B>,\n dim: usize,\n }\n\n impl<B: Backend> Step for CatStep<B> {\n fn step(self: Box<Self>, grads: &mut Gradients, 
_checkpointer: &mut Checkpointer) {\n let grad = grads.consume::<B>(&self.output);\n let ranges: Vec<_> = grad.shape().dims.iter().map(|v| 0..*v).collect();\n\n let mut current_index = 0;\n\n self.nodes\n .into_iter()\n .zip(self.dim_sizes)\n .filter_map(|(node, dim_size)| node.map(|node| (node, dim_size)))\n .for_each(|(node, dim_size)| {\n let mut ranges = ranges.clone();\n ranges[self.dim] = current_index..dim_size + current_index;\n current_index += dim_size;\n grads.register::<B>(node.id, B::float_slice(grad.clone(), &ranges));\n });\n }\n\n fn node(&self) -> NodeID {\n self.output.id\n }\n\n fn parents(&self) -> Vec<NodeID> {\n self.nodes\n .iter()\n .filter_map(|node| node.clone())\n .map(|node| node.id)\n .collect()\n }\n fn depth(&self) -> usize {\n self.output.order\n }\n }\n\n let mut nodes = Vec::with_capacity(tensors.len());\n let mut primitives = Vec::with_capacity(tensors.len());\n let mut dim_sizes = Vec::with_capacity(tensors.len());\n\n tensors.into_iter().for_each(|tensor| {\n dim_sizes.push(tensor.primitive.shape().dims[dim]);\n nodes.push(tensor.node);\n primitives.push(tensor.primitive);\n });\n\n let requirement = Requirement::from_nodes(&nodes);\n\n // For simplicity, this operation does not checkpoint anything\n let cat_computing_property = ComputingProperty::Ambiguous;\n let checkpointer_builder = CheckpointerBuilder::default();\n\n let output = B::float_cat(primitives, dim);\n if requirement.is_none() {\n return AutodiffTensor::from_parents(\n output,\n &nodes,\n requirement,\n cat_computing_property,\n );\n }\n\n let output =\n AutodiffTensor::from_parents(output, &nodes, requirement, cat_computing_property);\n let nodes = nodes\n .into_iter()\n .map(|node| node.clone_if_require_grad())\n .collect::<Vec<_>>();\n\n let ops = CatStep::<B>::new(nodes, dim_sizes, output.node.clone(), dim);\n output.register_step(ops, checkpointer_builder)\n }\n\n fn float_max_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {\n match 
MaxMinDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let shape = tensor.primitive.shape();\n let (tensor, index) = B::float_max_dim_with_indices(tensor.primitive, dim);\n prep.finish((index, shape), tensor)\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_max_dim(tensor.primitive, dim)),\n }\n }\n fn float_max_dim_with_indices(\n tensor: FloatTensor<Self>,\n dim: usize,\n ) -> (FloatTensor<Self>, IntTensor<B>) {\n match MaxMinDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let shape = tensor.primitive.shape();\n let (tensor, index) = B::float_max_dim_with_indices(tensor.primitive, dim);\n let tensor = prep.finish((index.clone(), shape), tensor);\n\n (tensor, index)\n }\n OpsKind::UnTracked(prep) => {\n let (tensor, index) = B::float_max_dim_with_indices(tensor.primitive, dim);\n let tensor = prep.finish(tensor);\n\n (tensor, index)\n }\n }\n }\n fn float_min_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {\n match MaxMinDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let shape = tensor.primitive.shape();\n let (tensor, index) = B::float_min_dim_with_indices(tensor.primitive, dim);\n prep.finish((index, shape), tensor)\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_min_dim(tensor.primitive, dim)),\n }\n }\n fn float_min_dim_with_indices(\n tensor: FloatTensor<Self>,\n dim: usize,\n ) -> (FloatTensor<Self>, IntTensor<B>) {\n match MaxMinDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let shape = tensor.primitive.shape();\n let (tensor, index) = B::float_min_dim_with_indices(tensor.primitive, dim);\n let tensor = prep.finish((index.clone(), shape), tensor);\n\n (tensor, index)\n }\n OpsKind::UnTracked(prep) => {\n let (tensor, index) = B::float_min_dim_with_indices(tensor.primitive, dim);\n let tensor = 
prep.finish(tensor);\n\n (tensor, index)\n }\n }\n }\n\n fn float_into_int(tensor: FloatTensor<Self>) -> <Autodiff<B> as Backend>::IntTensorPrimitive {\n B::float_into_int(tensor.primitive)\n }\n\n fn float_powf(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct PowF;\n\n retro_binary!(RetroPowf, B::float_powf);\n\n impl<B: Backend> Backward<B, 2> for PowF {\n type State = (NodeID, NodeID, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (lhs_id, rhs_id, broadcast) = ops.state;\n let lhs: B::FloatTensorPrimitive = checkpointer.retrieve_node_output(lhs_id);\n let rhs: B::FloatTensorPrimitive = checkpointer.retrieve_node_output(rhs_id);\n\n // Both lhs and rhs are needed for both lhs and rhs gradients, but we clone them\n // the number of times required by the parents specification.\n let [rhs_4lhs, rhs_4rhs] = duplicate(&ops.parents, Some(rhs));\n let [lhs_4lhs, lhs_4rhs] = duplicate(&ops.parents, Some(lhs));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n //rhs*(lhs.val**(rhs-1))*grad\n let rhs1 = rhs_4lhs.unwrap();\n let rhs2 = rhs1.clone();\n let lhs = lhs_4lhs.unwrap();\n\n let tmp = B::float_powf(\n lhs,\n B::float_sub_scalar(rhs1, B::FloatElem::from_elem(1.0)),\n );\n let value = B::float_mul(tmp, rhs2);\n let grad = B::float_mul(grad, value);\n\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n //lhs**rhs * ln(lhs) * grad\n let rhs = rhs_4rhs.unwrap();\n let lhs1 = lhs_4rhs.unwrap();\n let lhs2 = lhs1.clone();\n let tmp = B::float_powf(lhs1, rhs);\n let value = B::float_mul(tmp, B::float_log(lhs2));\n let grad = B::float_mul(grad, value);\n\n broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match PowF\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n 
.retro_forward(RetroPowf::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = prep.checkpoint(&lhs);\n let rhs_state = prep.checkpoint(&rhs);\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n B::float_powf(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_powf(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_sign(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sign;\n\n retro_unary!(RetroSign, B::float_sign);\n\n impl<B: Backend> Backward<B, 1> for Sign {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad|\n // Always return 0 because the derivative of the sign function\n // does not contribute to gradient updates in a meaningful way.\n B::float_mul_scalar(grad, 0.elem()));\n }\n }\n\n Sign.prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSign::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateless(B::float_sign(tensor.primitive))\n }\n\n fn float_expand(tensor: FloatTensor<Self>, shape: Shape) -> FloatTensor<Self> {\n // D1: tensor, D2: shape\n #[derive(Debug)]\n struct ExpandDim;\n\n #[derive(new, Debug)]\n struct RetroExpand<B: Backend> {\n input_id: NodeID,\n shape: Shape,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroExpand<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_expand(input, self.shape.clone());\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for ExpandDim {\n type State = (Shape, Shape);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape_in, shape_out) = ops.state;\n let 
ndims_in = shape_in.num_dims();\n let ndims_out = shape_out.num_dims();\n\n let mut shape_expanded = vec![1; ndims_out];\n\n debug_assert!(ndims_out >= ndims_in);\n\n for i in 0..ndims_in {\n shape_expanded[i + (ndims_out - ndims_in)] = shape_in.dims[i];\n }\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let shape_grad = grad.shape();\n let mut grad = grad;\n\n #[allow(clippy::needless_range_loop)]\n for i in 0..ndims_out {\n if shape_expanded[i] == 1 && shape_grad.dims[i] != 1 {\n grad = B::float_sum_dim(grad, i);\n }\n }\n\n B::float_reshape(grad, shape_in)\n });\n }\n }\n\n match ExpandDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroExpand::<B>::new(tensor.node.id, shape.clone()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (tensor.primitive.shape(), shape.clone()),\n B::float_expand(tensor.primitive, shape),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_expand(tensor.primitive, shape)),\n }\n }\n\n fn float_sort(tensor: FloatTensor<Self>, dim: usize, descending: bool) -> FloatTensor<Self> {\n match super::sort::SortDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let shape = tensor.primitive.shape();\n let (tensor, indices) =\n B::float_sort_with_indices(tensor.primitive, dim, descending);\n prep.finish((indices, shape), tensor)\n }\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_sort(tensor.primitive, dim, descending))\n }\n }\n }\n\n fn float_sort_with_indices(\n tensor: FloatTensor<Self>,\n dim: usize,\n descending: bool,\n ) -> (FloatTensor<Self>, IntTensor<B>) {\n match super::sort::SortDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let shape = tensor.primitive.shape();\n let (tensor, indices) =\n B::float_sort_with_indices(tensor.primitive, dim, descending);\n let tensor = prep.finish((indices.clone(), shape), tensor);\n\n (tensor, indices)\n 
}\n OpsKind::UnTracked(prep) => {\n let (tensor, indices) =\n B::float_sort_with_indices(tensor.primitive, dim, descending);\n let tensor = prep.finish(tensor);\n\n (tensor, indices)\n }\n }\n }\n\n fn float_argsort(tensor: FloatTensor<Self>, dim: usize, descending: bool) -> IntTensor<B> {\n B::float_argsort(tensor.primitive, dim, descending)\n }\n\n fn float_repeat_dim(tensor: FloatTensor<Self>, dim: usize, times: usize) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Repeat;\n\n #[derive(new, Debug)]\n struct RetroRepeat<B: Backend> {\n tensor_id: NodeID,\n dim: usize,\n times: usize,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroRepeat<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);\n let out = B::float_repeat_dim(tensor, self.dim, self.times);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for Repeat {\n type State = (usize, usize);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, times) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let mut dims = grad.shape().dims;\n let orig_dim_size = dims[dim] / times;\n if orig_dim_size > 1 {\n dims[dim] = orig_dim_size;\n let orig_dims = dims.clone();\n dims.insert(dim + 1, times); // shape [..., orig_dim_size, times, ...]\n let grad = B::float_reshape(grad, Shape::from(dims));\n let grad = B::float_sum_dim(grad, dim + 1); // sum over repeat times\n B::float_reshape(grad, Shape::from(orig_dims))\n } else {\n B::float_sum_dim(grad, dim)\n }\n });\n }\n }\n\n match Repeat\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroRepeat::<B>::new(tensor.node.id, dim, times))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (dim, times),\n B::float_repeat_dim(tensor.primitive, dim, times),\n ),\n 
OpsKind::UnTracked(prep) => {\n prep.finish(B::float_repeat_dim(tensor.primitive, dim, times))\n }\n }\n }\n\n fn float_cast(tensor: FloatTensor<Self>, dtype: burn_tensor::FloatDType) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_cast(tensor.primitive, dtype))\n }\n\n // TODO: Implement float_prod and float_sum\n // https://github.com/tracel-ai/burn/issues/1458\n}",
"class_signature": "impl<B: Backend, C: CheckpointStrategy> FloatTensorOps<Self> for Autodiff<B, C>"
} |
float_mul_scalar | burn-main/crates/burn-autodiff/src/ops/tensor.rs | fn float_mul_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {
#[derive(Debug)]
struct MulScalar;
retro_unary_scalar!(RetroMulScalar, B::float_mul_scalar);
impl<B: Backend> Backward<B, 1> for MulScalar {
type State = FloatElem<B>;
fn backward(
self,
ops: Ops<Self::State, 1>,
grads: &mut Gradients,
_checkpointer: &mut Checkpointer,
) {
unary::<B, _>(ops.parents, ops.node, grads, |grad| {
B::float_mul_scalar(grad, ops.state)
});
}
}
match MulScalar
.prepare::<C>([lhs.node.clone()])
.memory_bound()
.retro_forward(RetroMulScalar::<B>::new(lhs.node.id, rhs))
.parents([&lhs])
.stateful()
{
OpsKind::Tracked(prep) => prep.finish(rhs, B::float_mul_scalar(lhs.primitive, rhs)),
OpsKind::UnTracked(prep) => prep.finish(B::float_mul_scalar(lhs.primitive, rhs)),
}
} | use alloc::{boxed::Box, vec, vec::Vec};
use core::marker::PhantomData;
#[cfg(not(feature = "std"))]
#[allow(unused_imports, reason = "required on aarch64, unused on x86_64")]
use num_traits::float::Float;
use crate::{
Autodiff,
checkpoint::{
base::Checkpointer, builder::CheckpointerBuilder, retro_forward::RetroForward,
state::BackwardStates, strategy::CheckpointStrategy,
},
grads::Gradients,
graph::{ComputingProperty, NodeID, NodeRef, Requirement, Step},
ops::{Backward, Ops, OpsKind, binary, broadcast_shape, unary},
retro_binary, retro_unary, retro_unary_scalar,
tensor::AutodiffTensor,
utils::duplicate,
};
use burn_tensor::{
Device, ElementConversion, Shape, TensorData, TensorMetadata,
backend::Backend,
ops::{BoolTensor, FloatElem, FloatTensor, FloatTensorOps, IntTensor},
};
use super::maxmin::MaxMinDim;
// Unsqueeze op on primitive.
fn unsqueeze_like<B: Backend>(
tensor: B::FloatTensorPrimitive,
shape: Shape,
) -> B::FloatTensorPrimitive {
/*
let mut dims = [1; D2];
let num_ones = D2 - D;
let shape = self.shape();
dims[num_ones..(D + num_ones)].copy_from_slice(&shape.dims[..D]);
let shape = Shape::new(dims);
self.reshape(shape)
*/
let ndims_out = shape.num_dims();
let shape = tensor.shape();
let ndims_in = shape.num_dims();
let mut dims = vec![1; ndims_out];
let num_ones = ndims_out - ndims_in;
dims[num_ones..(ndims_in + num_ones)].copy_from_slice(&shape.dims[..ndims_in]);
B::float_reshape(tensor, Shape::from(dims))
}
impl<B: Backend, C: CheckpointStrategy> FloatTensorOps<Self> for Autodiff<B, C> {
fn float_from_data(data: TensorData, device: &Device<Self>) -> FloatTensor<Self> {
AutodiffTensor::new(B::float_from_data(data, device))
}
fn float_random(
shape: Shape,
distribution: burn_tensor::Distribution,
device: &Device<Self>,
) -> FloatTensor<Self> {
AutodiffTensor::new(B::float_random(shape, distribution, device))
}
fn float_zeros(shape: Shape, device: &Device<Self>) -> FloatTensor<Self> {
AutodiffTensor::new(B::float_zeros(shape, device))
}
fn float_ones(shape: Shape, device: &Device<Self>) -> FloatTensor<Self> {
AutodiffTensor::new(B::float_ones(shape, device))
}
async fn float_into_data(tensor: FloatTensor<Self>) -> TensorData {
B::float_into_data(tensor.primitive).await
}
fn float_device(tensor: &FloatTensor<Self>) -> Device<Self> {
B::float_device(&tensor.primitive)
}
fn float_to_device(tensor: FloatTensor<Self>, device: &Device<Self>) -> FloatTensor<Self> {
#[derive(Debug)]
struct ToDevice;
impl<B: Backend> Backward<B, 1> for ToDevice {
type State = B::Device;
fn backward(
self,
ops: Ops<Self::State, 1>,
grads: &mut Gradients,
_checkpointer: &mut Checkpointer,
) {
unary::<B, _>(ops.parents, ops.node, grads, |grad| {
B::float_to_device(grad, &ops.state)
});
}
}
match ToDevice
.prepare::<C>([tensor.node])
.compute_bound()
.stateful()
{
OpsKind::Tracked(prep) => {
let device_old = B::float_device(&tensor.primitive);
prep.finish(device_old, B::float_to_device(tensor.primitive, device))
}
OpsKind::UnTracked(prep) => prep.finish(B::float_to_device(tensor.primitive, device)),
}
}
fn float_empty(shape: Shape, device: &Device<Self>) -> FloatTensor<Self> {
AutodiffTensor::new(B::float_empty(shape, device))
}
fn float_add(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {
#[derive(Debug)]
struct Add;
retro_binary!(RetroAdd, B::float_add);
impl<B: Backend> Backward<B, 2> for Add {
type State = (Shape, Shape);
fn backward(
self,
ops: Ops<Self::State, 2>,
grads: &mut Gradients,
_checkpointer: &mut Checkpointer,
) {
let (shape_lhs, shape_rhs) = ops.state;
binary::<B, _, _>(
ops.parents,
ops.node,
grads,
|grad| broadcast_shape::<B>(grad, &shape_lhs),
|grad| broadcast_shape::<B>(grad, &shape_rhs),
);
}
}
match Add
.prepare::<C>([lhs.node.clone(), rhs.node.clone()])
.memory_bound()
.retro_forward(RetroAdd::<B>::new(lhs.node.id, rhs.node.id))
.parents([&lhs, &rhs])
.stateful()
{
OpsKind::Tracked(preps) => preps.finish(
(lhs.primitive.shape(), rhs.primitive.shape()),
B::float_add(lhs.primitive, rhs.primitive),
),
OpsKind::UnTracked(preps) => preps.finish(B::float_add(lhs.primitive, rhs.primitive)),
}
}
fn float_add_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {
#[derive(Debug)]
struct AddScalar;
retro_unary_scalar!(RetroAddScalar, B::float_add_scalar);
impl<B: Backend> Backward<B, 1> for AddScalar {
type State = ();
fn backward(
self,
ops: Ops<Self::State, 1>,
grads: &mut Gradients,
_checkpointer: &mut Checkpointer,
) {
unary::<B, _>(ops.parents, ops.node, grads, |grad| grad);
}
}
AddScalar
.prepare::<C>([lhs.node.clone()])
.memory_bound()
.retro_forward(RetroAddScalar::<B>::new(lhs.node.id, rhs))
.parents([&lhs])
.stateless(B::float_add_scalar(lhs.primitive, rhs))
}
fn float_sub(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {
#[derive(Debug)]
struct Sub;
retro_binary!(RetroSub, B::float_sub);
impl<B: Backend> Backward<B, 2> for Sub {
type State = (Shape, Shape);
fn backward(
self,
ops: Ops<Self::State, 2>,
grads: &mut Gradients,
_checkpointer: &mut Checkpointer,
) {
let (shape_lhs, shape_rhs) = ops.state;
binary::<B, _, _>(
ops.parents,
ops.node,
grads,
|grad| broadcast_shape::<B>(grad, &shape_lhs),
|grad| broadcast_shape::<B>(B::float_neg(grad), &shape_rhs),
);
}
}
match Sub
.prepare::<C>([lhs.node.clone(), rhs.node.clone()])
.memory_bound()
.retro_forward(RetroSub::<B>::new(lhs.node.id, rhs.node.id))
.parents([&lhs, &rhs])
.stateful()
{
OpsKind::Tracked(preps) => preps.finish(
(lhs.primitive.shape(), rhs.primitive.shape()),
B::float_sub(lhs.primitive, rhs.primitive),
),
OpsKind::UnTracked(preps) => preps.finish(B::float_sub(lhs.primitive, rhs.primitive)),
}
}
fn float_sub_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {
#[derive(Debug)]
struct SubScalar;
retro_unary_scalar!(RetroSubScalar, B::float_sub_scalar);
impl<B: Backend> Backward<B, 1> for SubScalar {
type State = ();
fn backward(
self,
ops: Ops<Self::State, 1>,
grads: &mut Gradients,
_checkpointer: &mut Checkpointer,
) {
unary::<B, _>(ops.parents, ops.node, grads, |grad| grad);
}
}
SubScalar
.prepare::<C>([lhs.node.clone()])
.memory_bound()
.retro_forward(RetroSubScalar::<B>::new(lhs.node.id, rhs))
.parents([&lhs])
.stateless(B::float_sub_scalar(lhs.primitive, rhs))
}
fn float_mul(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {
#[derive(Debug)]
struct Mul;
retro_binary!(RetroMul, B::float_mul);
impl<B: Backend> Backward<B, 2> for Mul {
type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);
fn backward(
self,
ops: Ops<Self::State, 2>,
grads: &mut Gradients,
checkpointer: &mut Checkpointer,
) {
let (lhs, rhs, broadcast) = ops.state;
let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));
let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));
binary::<B, _, _>(
ops.parents,
ops.node,
grads,
|grad| {
let grad = B::float_mul(grad, rhs.unwrap());
broadcast.backward_lhs::<B>(grad)
},
|grad| {
let grad = B::float_mul(grad, lhs.unwrap());
broadcast.backward_rhs::<B>(grad)
},
);
}
}
let lhs_tracked = lhs.is_tracked();
let rhs_tracked = rhs.is_tracked();
let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);
match Mul
.prepare::<C>([lhs.node.clone(), rhs.node.clone()])
.memory_bound()
.retro_forward(RetroMul::<B>::new(lhs.node.id, rhs.node.id))
.parents([&lhs, &rhs])
.stateful()
{
OpsKind::Tracked(mut prep) => {
let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));
let rhs_state = lhs_tracked.then(|| prep.checkpoint(&rhs));
prep.finish(
(lhs_state, rhs_state, broadcast),
B::float_mul(lhs.primitive, rhs.primitive),
)
}
OpsKind::UnTracked(prep) => prep.finish(B::float_mul(lhs.primitive, rhs.primitive)),
}
}
fn float_mul_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {
#[derive(Debug)]
struct MulScalar;
retro_unary_scalar!(RetroMulScalar, B::float_mul_scalar);
impl<B: Backend> Backward<B, 1> for MulScalar {
type State = FloatElem<B>;
fn backward(
self,
ops: Ops<Self::State, 1>,
grads: &mut Gradients,
_checkpointer: &mut Checkpointer,
) {
unary::<B, _>(ops.parents, ops.node, grads, |grad| {
B::float_mul_scalar(grad, ops.state)
});
}
}
match MulScalar
.prepare::<C>([lhs.node.clone()])
.memory_bound()
.retro_forward(RetroMulScalar::<B>::new(lhs.node.id, rhs))
.parents([&lhs])
.stateful()
{
OpsKind::Tracked(prep) => prep.finish(rhs, B::float_mul_scalar(lhs.primitive, rhs)),
OpsKind::UnTracked(prep) => prep.finish(B::float_mul_scalar(lhs.primitive, rhs)),
}
}
fn float_div(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {
#[derive(Debug)]
struct Div;
retro_binary!(RetroDiv, B::float_div);
impl<B: Backend> Backward<B, 2> for Div {
type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);
fn backward(
self,
ops: Ops<Self::State, 2>,
grads: &mut Gradients,
checkpointer: &mut Checkpointer,
) {
let (lhs, rhs, broadcast) = ops.state;
let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));
let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));
let [rhs_4lhs, rhs_4rhs] = duplicate(&ops.parents, rhs);
binary::<B, _, _>(
ops.parents,
ops.node,
grads,
|grad| {
let rhs = rhs_4lhs.unwrap();
let value = B::float_powf_scalar(rhs, -1.0);
let grad = B::float_mul(grad, value);
broadcast.backward_lhs::<B>(grad)
},
|grad| {
let rhs = rhs_4rhs.unwrap();
let lhs = lhs.unwrap();
let value = B::float_div(B::float_neg(lhs), B::float_powf_scalar(rhs, 2.0));
let grad = B::float_mul(grad, value);
broadcast.backward_rhs::<B>(grad)
},
);
}
}
let lhs_tracked = lhs.is_tracked();
let rhs_tracked = rhs.is_tracked();
let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);
match Div
.prepare::<C>([lhs.node.clone(), rhs.node.clone()])
.memory_bound()
.retro_forward(RetroDiv::<B>::new(lhs.node.id, rhs.node.id))
.parents([&lhs, &rhs])
.stateful()
{
OpsKind::Tracked(mut prep) => {
let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));
let rhs_state = (lhs_tracked || rhs_tracked).then(|| prep.checkpoint(&rhs));
prep.finish(
(lhs_state, rhs_state, broadcast),
B::float_div(lhs.primitive, rhs.primitive),
)
}
OpsKind::UnTracked(prep) => prep.finish(B::float_div(lhs.primitive, rhs.primitive)),
}
}
fn float_div_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {
#[derive(Debug)]
struct DivScalar;
retro_unary_scalar!(RetroDivScalar, B::float_div_scalar);
impl<B: Backend> Backward<B, 1> for DivScalar {
type State = FloatElem<B>;
fn backward(
self,
ops: Ops<Self::State, 1>,
grads: &mut Gradients,
_checkpointer: &mut Checkpointer,
) {
unary::<B, _>(ops.parents, ops.node, grads, |grad| {
let tmp = 1.0 / ops.state.elem::<f32>();
B::float_mul_scalar(grad, tmp.elem())
});
}
}
match DivScalar
.prepare::<C>([lhs.node.clone()])
.memory_bound()
.retro_forward(RetroDivScalar::<B>::new(lhs.node.id, rhs))
.parents([&lhs])
.stateful()
{
OpsKind::Tracked(prep) => prep.finish(rhs, B::float_div_scalar(lhs.primitive, rhs)),
OpsKind::UnTracked(prep) => prep.finish(B::float_div_scalar(lhs.primitive, rhs)),
}
}
fn float_remainder(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {
#[derive(Debug)]
struct Rem;
retro_binary!(RetroRem, B::float_remainder);
impl<B: Backend> Backward<B, 2> for Rem {
type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);
fn backward(
self,
ops: Ops<Self::State, 2>,
grads: &mut Gradients,
checkpointer: &mut Checkpointer,
) {
let (lhs, rhs, broadcast) = ops.state;
let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));
let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));
binary::<B, _, _>(
ops.parents,
ops.node,
grads,
|grad| {
// remainder(x, y) = x - floor(x / y) * y
// partial(x - floor(x / y) * y, x) = 1
broadcast.backward_lhs::<B>(grad)
},
|grad| {
// partial(x - floor(x / y) * y, y) = - floor(x / y)
let rhs = rhs.unwrap();
let lhs = lhs.unwrap();
let value = B::float_neg(B::float_floor(B::float_div(lhs, rhs)));
let grad = B::float_mul(grad, value);
broadcast.backward_rhs::<B>(grad)
},
);
}
}
let lhs_tracked = lhs.is_tracked();
let rhs_tracked = rhs.is_tracked();
let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);
match Rem
.prepare::<C>([lhs.node.clone(), rhs.node.clone()])
.memory_bound()
.retro_forward(RetroRem::<B>::new(lhs.node.id, rhs.node.id))
.parents([&lhs, &rhs])
.stateful()
{
OpsKind::Tracked(mut prep) => {
let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));
let rhs_state = (lhs_tracked || rhs_tracked).then(|| prep.checkpoint(&rhs));
prep.finish(
(lhs_state, rhs_state, broadcast),
B::float_remainder(lhs.primitive, rhs.primitive),
)
}
OpsKind::UnTracked(prep) => {
prep.finish(B::float_remainder(lhs.primitive, rhs.primitive))
}
}
}
fn float_remainder_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {
#[derive(Debug)]
struct RemainderScalar;
retro_unary_scalar!(RetroRemainderScalar, B::float_remainder_scalar);
impl<B: Backend> Backward<B, 1> for RemainderScalar {
type State = ();
fn backward(
self,
ops: Ops<Self::State, 1>,
grads: &mut Gradients,
_checkpointer: &mut Checkpointer,
) {
unary::<B, _>(ops.parents, ops.node, grads, |grad| grad);
}
}
RemainderScalar
.prepare::<C>([lhs.node.clone()])
.memory_bound()
.retro_forward(RetroRemainderScalar::<B>::new(lhs.node.id, rhs))
.parents([&lhs])
.stateless(B::float_remainder_scalar(lhs.primitive, rhs))
}
fn float_matmul(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {
#[derive(Debug)]
struct Matmul;
impl<B: Backend> Backward<B, 2> for Matmul {
type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);
fn backward(
self,
ops: Ops<Self::State, 2>,
grads: &mut Gradients,
checkpointer: &mut Checkpointer,
) {
let (lhs, rhs, broadcast) = ops.state;
let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));
let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));
binary::<B, _, _>(
ops.parents,
ops.node,
grads,
|grad| {
let rhs = B::float_transpose(rhs.unwrap());
let grad = B::float_matmul(grad, rhs);
broadcast.backward_lhs::<B>(grad)
},
|grad| {
let lhs = B::float_transpose(lhs.unwrap());
let grad = B::float_matmul(lhs, grad);
broadcast.backward_rhs::<B>(grad)
},
);
}
}
let lhs_tracked = lhs.is_tracked();
let rhs_tracked = rhs.is_tracked();
let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);
match Matmul
.prepare::<C>([lhs.node.clone(), rhs.node.clone()])
.compute_bound()
.stateful()
{
OpsKind::Tracked(mut prep) => {
let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));
let rhs_state = lhs_tracked.then(|| prep.checkpoint(&rhs));
prep.finish(
(lhs_state, rhs_state, broadcast),
B::float_matmul(lhs.primitive, rhs.primitive),
)
}
OpsKind::UnTracked(prep) => prep.finish(B::float_matmul(lhs.primitive, rhs.primitive)),
}
}
fn float_neg(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
#[derive(Debug)]
struct Neg;
retro_unary!(RetroNeg, B::float_neg);
impl<B: Backend> Backward<B, 1> for Neg {
type State = ();
fn backward(
self,
ops: Ops<Self::State, 1>,
grads: &mut Gradients,
_checkpointer: &mut Checkpointer,
) {
unary::<B, _>(ops.parents, ops.node, grads, |grad| B::float_neg(grad));
}
}
Neg.prepare::<C>([tensor.node.clone()])
.memory_bound()
.retro_forward(RetroNeg::<B>::new(tensor.node.id))
.parents([&tensor])
.stateless(B::float_neg(tensor.primitive))
}
fn float_recip(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
#[derive(Debug)]
struct Recip;
retro_unary!(RetroRecip, B::float_recip);
impl<B: Backend> Backward<B, 1> for Recip {
type State = NodeID;
fn backward(
self,
ops: Ops<Self::State, 1>,
grads: &mut Gradients,
checkpointer: &mut Checkpointer,
) {
let tensor = checkpointer.retrieve_node_output(ops.state);
unary::<B, _>(ops.parents, ops.node, grads, |grad| {
let tmp = B::float_powf_scalar(tensor, -2.0);
let value = B::float_neg(tmp);
B::float_mul(grad, value)
});
}
}
match Recip
.prepare::<C>([tensor.node.clone()])
.memory_bound()
.retro_forward(RetroRecip::<B>::new(tensor.node.id))
.parents([&tensor])
.stateful()
{
OpsKind::Tracked(mut prep) => {
let state = prep.checkpoint(&tensor);
prep.finish(state, B::float_recip(tensor.primitive))
}
OpsKind::UnTracked(prep) => prep.finish(B::float_recip(tensor.primitive)),
}
}
fn float_swap_dims(tensor: FloatTensor<Self>, dim1: usize, dim2: usize) -> FloatTensor<Self> {
#[derive(Debug)]
struct SwapDim;
#[derive(new, Debug)]
struct RetroSwapDims<B: Backend> {
input_id: NodeID,
dim1: usize,
dim2: usize,
_backend: PhantomData<B>,
}
impl<B: Backend> RetroForward for RetroSwapDims<B> {
fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);
let out = B::float_swap_dims(input, self.dim1, self.dim2);
states.save(out_node, out)
}
}
impl<B: Backend> Backward<B, 1> for SwapDim {
type State = (usize, usize);
fn backward(
self,
ops: Ops<Self::State, 1>,
grads: &mut Gradients,
_checkpointer: &mut Checkpointer,
) {
let (dim1, dim2) = ops.state;
unary::<B, _>(ops.parents, ops.node, grads, |grad| {
B::float_swap_dims(grad, dim2, dim1)
});
}
}
match SwapDim
.prepare::<C>([tensor.node.clone()])
.memory_bound()
.retro_forward(RetroSwapDims::<B>::new(tensor.node.id, dim1, dim2))
.parents([&tensor])
.stateful()
{
OpsKind::Tracked(prep) => prep.finish(
(dim1, dim2),
B::float_swap_dims(tensor.primitive, dim1, dim2),
),
OpsKind::UnTracked(prep) => {
prep.finish(B::float_swap_dims(tensor.primitive, dim1, dim2))
}
}
}
fn float_permute(tensor: FloatTensor<Self>, axes: &[usize]) -> FloatTensor<Self> {
#[derive(Debug)]
struct PermuteDim;
#[derive(new, Debug)]
struct RetroPermuteDims<B: Backend> {
input_id: NodeID,
axes: Vec<usize>,
_backend: PhantomData<B>,
}
impl<B: Backend> RetroForward for RetroPermuteDims<B> {
fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);
let out = B::float_permute(input, &self.axes);
states.save(out_node, out)
}
}
impl<B: Backend> Backward<B, 1> for PermuteDim {
type State = Vec<usize>;
fn backward(
self,
ops: Ops<Self::State, 1>,
grads: &mut Gradients,
_checkpointer: &mut Checkpointer,
) {
let axes = ops.state;
let mut inverse = vec![0usize; axes.len()];
axes.iter()
.enumerate()
.for_each(|(i, &axis)| inverse[axis] = i);
unary::<B, _>(ops.parents, ops.node, grads, |grad| {
B::float_permute(grad, &inverse)
});
}
}
match PermuteDim
.prepare::<C>([tensor.node.clone()])
.memory_bound()
.retro_forward(RetroPermuteDims::<B>::new(tensor.node.id, axes.to_vec()))
.parents([&tensor])
.stateful()
{
OpsKind::Tracked(prep) => {
prep.finish(axes.to_vec(), B::float_permute(tensor.primitive, axes))
}
OpsKind::UnTracked(prep) => prep.finish(B::float_permute(tensor.primitive, axes)),
}
}
fn float_flip(tensor: FloatTensor<Self>, axes: &[usize]) -> FloatTensor<Self> {
#[derive(Debug)]
struct FlipDim;
#[derive(new, Debug)]
struct RetroFlipDims<B: Backend> {
input_id: NodeID,
axes: Vec<usize>,
_backend: PhantomData<B>,
}
impl<B: Backend> RetroForward for RetroFlipDims<B> {
fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);
let out = B::float_flip(input, &self.axes);
states.save(out_node, out)
}
}
impl<B: Backend> Backward<B, 1> for FlipDim {
type State = Vec<usize>;
fn backward(
self,
ops: Ops<Self::State, 1>,
grads: &mut Gradients,
_checkpointer: &mut Checkpointer,
) {
let axes = ops.state;
unary::<B, _>(ops.parents, ops.node, grads, |grad| {
B::float_flip(grad, &axes)
});
}
}
match FlipDim
.prepare::<C>([tensor.node.clone()])
.memory_bound()
.retro_forward(RetroFlipDims::<B>::new(tensor.node.id, axes.to_vec()))
.parents([&tensor])
.stateful()
{
OpsKind::Tracked(prep) => {
prep.finish(axes.to_vec(), B::float_flip(tensor.primitive, axes))
}
OpsKind::UnTracked(prep) => prep.finish(B::float_flip(tensor.primitive, axes)),
}
}
fn float_reshape(tensor: FloatTensor<Self>, shape: Shape) -> FloatTensor<Self> {
#[derive(Debug)]
struct ReshapeDim;
#[derive(new, Debug)]
struct RetroReshape<B: Backend> {
input_id: NodeID,
shape: Shape,
_backend: PhantomData<B>,
}
impl<B: Backend> RetroForward for RetroReshape<B> {
fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);
let out = B::float_reshape(input, self.shape.clone());
states.save(out_node, out)
}
}
impl<B: Backend> Backward<B, 1> for ReshapeDim {
type State = (Shape, Shape);
fn backward(
self,
ops: Ops<Self::State, 1>,
grads: &mut Gradients,
_checkpointer: &mut Checkpointer,
) {
let (shape_original, shape) = ops.state;
let ndims_out = shape.num_dims();
unary::<B, _>(ops.parents, ops.node, grads, |grad| {
let shape_grad = grad.shape();
let mut grad = grad;
for i in 0..ndims_out {
if shape.dims[i] == 1 && shape_grad.dims[i] != 1 {
grad = B::float_sum_dim(grad, i);
}
}
B::float_reshape(grad, shape_original)
});
}
}
match ReshapeDim
.prepare::<C>([tensor.node.clone()])
.memory_bound()
.retro_forward(RetroReshape::<B>::new(tensor.node.id, shape.clone()))
.parents([&tensor])
.stateful()
{
OpsKind::Tracked(prep) => prep.finish(
(tensor.primitive.shape(), shape.clone()),
B::float_reshape(tensor.primitive, shape),
),
OpsKind::UnTracked(prep) => prep.finish(B::float_reshape(tensor.primitive, shape)),
}
}
fn float_gather(
dim: usize,
tensor: FloatTensor<Self>,
indices: IntTensor<B>,
) -> FloatTensor<Self> {
#[derive(Debug)]
struct Gather;
impl<B: Backend> Backward<B, 1> for Gather {
type State = (usize, IntTensor<B>, Shape, B::Device);
fn backward(
self,
ops: Ops<Self::State, 1>,
grads: &mut Gradients,
_checkpointer: &mut Checkpointer,
) {
let (dim, indices, shape, device) = ops.state;
unary::<B, _>(ops.parents, ops.node, grads, |grad| {
let zeros = B::float_zeros(shape, &device);
B::float_scatter(dim, zeros, indices, grad)
});
}
}
match Gather
.prepare::<C>([tensor.node])
.compute_bound()
.stateful()
{
OpsKind::Tracked(prep) => prep.finish(
(
dim,
indices.clone(),
tensor.primitive.shape(),
B::float_device(&tensor.primitive),
),
B::float_gather(dim, tensor.primitive, indices),
),
OpsKind::UnTracked(prep) => {
prep.finish(B::float_gather(dim, tensor.primitive, indices))
}
}
}
fn float_scatter(
dim: usize,
tensor: FloatTensor<Self>,
indices: IntTensor<B>,
value: FloatTensor<Self>,
) -> FloatTensor<Self> {
#[derive(Debug)]
struct Scatter;
impl<B: Backend> Backward<B, 2> for Scatter {
type State = (usize, IntTensor<B>, Shape, Shape, B::Device);
fn backward(
self,
ops: Ops<Self::State, 2>,
grads: &mut Gradients,
_checkpointer: &mut Checkpointer,
) {
let (dim, indices, shape_lhs, shape_rhs, device) = ops.state;
let [indices_4lhs, indices_4rhs] = duplicate(&ops.parents, Some(indices));
binary::<B, _, _>(
ops.parents,
ops.node,
grads,
|grad| {
let zeros = B::float_zeros(shape_lhs, &device);
B::float_scatter(dim, grad, indices_4lhs.unwrap(), zeros)
},
|grad| {
let zeros = B::float_zeros(shape_rhs, &device);
B::float_scatter(dim, zeros, indices_4rhs.unwrap(), grad)
},
);
}
}
match Scatter
.prepare::<C>([tensor.node, value.node])
.compute_bound()
.stateful()
{
OpsKind::Tracked(prep) => prep.finish(
(
dim,
indices.clone(),
tensor.primitive.shape(),
value.primitive.shape(),
B::float_device(&value.primitive),
),
B::float_scatter(dim, tensor.primitive, indices, value.primitive),
),
OpsKind::UnTracked(prep) => prep.finish(B::float_scatter(
dim,
tensor.primitive,
indices,
value.primitive,
)),
}
}
fn float_select(
tensor: FloatTensor<Self>,
dim: usize,
indices: IntTensor<B>,
) -> FloatTensor<Self> {
#[derive(Debug)]
struct Select;
#[derive(new, Debug)]
struct RetroSelect<B: Backend> {
input_id: NodeID,
dim: usize,
indices: IntTensor<B>,
}
impl<B: Backend> RetroForward for RetroSelect<B> {
fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);
let out = B::float_select(input, self.dim, self.indices.clone());
states.save(out_node, out)
}
}
impl<B: Backend> Backward<B, 1> for Select {
type State = (usize, IntTensor<B>, Shape, B::Device);
fn backward(
self,
ops: Ops<Self::State, 1>,
grads: &mut Gradients,
_checkpointer: &mut Checkpointer,
) {
let (dim, indices, shape, device) = ops.state;
unary::<B, _>(ops.parents, ops.node, grads, |grad| {
let zeros = B::float_zeros(shape, &device);
B::float_select_assign(zeros, dim, indices, grad)
});
}
}
match Select
.prepare::<C>([tensor.node.clone()])
.memory_bound()
.retro_forward(RetroSelect::<B>::new(tensor.node.id, dim, indices.clone()))
.parents([&tensor])
.stateful()
{
OpsKind::Tracked(prep) => prep.finish(
(
dim,
indices.clone(),
tensor.primitive.shape(),
B::float_device(&tensor.primitive),
),
B::float_select(tensor.primitive, dim, indices),
),
OpsKind::UnTracked(prep) => {
prep.finish(B::float_select(tensor.primitive, dim, indices))
}
}
}
    /// Index-select assign along `dim` with autodiff support.
    fn float_select_assign(
        tensor: FloatTensor<Self>,
        dim: usize,
        indices: IntTensor<B>,
        value: FloatTensor<Self>,
    ) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct IndexSelectDimAssign;
        // Re-runs the forward assign from checkpointed tensor/value states.
        #[derive(new, Debug)]
        struct RetroSelectAssign<B: Backend> {
            tensor_id: NodeID,
            dim: usize,
            indices: IntTensor<B>,
            value_id: NodeID,
        }
        impl<B: Backend> RetroForward for RetroSelectAssign<B> {
            fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
                let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);
                let value = states.get_state::<B::FloatTensorPrimitive>(&self.value_id);
                let out = B::float_select_assign(tensor, self.dim, self.indices.clone(), value);
                states.save(out_node, out)
            }
        }
        impl<B: Backend> Backward<B, 2> for IndexSelectDimAssign {
            type State = (usize, IntTensor<B>);
            fn backward(
                self,
                ops: Ops<Self::State, 2>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (dim, indices) = ops.state;
                binary::<B, _, _>(
                    ops.parents,
                    ops.node,
                    grads,
                    // Base tensor grad passes through unchanged.
                    |grad| grad,
                    // Value grad is the incoming grad gathered at the indices.
                    |grad| B::float_select(grad, dim, indices),
                );
            }
        }
        match IndexSelectDimAssign
            .prepare::<C>([tensor.node.clone(), value.node.clone()])
            .memory_bound()
            .retro_forward(RetroSelectAssign::<B>::new(
                tensor.node.id,
                dim,
                indices.clone(),
                value.node.id,
            ))
            .parents([&tensor, &value])
            .stateful()
        {
            OpsKind::Tracked(prep) => prep.finish(
                (dim, indices.clone()),
                B::float_select_assign(tensor.primitive, dim, indices, value.primitive),
            ),
            OpsKind::UnTracked(prep) => prep.finish(B::float_select_assign(
                tensor.primitive,
                dim,
                indices,
                value.primitive,
            )),
        }
    }
    /// Slice by ranges with autodiff support.
    fn float_slice(
        tensor: FloatTensor<Self>,
        ranges: &[core::ops::Range<usize>],
    ) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Index;
        // Re-runs the forward slice from a checkpointed input.
        #[derive(new, Debug)]
        struct RetroSlice<B: Backend> {
            tensor_id: NodeID,
            ranges: Vec<core::ops::Range<usize>>,
            _backend: PhantomData<B>,
        }
        impl<B: Backend> RetroForward for RetroSlice<B> {
            fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
                let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);
                let out = B::float_slice(tensor, &self.ranges);
                states.save(out_node, out)
            }
        }
        impl<B: Backend> Backward<B, 1> for Index {
            type State = (Vec<core::ops::Range<usize>>, Shape, B::Device);
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (ranges, shape, device) = ops.state;
                // Gradient of slice: write the incoming grad into the sliced
                // region of a zero tensor shaped like the input.
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    let zeros = B::float_zeros(shape, &device);
                    B::float_slice_assign(zeros, &ranges, grad)
                });
            }
        }
        match Index
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroSlice::<B>::new(tensor.node.id, ranges.to_vec()))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(prep) => prep.finish(
                (
                    ranges.to_vec(),
                    tensor.primitive.shape(),
                    B::float_device(&tensor.primitive),
                ),
                B::float_slice(tensor.primitive, ranges),
            ),
            OpsKind::UnTracked(prep) => prep.finish(B::float_slice(tensor.primitive, ranges)),
        }
    }
    /// Slice-assign with autodiff support.
    fn float_slice_assign(
        tensor: FloatTensor<Self>,
        ranges: &[core::ops::Range<usize>],
        value: FloatTensor<Self>,
    ) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct SliceAssign;
        // Re-runs the forward slice-assign from checkpointed tensor/value.
        #[derive(new, Debug)]
        struct RetroSliceAssign<B: Backend> {
            tensor_id: NodeID,
            ranges: Vec<core::ops::Range<usize>>,
            value_id: NodeID,
            _backend: PhantomData<B>,
        }
        impl<B: Backend> RetroForward for RetroSliceAssign<B> {
            fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
                let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);
                let value = states.get_state::<B::FloatTensorPrimitive>(&self.value_id);
                let out = B::float_slice_assign(tensor, &self.ranges, value);
                states.save(out_node, out)
            }
        }
        impl<B: Backend> Backward<B, 2> for SliceAssign {
            type State = (Vec<core::ops::Range<usize>>, Shape, B::Device);
            fn backward(
                self,
                ops: Ops<Self::State, 2>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (ranges, shape_rhs, device) = ops.state;
                // The ranges are needed by both closures; duplicate them only
                // for the parents that are actually tracked.
                let [ranges_4lhs, ranges_4rhs] = duplicate(&ops.parents, Some(ranges));
                binary::<B, _, _>(
                    ops.parents,
                    ops.node,
                    grads,
                    // Base tensor grad: incoming grad with the overwritten
                    // region zeroed out (those elements came from `value`).
                    |grad| {
                        let zeros = B::float_zeros(shape_rhs, &device);
                        B::float_slice_assign(grad, &ranges_4lhs.unwrap(), zeros)
                    },
                    // Value grad: the slice of the incoming grad.
                    |grad| B::float_slice(grad, &ranges_4rhs.unwrap()),
                );
            }
        }
        match SliceAssign
            .prepare::<C>([tensor.node.clone(), value.node.clone()])
            .memory_bound()
            .retro_forward(RetroSliceAssign::<B>::new(
                tensor.node.id,
                ranges.to_vec(),
                value.node.id,
            ))
            .parents([&tensor, &value])
            .stateful()
        {
            OpsKind::Tracked(prep) => prep.finish(
                (
                    ranges.to_vec(),
                    value.primitive.shape(),
                    B::float_device(&value.primitive),
                ),
                B::float_slice_assign(tensor.primitive, ranges, value.primitive),
            ),
            OpsKind::UnTracked(prep) => prep.finish(B::float_slice_assign(
                tensor.primitive,
                ranges,
                value.primitive,
            )),
        }
    }
    /// Masked where-select with autodiff support: picks `source` where `mask`
    /// is true, `tensor` elsewhere.
    fn float_mask_where(
        tensor: FloatTensor<Self>,
        mask: BoolTensor<Self>,
        source: FloatTensor<Self>,
    ) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct MaskWhere;
        impl<B: Backend> Backward<B, 2> for MaskWhere {
            type State = (BoolTensor<B>, Shape, Shape, B::Device);
            fn backward(
                self,
                ops: Ops<Self::State, 2>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (mask, shape_lhs, shape_rhs, device) = ops.state;
                // The mask is needed by both closures; duplicate it only for
                // the parents that are actually tracked.
                let [mask_4lhs, mask_4rhs] = duplicate(&ops.parents, Some(mask));
                binary::<B, _, _>(
                    ops.parents,
                    ops.node,
                    grads,
                    // Tensor grad: zero where the mask selected `source`,
                    // then reduce back to the (possibly broadcast) lhs shape.
                    |grad| {
                        let zeros = B::float_zeros(shape_lhs.clone(), &device);
                        let grad = B::float_mask_where(grad, mask_4lhs.unwrap(), zeros);
                        broadcast_shape::<B>(grad, &shape_lhs)
                    },
                    // Source grad: keep grad only where the mask is true.
                    |grad| {
                        let zeros = B::float_zeros(shape_rhs.clone(), &device);
                        let grad = B::float_mask_where(zeros, mask_4rhs.unwrap(), grad);
                        broadcast_shape::<B>(grad, &shape_rhs)
                    },
                );
            }
        }
        match MaskWhere
            .prepare::<C>([tensor.node, source.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => prep.finish(
                (
                    mask.clone(),
                    tensor.primitive.shape(),
                    source.primitive.shape(),
                    B::float_device(&source.primitive),
                ),
                B::float_mask_where(tensor.primitive, mask, source.primitive),
            ),
            OpsKind::UnTracked(prep) => prep.finish(B::float_mask_where(
                tensor.primitive,
                mask,
                source.primitive,
            )),
        }
    }
    /// Masked fill with a scalar, with autodiff support.
    fn float_mask_fill(
        tensor: FloatTensor<Self>,
        mask: BoolTensor<B>,
        value: FloatElem<B>,
    ) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct MaskFill;
        impl<B: Backend> Backward<B, 1> for MaskFill {
            type State = BoolTensor<B>;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                // Filled positions came from the constant, so their gradient
                // contribution is zeroed; the rest passes through.
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    B::float_mask_fill(grad, ops.state, 0.elem())
                });
            }
        }
        match MaskFill
            .prepare::<C>([tensor.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => prep.finish(
                mask.clone(),
                B::float_mask_fill(tensor.primitive, mask, value),
            ),
            OpsKind::UnTracked(prep) => {
                prep.finish(B::float_mask_fill(tensor.primitive, mask, value))
            }
        }
    }
    // Comparison ops produce boolean tensors and are not differentiable, so
    // they forward straight to the inner backend without graph tracking.
    fn float_equal(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {
        B::float_equal(lhs.primitive, rhs.primitive)
    }
    fn float_equal_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {
        B::float_equal_elem(lhs.primitive, rhs)
    }
    fn float_greater(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {
        B::float_greater(lhs.primitive, rhs.primitive)
    }
    fn float_greater_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {
        B::float_greater_elem(lhs.primitive, rhs)
    }
    fn float_greater_equal(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {
        B::float_greater_equal(lhs.primitive, rhs.primitive)
    }
    fn float_greater_equal_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {
        B::float_greater_equal_elem(lhs.primitive, rhs)
    }
    fn float_lower(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {
        B::float_lower(lhs.primitive, rhs.primitive)
    }
    fn float_lower_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {
        B::float_lower_elem(lhs.primitive, rhs)
    }
    fn float_lower_equal(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {
        B::float_lower_equal(lhs.primitive, rhs.primitive)
    }
    fn float_lower_equal_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {
        B::float_lower_equal_elem(lhs.primitive, rhs)
    }
fn float_detach(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
// When we detach a tensor, we remove it from the graph, but we still want to keep the
// `require_grad` setting.
let is_require_grad = Self::float_is_require_grad(&tensor);
let tensor = AutodiffTensor::new(tensor.primitive);
match is_require_grad {
true => tensor.require_grad(),
false => tensor,
}
}
fn float_set_require_grad(tensor: FloatTensor<Self>, require_grad: bool) -> FloatTensor<Self> {
if require_grad {
return tensor.require_grad();
}
AutodiffTensor::new(tensor.primitive)
}
    /// Whether this tensor is a gradient root (its node requirement is `Grad`).
    fn float_is_require_grad(tensor: &FloatTensor<Self>) -> bool {
        matches!(tensor.node.requirement, Requirement::Grad)
    }
    /// Full-tensor mean with autodiff support.
    fn float_mean(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Mean;
        impl<B: Backend> Backward<B, 1> for Mean {
            type State = Shape;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                // d(mean)/dx_i = 1/N: broadcast grad/N back to the input shape.
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    let shape = ops.state;
                    let val = 1_f64 / shape.num_elements() as f64;
                    let ones = B::float_ones(shape, &B::float_device(&grad));
                    let val = B::float_mul_scalar(ones, val.elem());
                    let grad = unsqueeze_like::<B>(grad, val.shape());
                    B::float_mul(val, grad)
                });
            }
        }
        match Mean.prepare::<C>([tensor.node]).compute_bound().stateful() {
            OpsKind::Tracked(prep) => {
                prep.finish(tensor.primitive.shape(), B::float_mean(tensor.primitive))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_mean(tensor.primitive)),
        }
    }
    /// Full-tensor sum with autodiff support.
    fn float_sum(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Sum;
        impl<B: Backend> Backward<B, 1> for Sum {
            type State = Shape;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                // d(sum)/dx_i = 1: broadcast grad back to the input shape.
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    let val = B::float_ones(ops.state, &B::float_device(&grad));
                    let grad = unsqueeze_like::<B>(grad, val.shape());
                    B::float_mul(val, grad)
                });
            }
        }
        match Sum.prepare::<C>([tensor.node]).compute_bound().stateful() {
            OpsKind::Tracked(prep) => {
                prep.finish(tensor.primitive.shape(), B::float_sum(tensor.primitive))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_sum(tensor.primitive)),
        }
    }
    /// Mean along one dimension with autodiff support.
    fn float_mean_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct MeanDim;
        impl<B: Backend> Backward<B, 1> for MeanDim {
            type State = (Shape, usize);
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (shape, dim) = ops.state;
                // Each input element contributed 1/dims[dim] to the mean, so
                // broadcast grad/dims[dim] back across the reduced dimension.
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    let val = 1_f64 / shape.dims[dim] as f64;
                    let ones = B::float_ones(shape, &B::float_device(&grad));
                    let val = B::float_mul_scalar(ones, B::FloatElem::from_elem(val));
                    let grad = B::float_sum_dim(grad, dim);
                    B::float_mul(val, grad)
                });
            }
        }
        match MeanDim
            .prepare::<C>([tensor.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => prep.finish(
                (tensor.primitive.shape(), dim),
                B::float_mean_dim(tensor.primitive, dim),
            ),
            OpsKind::UnTracked(prep) => prep.finish(B::float_mean_dim(tensor.primitive, dim)),
        }
    }
    /// Sum along one dimension with autodiff support.
    fn float_sum_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct SumDim;
        impl<B: Backend> Backward<B, 1> for SumDim {
            type State = (Shape, usize);
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (shape, dim) = ops.state;
                // Broadcast the incoming grad back across the reduced
                // dimension by multiplying with a ones tensor of input shape.
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    let ones = B::float_ones(shape, &B::float_device(&grad));
                    let grad = B::float_sum_dim(grad, dim);
                    B::float_mul(ones, grad)
                });
            }
        }
        match SumDim
            .prepare::<C>([tensor.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => prep.finish(
                (tensor.primitive.shape(), dim),
                B::float_sum_dim(tensor.primitive, dim),
            ),
            OpsKind::UnTracked(prep) => prep.finish(B::float_sum_dim(tensor.primitive, dim)),
        }
    }
    // Index-returning reductions are not differentiable: forward directly to
    // the inner backend without graph tracking.
    fn float_argmax(tensor: FloatTensor<Self>, dim: usize) -> IntTensor<B> {
        B::float_argmax(tensor.primitive, dim)
    }
    fn float_argmin(tensor: FloatTensor<Self>, dim: usize) -> IntTensor<B> {
        B::float_argmin(tensor.primitive, dim)
    }
    /// Element-wise exponential with autodiff support.
    fn float_exp(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Exp;
        retro_unary!(RetroExp, B::float_exp);
        impl<B: Backend> Backward<B, 1> for Exp {
            type State = NodeID;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                // d/dx exp(x) = exp(x): recompute the output from the
                // checkpointed input and multiply by the incoming grad.
                let input = checkpointer.retrieve_node_output(ops.state);
                let output = B::float_exp(input);
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    B::float_mul(grad, output)
                });
            }
        }
        match Exp
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroExp::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                let state = prep.checkpoint(&tensor);
                prep.finish(state, B::float_exp(tensor.primitive))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_exp(tensor.primitive)),
        }
    }
    /// Element-wise natural logarithm with autodiff support.
    fn float_log(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Log;
        retro_unary!(RetroLog, B::float_log);
        impl<B: Backend> Backward<B, 1> for Log {
            type State = NodeID;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                // d/dx ln(x) = 1/x, computed as x^(-1).
                let input = checkpointer.retrieve_node_output(ops.state);
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    let value = B::float_powf_scalar(input, -1.0);
                    B::float_mul(grad, value)
                });
            }
        }
        match Log
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroLog::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                let state = prep.checkpoint(&tensor);
                prep.finish(state, B::float_log(tensor.primitive))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_log(tensor.primitive)),
        }
    }
    /// Element-wise `ln(1 + x)` with autodiff support.
    fn float_log1p(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Log1P;
        retro_unary!(RetroLog1P, B::float_log1p);
        impl<B: Backend> Backward<B, 1> for Log1P {
            type State = NodeID;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                // d/dx ln(1 + x) = 1/(1 + x), computed as (x + 1)^(-1).
                let input = checkpointer.retrieve_node_output(ops.state);
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    let value = B::float_add_scalar(input, 1.elem());
                    let value = B::float_powf_scalar(value, -1.0);
                    B::float_mul(grad, value)
                });
            }
        }
        match Log1P
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroLog1P::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                let state = prep.checkpoint(&tensor);
                prep.finish(state, B::float_log1p(tensor.primitive))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_log1p(tensor.primitive)),
        }
    }
    /// Element-wise power with a scalar exponent, with autodiff support.
    fn float_powf_scalar(tensor: FloatTensor<Self>, value: f32) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct PowfScalar;
        // Re-runs the forward pow from a checkpointed input.
        #[derive(new, Debug)]
        struct RetroPowfScalar<B: Backend> {
            lhs_id: NodeID,
            rhs: f32,
            _backend: PhantomData<B>,
        }
        impl<B: Backend> RetroForward for RetroPowfScalar<B> {
            fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
                let lhs = states.get_state::<B::FloatTensorPrimitive>(&self.lhs_id);
                let out = B::float_powf_scalar(lhs, self.rhs);
                states.save(out_node, out)
            }
        }
        impl<B: Backend> Backward<B, 1> for PowfScalar {
            type State = (NodeID, f32);
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                let (tensor_id, value) = ops.state;
                let tensor = checkpointer.retrieve_node_output(tensor_id);
                // d/dx x^p = p * x^(p - 1).
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    let tmp = B::float_powf_scalar(tensor, value - 1.0);
                    let value = B::float_mul_scalar(tmp, value.elem());
                    B::float_mul(grad, value)
                });
            }
        }
        match PowfScalar
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroPowfScalar::<B>::new(tensor.node.id, value))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                let state = (prep.checkpoint(&tensor), value);
                prep.finish(state, B::float_powf_scalar(tensor.primitive, value))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_powf_scalar(tensor.primitive, value)),
        }
    }
    /// Element-wise square root with autodiff support.
    fn float_sqrt(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Sqrt;
        retro_unary!(RetroSqrt, B::float_sqrt);
        impl<B: Backend> Backward<B, 1> for Sqrt {
            type State = NodeID;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                // d/dx sqrt(x) = 1 / (2 * sqrt(x)), computed as x^(-1/2) / 2.
                let input = checkpointer.retrieve_node_output(ops.state);
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    let value = B::float_div_scalar(B::float_powf_scalar(input, -0.5), 2.elem());
                    B::float_mul(grad, value)
                });
            }
        }
        match Sqrt
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroSqrt::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                let state = prep.checkpoint(&tensor);
                prep.finish(state, B::float_sqrt(tensor.primitive))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_sqrt(tensor.primitive)),
        }
    }
    /// Element-wise absolute value with autodiff support.
    fn float_abs(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Abs;
        retro_unary!(RetroAbs, B::float_abs);
        impl<B: Backend> Backward<B, 1> for Abs {
            type State = NodeID;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                // d/dx |x| = sign(x), recomputed from the checkpointed input.
                let tensor: B::FloatTensorPrimitive = checkpointer.retrieve_node_output(ops.state);
                let state = B::float_sign(tensor);
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    B::float_mul(grad, state)
                });
            }
        }
        match Abs
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroAbs::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                let state = prep.checkpoint(&tensor);
                prep.finish(state, B::float_abs(tensor.primitive))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_abs(tensor.primitive)),
        }
    }
    /// Element-wise cosine with autodiff support.
    fn float_cos(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Cos;
        retro_unary!(RetroCos, B::float_cos);
        impl<B: Backend> Backward<B, 1> for Cos {
            type State = NodeID;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                // d/dx cos(x) = -sin(x).
                let input = checkpointer.retrieve_node_output(ops.state);
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    let value = B::float_neg(B::float_sin(input));
                    B::float_mul(grad, value)
                });
            }
        }
        match Cos
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroCos::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                let state = prep.checkpoint(&tensor);
                prep.finish(state, B::float_cos(tensor.primitive))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_cos(tensor.primitive)),
        }
    }
    /// Element-wise sine with autodiff support.
    fn float_sin(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Sin;
        retro_unary!(RetroSin, B::float_sin);
        impl<B: Backend> Backward<B, 1> for Sin {
            type State = NodeID;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                // d/dx sin(x) = cos(x).
                let state = checkpointer.retrieve_node_output(ops.state);
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    let value = B::float_cos(state);
                    B::float_mul(grad, value)
                });
            }
        }
        match Sin
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroSin::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                let state = prep.checkpoint(&tensor);
                prep.finish(state, B::float_sin(tensor.primitive))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_sin(tensor.primitive)),
        }
    }
    /// Element-wise hyperbolic tangent with autodiff support.
    fn float_tanh(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Tanh;
        retro_unary!(RetroTanh, B::float_tanh);
        impl<B: Backend> Backward<B, 1> for Tanh {
            type State = NodeID;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                // d/dx tanh(x) = 1 - tanh(x)^2, with tanh recomputed from the
                // checkpointed input.
                let input = checkpointer.retrieve_node_output(ops.state);
                let state = B::float_tanh(input);
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    let value = B::float_add_scalar(
                        B::float_neg(B::float_powf_scalar(state, 2.0)),
                        1.elem(),
                    );
                    B::float_mul(grad, value)
                });
            }
        }
        match Tanh
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroTanh::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                let state = prep.checkpoint(&tensor);
                prep.finish(state, B::float_tanh(tensor.primitive))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_tanh(tensor.primitive)),
        }
    }
    /// Element-wise rounding with autodiff support. The function is piecewise
    /// constant, so the gradient is zero everywhere it is defined.
    fn float_round(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Round;
        retro_unary!(RetroRound, B::float_round);
        impl<B: Backend> Backward<B, 1> for Round {
            type State = (Shape, B::Device);
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (shape, device) = ops.state;
                // Zero gradient regardless of the incoming grad.
                unary::<B, _>(ops.parents, ops.node, grads, |_grad| {
                    B::float_zeros(shape, &device)
                })
            }
        }
        match Round
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroRound::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(preps) => preps.finish(
                (tensor.primitive.shape(), B::float_device(&tensor.primitive)),
                B::float_round(tensor.primitive),
            ),
            OpsKind::UnTracked(preps) => preps.finish(B::float_round(tensor.primitive)),
        }
    }
    /// Element-wise floor with autodiff support. The function is piecewise
    /// constant, so the gradient is zero everywhere it is defined.
    fn float_floor(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Floor;
        retro_unary!(RetroFloor, B::float_floor);
        impl<B: Backend> Backward<B, 1> for Floor {
            type State = (Shape, B::Device);
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (shape, device) = ops.state;
                // Zero gradient regardless of the incoming grad.
                unary::<B, _>(ops.parents, ops.node, grads, |_grad| {
                    B::float_zeros(shape, &device)
                })
            }
        }
        match Floor
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroFloor::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(preps) => preps.finish(
                (tensor.primitive.shape(), B::float_device(&tensor.primitive)),
                B::float_floor(tensor.primitive),
            ),
            OpsKind::UnTracked(preps) => preps.finish(B::float_floor(tensor.primitive)),
        }
    }
fn float_ceil(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
#[derive(Debug)]
struct Ceil;
retro_unary!(RetroCeil, B::float_ceil);
impl<B: Backend> Backward<B, 1> for Ceil {
type State = (Shape, B::Device);
fn backward(
self,
ops: Ops<Self::State, 1>,
grads: &mut Gradients,
_checkpointer: &mut Checkpointer,
) {
let (shape, device) = ops.state;
unary::<B, _>(ops.parents, ops.node, grads, |_grad| {
B::float_zeros(shape, &device)
})
}
}
match Ceil
.prepare::<C>([tensor.node.clone()])
.memory_bound()
.retro_forward(RetroCeil::<B>::new(tensor.node.id))
.parents([&tensor])
.stateful()
{
OpsKind::Tracked(preps) => preps.finish(
(tensor.primitive.shape(), B::float_device(&tensor.primitive)),
B::float_floor(tensor.primitive),
),
OpsKind::UnTracked(preps) => preps.finish(B::float_floor(tensor.primitive)),
}
}
    /// Element-wise error function with autodiff support.
    fn float_erf(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Erf;
        retro_unary!(RetroErf, B::float_erf);
        impl<B: Backend> Backward<B, 1> for Erf {
            type State = NodeID;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                // d/dx erf(x) = 2/sqrt(pi) * exp(-x^2).
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    let ops = checkpointer.retrieve_node_output(ops.state);
                    let exponent = B::float_neg(B::float_powf_scalar(ops, 2.0));
                    let numerator = B::float_mul_scalar(B::float_exp(exponent), 2.0.elem());
                    let denominator = core::f64::consts::PI.sqrt().elem();
                    let value = B::float_div_scalar(numerator, denominator);
                    B::float_mul(grad, value)
                });
            }
        }
        match Erf
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroErf::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                let state = prep.checkpoint(&tensor);
                prep.finish(state, B::float_erf(tensor.primitive))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_erf(tensor.primitive)),
        }
    }
    /// Concatenate tensors along `dim` with autodiff support. Uses a custom
    /// `Step` (instead of the unary/binary helpers) because the op has a
    /// variable number of parents.
    fn float_cat(tensors: Vec<FloatTensor<Self>>, dim: usize) -> FloatTensor<Self> {
        #[derive(new, Debug)]
        struct CatStep<B: Backend> {
            nodes: Vec<Option<NodeRef>>,
            // The dimension of each tensor along the dim dimension.
            // This indicates the number of dimension concatenated for each tensor.
            dim_sizes: Vec<usize>,
            output: NodeRef,
            phantom: PhantomData<B>,
            dim: usize,
        }
        impl<B: Backend> Step for CatStep<B> {
            fn step(self: Box<Self>, grads: &mut Gradients, _checkpointer: &mut Checkpointer) {
                let grad = grads.consume::<B>(&self.output);
                let ranges: Vec<_> = grad.shape().dims.iter().map(|v| 0..*v).collect();
                // Walk the concatenated axis, slicing out each tracked
                // parent's segment of the incoming gradient.
                let mut current_index = 0;
                self.nodes
                    .into_iter()
                    .zip(self.dim_sizes)
                    .filter_map(|(node, dim_size)| node.map(|node| (node, dim_size)))
                    .for_each(|(node, dim_size)| {
                        let mut ranges = ranges.clone();
                        ranges[self.dim] = current_index..dim_size + current_index;
                        current_index += dim_size;
                        grads.register::<B>(node.id, B::float_slice(grad.clone(), &ranges));
                    });
            }
            fn node(&self) -> NodeID {
                self.output.id
            }
            fn parents(&self) -> Vec<NodeID> {
                self.nodes
                    .iter()
                    .filter_map(|node| node.clone())
                    .map(|node| node.id)
                    .collect()
            }
            fn depth(&self) -> usize {
                self.output.order
            }
        }
        let mut nodes = Vec::with_capacity(tensors.len());
        let mut primitives = Vec::with_capacity(tensors.len());
        let mut dim_sizes = Vec::with_capacity(tensors.len());
        tensors.into_iter().for_each(|tensor| {
            dim_sizes.push(tensor.primitive.shape().dims[dim]);
            nodes.push(tensor.node);
            primitives.push(tensor.primitive);
        });
        let requirement = Requirement::from_nodes(&nodes);
        // For simplicity, this operation does not checkpoint anything
        let cat_computing_property = ComputingProperty::Ambiguous;
        let checkpointer_builder = CheckpointerBuilder::default();
        let output = B::float_cat(primitives, dim);
        // No parent requires grad: return an untracked output without
        // registering a backward step.
        if requirement.is_none() {
            return AutodiffTensor::from_parents(
                output,
                &nodes,
                requirement,
                cat_computing_property,
            );
        }
        let output =
            AutodiffTensor::from_parents(output, &nodes, requirement, cat_computing_property);
        let nodes = nodes
            .into_iter()
            .map(|node| node.clone_if_require_grad())
            .collect::<Vec<_>>();
        let ops = CatStep::<B>::new(nodes, dim_sizes, output.node.clone(), dim);
        output.register_step(ops, checkpointer_builder)
    }
    /// Max along one dimension with autodiff support; backward is the shared
    /// `MaxMinDim` op, which needs the winning indices and input shape.
    fn float_max_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {
        match MaxMinDim
            .prepare::<C>([tensor.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => {
                let shape = tensor.primitive.shape();
                // Use the with-indices variant so the backward pass knows
                // which elements received the max.
                let (tensor, index) = B::float_max_dim_with_indices(tensor.primitive, dim);
                prep.finish((index, shape), tensor)
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_max_dim(tensor.primitive, dim)),
        }
    }
    /// Max along one dimension, also returning the winning indices.
    fn float_max_dim_with_indices(
        tensor: FloatTensor<Self>,
        dim: usize,
    ) -> (FloatTensor<Self>, IntTensor<B>) {
        match MaxMinDim
            .prepare::<C>([tensor.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => {
                let shape = tensor.primitive.shape();
                let (tensor, index) = B::float_max_dim_with_indices(tensor.primitive, dim);
                let tensor = prep.finish((index.clone(), shape), tensor);
                (tensor, index)
            }
            OpsKind::UnTracked(prep) => {
                let (tensor, index) = B::float_max_dim_with_indices(tensor.primitive, dim);
                let tensor = prep.finish(tensor);
                (tensor, index)
            }
        }
    }
    /// Min along one dimension with autodiff support; mirrors `float_max_dim`.
    fn float_min_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {
        match MaxMinDim
            .prepare::<C>([tensor.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => {
                let shape = tensor.primitive.shape();
                let (tensor, index) = B::float_min_dim_with_indices(tensor.primitive, dim);
                prep.finish((index, shape), tensor)
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_min_dim(tensor.primitive, dim)),
        }
    }
    /// Min along one dimension, also returning the winning indices.
    fn float_min_dim_with_indices(
        tensor: FloatTensor<Self>,
        dim: usize,
    ) -> (FloatTensor<Self>, IntTensor<B>) {
        match MaxMinDim
            .prepare::<C>([tensor.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => {
                let shape = tensor.primitive.shape();
                let (tensor, index) = B::float_min_dim_with_indices(tensor.primitive, dim);
                let tensor = prep.finish((index.clone(), shape), tensor);
                (tensor, index)
            }
            OpsKind::UnTracked(prep) => {
                let (tensor, index) = B::float_min_dim_with_indices(tensor.primitive, dim);
                let tensor = prep.finish(tensor);
                (tensor, index)
            }
        }
    }
    /// Cast to an int tensor. Not differentiable: forwards directly and drops
    /// the autodiff graph.
    fn float_into_int(tensor: FloatTensor<Self>) -> <Autodiff<B> as Backend>::IntTensorPrimitive {
        B::float_into_int(tensor.primitive)
    }
    /// Element-wise tensor power `lhs^rhs` with autodiff support.
    fn float_powf(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct PowF;
        retro_binary!(RetroPowf, B::float_powf);
        impl<B: Backend> Backward<B, 2> for PowF {
            type State = (NodeID, NodeID, BinaryOpsBroadcast);
            fn backward(
                self,
                ops: Ops<Self::State, 2>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                let (lhs_id, rhs_id, broadcast) = ops.state;
                let lhs: B::FloatTensorPrimitive = checkpointer.retrieve_node_output(lhs_id);
                let rhs: B::FloatTensorPrimitive = checkpointer.retrieve_node_output(rhs_id);
                // Both lhs and rhs are needed for both lhs and rhs gradients, but we clone them
                // the number of times required by the parents specification.
                let [rhs_4lhs, rhs_4rhs] = duplicate(&ops.parents, Some(rhs));
                let [lhs_4lhs, lhs_4rhs] = duplicate(&ops.parents, Some(lhs));
                binary::<B, _, _>(
                    ops.parents,
                    ops.node,
                    grads,
                    |grad| {
                        //rhs*(lhs.val**(rhs-1))*grad
                        let rhs1 = rhs_4lhs.unwrap();
                        let rhs2 = rhs1.clone();
                        let lhs = lhs_4lhs.unwrap();
                        let tmp = B::float_powf(
                            lhs,
                            B::float_sub_scalar(rhs1, B::FloatElem::from_elem(1.0)),
                        );
                        let value = B::float_mul(tmp, rhs2);
                        let grad = B::float_mul(grad, value);
                        // Reduce back across any broadcast dimensions.
                        broadcast.backward_lhs::<B>(grad)
                    },
                    |grad| {
                        //lhs**rhs * ln(lhs) * grad
                        let rhs = rhs_4rhs.unwrap();
                        let lhs1 = lhs_4rhs.unwrap();
                        let lhs2 = lhs1.clone();
                        let tmp = B::float_powf(lhs1, rhs);
                        let value = B::float_mul(tmp, B::float_log(lhs2));
                        let grad = B::float_mul(grad, value);
                        // Reduce back across any broadcast dimensions.
                        broadcast.backward_rhs::<B>(grad)
                    },
                );
            }
        }
        let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);
        match PowF
            .prepare::<C>([lhs.node.clone(), rhs.node.clone()])
            .memory_bound()
            .retro_forward(RetroPowf::<B>::new(lhs.node.id, rhs.node.id))
            .parents([&lhs, &rhs])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                let lhs_state = prep.checkpoint(&lhs);
                let rhs_state = prep.checkpoint(&rhs);
                prep.finish(
                    (lhs_state, rhs_state, broadcast),
                    B::float_powf(lhs.primitive, rhs.primitive),
                )
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_powf(lhs.primitive, rhs.primitive)),
        }
    }
    /// Element-wise sign with autodiff support; the gradient is always zero.
    fn float_sign(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Sign;
        retro_unary!(RetroSign, B::float_sign);
        impl<B: Backend> Backward<B, 1> for Sign {
            type State = ();
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                unary::<B, _>(ops.parents, ops.node, grads, |grad|
                    // Always return 0 because the derivative of the sign function
                    // does not contribute to gradient updates in a meaningful way.
                    B::float_mul_scalar(grad, 0.elem()));
            }
        }
        // Stateless: the backward pass needs nothing from the forward pass.
        Sign.prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroSign::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateless(B::float_sign(tensor.primitive))
    }
    /// Broadcast-expand to `shape` with autodiff support.
    fn float_expand(tensor: FloatTensor<Self>, shape: Shape) -> FloatTensor<Self> {
        // D1: tensor, D2: shape
        #[derive(Debug)]
        struct ExpandDim;
        // Re-runs the forward expand from a checkpointed input.
        #[derive(new, Debug)]
        struct RetroExpand<B: Backend> {
            input_id: NodeID,
            shape: Shape,
            _backend: PhantomData<B>,
        }
        impl<B: Backend> RetroForward for RetroExpand<B> {
            fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
                let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);
                let out = B::float_expand(input, self.shape.clone());
                states.save(out_node, out)
            }
        }
        impl<B: Backend> Backward<B, 1> for ExpandDim {
            type State = (Shape, Shape);
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (shape_in, shape_out) = ops.state;
                let ndims_in = shape_in.num_dims();
                let ndims_out = shape_out.num_dims();
                // Right-align the input shape against the output rank,
                // padding leading dimensions with 1 (broadcast semantics).
                let mut shape_expanded = vec![1; ndims_out];
                debug_assert!(ndims_out >= ndims_in);
                for i in 0..ndims_in {
                    shape_expanded[i + (ndims_out - ndims_in)] = shape_in.dims[i];
                }
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    let shape_grad = grad.shape();
                    let mut grad = grad;
                    // Sum the gradient over every dimension that was
                    // broadcast, then reshape to the original input shape.
                    #[allow(clippy::needless_range_loop)]
                    for i in 0..ndims_out {
                        if shape_expanded[i] == 1 && shape_grad.dims[i] != 1 {
                            grad = B::float_sum_dim(grad, i);
                        }
                    }
                    B::float_reshape(grad, shape_in)
                });
            }
        }
        match ExpandDim
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroExpand::<B>::new(tensor.node.id, shape.clone()))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(prep) => prep.finish(
                (tensor.primitive.shape(), shape.clone()),
                B::float_expand(tensor.primitive, shape),
            ),
            OpsKind::UnTracked(prep) => prep.finish(B::float_expand(tensor.primitive, shape)),
        }
    }
fn float_sort(tensor: FloatTensor<Self>, dim: usize, descending: bool) -> FloatTensor<Self> {
    // When tracked, the backward pass needs the permutation indices (plus the
    // original shape) to scatter gradients back to their pre-sort positions.
    match super::sort::SortDim
        .prepare::<C>([tensor.node])
        .compute_bound()
        .stateful()
    {
        OpsKind::Tracked(prep) => {
            let input_shape = tensor.primitive.shape();
            let (sorted, indices) =
                B::float_sort_with_indices(tensor.primitive, dim, descending);
            prep.finish((indices, input_shape), sorted)
        }
        OpsKind::UnTracked(prep) => {
            // No graph to record: the cheaper index-free sort suffices.
            prep.finish(B::float_sort(tensor.primitive, dim, descending))
        }
    }
}
fn float_sort_with_indices(
    tensor: FloatTensor<Self>,
    dim: usize,
    descending: bool,
) -> (FloatTensor<Self>, IntTensor<B>) {
    // Same tracking logic as `float_sort`, but the permutation indices are
    // also handed back to the caller (integer indices carry no gradient).
    match super::sort::SortDim
        .prepare::<C>([tensor.node])
        .compute_bound()
        .stateful()
    {
        OpsKind::Tracked(prep) => {
            let input_shape = tensor.primitive.shape();
            let (sorted, indices) =
                B::float_sort_with_indices(tensor.primitive, dim, descending);
            // The indices are both part of the backward state and returned.
            let tracked = prep.finish((indices.clone(), input_shape), sorted);
            (tracked, indices)
        }
        OpsKind::UnTracked(prep) => {
            let (sorted, indices) =
                B::float_sort_with_indices(tensor.primitive, dim, descending);
            (prep.finish(sorted), indices)
        }
    }
}
fn float_argsort(tensor: FloatTensor<Self>, dim: usize, descending: bool) -> IntTensor<B> {
    // Delegates straight to the inner backend: the result is an integer tensor
    // of sort indices, which carries no gradient, so nothing is registered on
    // the autodiff graph.
    B::float_argsort(tensor.primitive, dim, descending)
}
fn float_repeat_dim(tensor: FloatTensor<Self>, dim: usize, times: usize) -> FloatTensor<Self> {
    // Backward op for repeat along one dimension; state is (dim, times).
    #[derive(Debug)]
    struct Repeat;

    #[derive(new, Debug)]
    struct RetroRepeat<B: Backend> {
        tensor_id: NodeID,
        dim: usize,
        times: usize,
        _backend: PhantomData<B>,
    }

    impl<B: Backend> RetroForward for RetroRepeat<B> {
        fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
            let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);
            let out = B::float_repeat_dim(tensor, self.dim, self.times);
            states.save(out_node, out)
        }
    }

    impl<B: Backend> Backward<B, 1> for Repeat {
        type State = (usize, usize);

        fn backward(
            self,
            ops: Ops<Self::State, 1>,
            grads: &mut Gradients,
            _checkpointer: &mut Checkpointer,
        ) {
            let (dim, times) = ops.state;

            unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                // The repeated axis of the gradient has size orig * times;
                // fold the repeats back onto the original axis by summing.
                let mut grad_dims = grad.shape().dims;
                let original_size = grad_dims[dim] / times;

                if original_size > 1 {
                    grad_dims[dim] = original_size;
                    let target_dims = grad_dims.clone();
                    // View as [..., original_size, times, ...] so the repeats
                    // sit on their own axis, then reduce that axis.
                    grad_dims.insert(dim + 1, times);
                    let split = B::float_reshape(grad, Shape::from(grad_dims));
                    let summed = B::float_sum_dim(split, dim + 1);
                    B::float_reshape(summed, Shape::from(target_dims))
                } else {
                    // Original axis had size 1: summing the whole axis is
                    // exactly the reduction over the repeats.
                    B::float_sum_dim(grad, dim)
                }
            });
        }
    }

    match Repeat
        .prepare::<C>([tensor.node.clone()])
        .memory_bound()
        .retro_forward(RetroRepeat::<B>::new(tensor.node.id, dim, times))
        .parents([&tensor])
        .stateful()
    {
        OpsKind::Tracked(prep) => prep.finish(
            (dim, times),
            B::float_repeat_dim(tensor.primitive, dim, times),
        ),
        OpsKind::UnTracked(prep) => {
            prep.finish(B::float_repeat_dim(tensor.primitive, dim, times))
        }
    }
}
fn float_cast(tensor: FloatTensor<Self>, dtype: burn_tensor::FloatDType) -> FloatTensor<Self> {
    // Wraps the cast result in a fresh `AutodiffTensor`, i.e. the cast is NOT
    // registered as an operation on the autodiff graph.
    // NOTE(review): this means gradients will not flow back through the cast
    // to `tensor` — confirm this break in the graph is intended.
    AutodiffTensor::new(B::float_cast(tensor.primitive, dtype))
}
// TODO: Implement float_prod and float_sum
// https://github.com/tracel-ai/burn/issues/1458
}
/// Records whether a binary float op broadcast its operands, so the backward
/// pass knows whether each operand's gradient must be reduced back to that
/// operand's original shape.
#[derive(Debug, Clone)]
enum BinaryOpsBroadcast {
    /// Shapes differed in at least one dimension: holds (lhs shape, rhs shape).
    Broadcasted(Shape, Shape),
    /// Shapes matched exactly; gradients pass through unchanged.
    None,
}
impl BinaryOpsBroadcast {
    /// Compares the two operand shapes dimension by dimension and records
    /// them when any dimension differs (i.e. broadcasting occurred).
    fn new<B: Backend>(lhs: &B::FloatTensorPrimitive, rhs: &B::FloatTensorPrimitive) -> Self {
        let shape_lhs = lhs.shape();
        let shape_rhs = rhs.shape();

        // Indexed comparison (rather than zip) keeps the original behavior
        // when the ranks differ: out-of-bounds access still panics.
        let differs = (0..shape_lhs.num_dims())
            .any(|dim| shape_rhs.dims[dim] != shape_lhs.dims[dim]);

        if differs {
            Self::Broadcasted(shape_lhs, shape_rhs)
        } else {
            Self::None
        }
    }

    /// Reduces `grad` back to the lhs operand's shape when broadcasting occurred.
    fn backward_lhs<B: Backend>(&self, grad: B::FloatTensorPrimitive) -> B::FloatTensorPrimitive {
        match self {
            Self::Broadcasted(lhs, _rhs) => broadcast_shape::<B>(grad, lhs),
            Self::None => grad,
        }
    }

    /// Reduces `grad` back to the rhs operand's shape when broadcasting occurred.
    fn backward_rhs<B: Backend>(&self, grad: B::FloatTensorPrimitive) -> B::FloatTensorPrimitive {
        match self {
            Self::Broadcasted(_lhs, rhs) => broadcast_shape::<B>(grad, rhs),
            Self::None => grad,
        }
    }
}
| rust | {
"argument_definitions": [
{
"definitions": [
"impl<B: Backend, C: CheckpointStrategy> FloatTensorOps<Self> for Autodiff<B, C> {\n fn float_from_data(data: TensorData, device: &Device<Self>) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_from_data(data, device))\n }\n\n fn float_random(\n shape: Shape,\n distribution: burn_tensor::Distribution,\n device: &Device<Self>,\n ) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_random(shape, distribution, device))\n }\n\n fn float_zeros(shape: Shape, device: &Device<Self>) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_zeros(shape, device))\n }\n\n fn float_ones(shape: Shape, device: &Device<Self>) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_ones(shape, device))\n }\n\n async fn float_into_data(tensor: FloatTensor<Self>) -> TensorData {\n B::float_into_data(tensor.primitive).await\n }\n\n fn float_device(tensor: &FloatTensor<Self>) -> Device<Self> {\n B::float_device(&tensor.primitive)\n }\n\n fn float_to_device(tensor: FloatTensor<Self>, device: &Device<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct ToDevice;\n\n impl<B: Backend> Backward<B, 1> for ToDevice {\n type State = B::Device;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_to_device(grad, &ops.state)\n });\n }\n }\n\n match ToDevice\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let device_old = B::float_device(&tensor.primitive);\n prep.finish(device_old, B::float_to_device(tensor.primitive, device))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_to_device(tensor.primitive, device)),\n }\n }\n\n fn float_empty(shape: Shape, device: &Device<Self>) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_empty(shape, device))\n }\n\n fn float_add(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Add;\n\n retro_binary!(RetroAdd, 
B::float_add);\n\n impl<B: Backend> Backward<B, 2> for Add {\n type State = (Shape, Shape);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape_lhs, shape_rhs) = ops.state;\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| broadcast_shape::<B>(grad, &shape_lhs),\n |grad| broadcast_shape::<B>(grad, &shape_rhs),\n );\n }\n }\n\n match Add\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroAdd::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (lhs.primitive.shape(), rhs.primitive.shape()),\n B::float_add(lhs.primitive, rhs.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_add(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_add_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct AddScalar;\n\n retro_unary_scalar!(RetroAddScalar, B::float_add_scalar);\n\n impl<B: Backend> Backward<B, 1> for AddScalar {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| grad);\n }\n }\n\n AddScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroAddScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateless(B::float_add_scalar(lhs.primitive, rhs))\n }\n\n fn float_sub(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sub;\n\n retro_binary!(RetroSub, B::float_sub);\n\n impl<B: Backend> Backward<B, 2> for Sub {\n type State = (Shape, Shape);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape_lhs, shape_rhs) = ops.state;\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| 
broadcast_shape::<B>(grad, &shape_lhs),\n |grad| broadcast_shape::<B>(B::float_neg(grad), &shape_rhs),\n );\n }\n }\n\n match Sub\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroSub::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (lhs.primitive.shape(), rhs.primitive.shape()),\n B::float_sub(lhs.primitive, rhs.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_sub(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_sub_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct SubScalar;\n\n retro_unary_scalar!(RetroSubScalar, B::float_sub_scalar);\n\n impl<B: Backend> Backward<B, 1> for SubScalar {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| grad);\n }\n }\n\n SubScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroSubScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateless(B::float_sub_scalar(lhs.primitive, rhs))\n }\n\n fn float_mul(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Mul;\n\n retro_binary!(RetroMul, B::float_mul);\n\n impl<B: Backend> Backward<B, 2> for Mul {\n type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (lhs, rhs, broadcast) = ops.state;\n let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));\n let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let grad = B::float_mul(grad, rhs.unwrap());\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n let grad = B::float_mul(grad, lhs.unwrap());\n 
broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let lhs_tracked = lhs.is_tracked();\n let rhs_tracked = rhs.is_tracked();\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match Mul\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroMul::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));\n let rhs_state = lhs_tracked.then(|| prep.checkpoint(&rhs));\n\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n B::float_mul(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_mul(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_mul_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct MulScalar;\n\n retro_unary_scalar!(RetroMulScalar, B::float_mul_scalar);\n\n impl<B: Backend> Backward<B, 1> for MulScalar {\n type State = FloatElem<B>;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_mul_scalar(grad, ops.state)\n });\n }\n }\n\n match MulScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroMulScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(rhs, B::float_mul_scalar(lhs.primitive, rhs)),\n OpsKind::UnTracked(prep) => prep.finish(B::float_mul_scalar(lhs.primitive, rhs)),\n }\n }\n\n fn float_div(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Div;\n\n retro_binary!(RetroDiv, B::float_div);\n\n impl<B: Backend> Backward<B, 2> for Div {\n type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut 
Checkpointer,\n ) {\n let (lhs, rhs, broadcast) = ops.state;\n let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));\n let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));\n let [rhs_4lhs, rhs_4rhs] = duplicate(&ops.parents, rhs);\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let rhs = rhs_4lhs.unwrap();\n let value = B::float_powf_scalar(rhs, -1.0);\n let grad = B::float_mul(grad, value);\n\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n let rhs = rhs_4rhs.unwrap();\n let lhs = lhs.unwrap();\n let value = B::float_div(B::float_neg(lhs), B::float_powf_scalar(rhs, 2.0));\n let grad = B::float_mul(grad, value);\n\n broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let lhs_tracked = lhs.is_tracked();\n let rhs_tracked = rhs.is_tracked();\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match Div\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroDiv::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));\n let rhs_state = (lhs_tracked || rhs_tracked).then(|| prep.checkpoint(&rhs));\n\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n B::float_div(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_div(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_div_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct DivScalar;\n\n retro_unary_scalar!(RetroDivScalar, B::float_div_scalar);\n\n impl<B: Backend> Backward<B, 1> for DivScalar {\n type State = FloatElem<B>;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let tmp = 1.0 / ops.state.elem::<f32>();\n B::float_mul_scalar(grad, tmp.elem())\n });\n }\n 
}\n\n match DivScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroDivScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(rhs, B::float_div_scalar(lhs.primitive, rhs)),\n OpsKind::UnTracked(prep) => prep.finish(B::float_div_scalar(lhs.primitive, rhs)),\n }\n }\n\n fn float_remainder(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Rem;\n\n retro_binary!(RetroRem, B::float_remainder);\n\n impl<B: Backend> Backward<B, 2> for Rem {\n type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (lhs, rhs, broadcast) = ops.state;\n let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));\n let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n // remainder(x, y) = x - floor(x / y) * y\n // partial(x - floor(x / y) * y, x) = 1\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n // partial(x - floor(x / y) * y, y) = - floor(x / y)\n let rhs = rhs.unwrap();\n let lhs = lhs.unwrap();\n let value = B::float_neg(B::float_floor(B::float_div(lhs, rhs)));\n let grad = B::float_mul(grad, value);\n broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let lhs_tracked = lhs.is_tracked();\n let rhs_tracked = rhs.is_tracked();\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match Rem\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroRem::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));\n let rhs_state = (lhs_tracked || rhs_tracked).then(|| prep.checkpoint(&rhs));\n\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n 
B::float_remainder(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_remainder(lhs.primitive, rhs.primitive))\n }\n }\n }\n\n fn float_remainder_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct RemainderScalar;\n\n retro_unary_scalar!(RetroRemainderScalar, B::float_remainder_scalar);\n\n impl<B: Backend> Backward<B, 1> for RemainderScalar {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| grad);\n }\n }\n\n RemainderScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroRemainderScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateless(B::float_remainder_scalar(lhs.primitive, rhs))\n }\n\n fn float_matmul(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Matmul;\n\n impl<B: Backend> Backward<B, 2> for Matmul {\n type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (lhs, rhs, broadcast) = ops.state;\n let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));\n let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let rhs = B::float_transpose(rhs.unwrap());\n let grad = B::float_matmul(grad, rhs);\n\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n let lhs = B::float_transpose(lhs.unwrap());\n let grad = B::float_matmul(lhs, grad);\n\n broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let lhs_tracked = lhs.is_tracked();\n let rhs_tracked = rhs.is_tracked();\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match Matmul\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n 
.compute_bound()\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));\n let rhs_state = lhs_tracked.then(|| prep.checkpoint(&rhs));\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n B::float_matmul(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_matmul(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_neg(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Neg;\n\n retro_unary!(RetroNeg, B::float_neg);\n\n impl<B: Backend> Backward<B, 1> for Neg {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| B::float_neg(grad));\n }\n }\n\n Neg.prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroNeg::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateless(B::float_neg(tensor.primitive))\n }\n\n fn float_recip(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Recip;\n\n retro_unary!(RetroRecip, B::float_recip);\n\n impl<B: Backend> Backward<B, 1> for Recip {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let tensor = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let tmp = B::float_powf_scalar(tensor, -2.0);\n let value = B::float_neg(tmp);\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Recip\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroRecip::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_recip(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_recip(tensor.primitive)),\n }\n }\n\n fn float_swap_dims(tensor: 
FloatTensor<Self>, dim1: usize, dim2: usize) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct SwapDim;\n\n #[derive(new, Debug)]\n struct RetroSwapDims<B: Backend> {\n input_id: NodeID,\n dim1: usize,\n dim2: usize,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroSwapDims<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_swap_dims(input, self.dim1, self.dim2);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for SwapDim {\n type State = (usize, usize);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim1, dim2) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_swap_dims(grad, dim2, dim1)\n });\n }\n }\n\n match SwapDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSwapDims::<B>::new(tensor.node.id, dim1, dim2))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (dim1, dim2),\n B::float_swap_dims(tensor.primitive, dim1, dim2),\n ),\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_swap_dims(tensor.primitive, dim1, dim2))\n }\n }\n }\n\n fn float_permute(tensor: FloatTensor<Self>, axes: &[usize]) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct PermuteDim;\n\n #[derive(new, Debug)]\n struct RetroPermuteDims<B: Backend> {\n input_id: NodeID,\n axes: Vec<usize>,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroPermuteDims<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_permute(input, &self.axes);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for PermuteDim {\n type State = Vec<usize>;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: 
&mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let axes = ops.state;\n\n let mut inverse = vec![0usize; axes.len()];\n axes.iter()\n .enumerate()\n .for_each(|(i, &axis)| inverse[axis] = i);\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_permute(grad, &inverse)\n });\n }\n }\n\n match PermuteDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroPermuteDims::<B>::new(tensor.node.id, axes.to_vec()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n prep.finish(axes.to_vec(), B::float_permute(tensor.primitive, axes))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_permute(tensor.primitive, axes)),\n }\n }\n\n fn float_flip(tensor: FloatTensor<Self>, axes: &[usize]) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct FlipDim;\n\n #[derive(new, Debug)]\n struct RetroFlipDims<B: Backend> {\n input_id: NodeID,\n axes: Vec<usize>,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroFlipDims<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_flip(input, &self.axes);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for FlipDim {\n type State = Vec<usize>;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let axes = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_flip(grad, &axes)\n });\n }\n }\n\n match FlipDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroFlipDims::<B>::new(tensor.node.id, axes.to_vec()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n prep.finish(axes.to_vec(), B::float_flip(tensor.primitive, axes))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_flip(tensor.primitive, axes)),\n }\n }\n\n fn float_reshape(tensor: FloatTensor<Self>, shape: 
Shape) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct ReshapeDim;\n\n #[derive(new, Debug)]\n struct RetroReshape<B: Backend> {\n input_id: NodeID,\n shape: Shape,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroReshape<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_reshape(input, self.shape.clone());\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for ReshapeDim {\n type State = (Shape, Shape);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape_original, shape) = ops.state;\n let ndims_out = shape.num_dims();\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let shape_grad = grad.shape();\n let mut grad = grad;\n\n for i in 0..ndims_out {\n if shape.dims[i] == 1 && shape_grad.dims[i] != 1 {\n grad = B::float_sum_dim(grad, i);\n }\n }\n\n B::float_reshape(grad, shape_original)\n });\n }\n }\n\n match ReshapeDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroReshape::<B>::new(tensor.node.id, shape.clone()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (tensor.primitive.shape(), shape.clone()),\n B::float_reshape(tensor.primitive, shape),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_reshape(tensor.primitive, shape)),\n }\n }\n\n fn float_gather(\n dim: usize,\n tensor: FloatTensor<Self>,\n indices: IntTensor<B>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Gather;\n\n impl<B: Backend> Backward<B, 1> for Gather {\n type State = (usize, IntTensor<B>, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, indices, shape, device) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let zeros = 
B::float_zeros(shape, &device);\n B::float_scatter(dim, zeros, indices, grad)\n });\n }\n }\n\n match Gather\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n dim,\n indices.clone(),\n tensor.primitive.shape(),\n B::float_device(&tensor.primitive),\n ),\n B::float_gather(dim, tensor.primitive, indices),\n ),\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_gather(dim, tensor.primitive, indices))\n }\n }\n }\n\n fn float_scatter(\n dim: usize,\n tensor: FloatTensor<Self>,\n indices: IntTensor<B>,\n value: FloatTensor<Self>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Scatter;\n\n impl<B: Backend> Backward<B, 2> for Scatter {\n type State = (usize, IntTensor<B>, Shape, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, indices, shape_lhs, shape_rhs, device) = ops.state;\n let [indices_4lhs, indices_4rhs] = duplicate(&ops.parents, Some(indices));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let zeros = B::float_zeros(shape_lhs, &device);\n B::float_scatter(dim, grad, indices_4lhs.unwrap(), zeros)\n },\n |grad| {\n let zeros = B::float_zeros(shape_rhs, &device);\n B::float_scatter(dim, zeros, indices_4rhs.unwrap(), grad)\n },\n );\n }\n }\n\n match Scatter\n .prepare::<C>([tensor.node, value.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n dim,\n indices.clone(),\n tensor.primitive.shape(),\n value.primitive.shape(),\n B::float_device(&value.primitive),\n ),\n B::float_scatter(dim, tensor.primitive, indices, value.primitive),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_scatter(\n dim,\n tensor.primitive,\n indices,\n value.primitive,\n )),\n }\n }\n\n fn float_select(\n tensor: FloatTensor<Self>,\n dim: usize,\n indices: IntTensor<B>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Select;\n\n 
#[derive(new, Debug)]\n struct RetroSelect<B: Backend> {\n input_id: NodeID,\n dim: usize,\n indices: IntTensor<B>,\n }\n\n impl<B: Backend> RetroForward for RetroSelect<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_select(input, self.dim, self.indices.clone());\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for Select {\n type State = (usize, IntTensor<B>, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, indices, shape, device) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let zeros = B::float_zeros(shape, &device);\n B::float_select_assign(zeros, dim, indices, grad)\n });\n }\n }\n\n match Select\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSelect::<B>::new(tensor.node.id, dim, indices.clone()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n dim,\n indices.clone(),\n tensor.primitive.shape(),\n B::float_device(&tensor.primitive),\n ),\n B::float_select(tensor.primitive, dim, indices),\n ),\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_select(tensor.primitive, dim, indices))\n }\n }\n }\n\n fn float_select_assign(\n tensor: FloatTensor<Self>,\n dim: usize,\n indices: IntTensor<B>,\n value: FloatTensor<Self>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct IndexSelectDimAssign;\n\n #[derive(new, Debug)]\n struct RetroSelectAssign<B: Backend> {\n tensor_id: NodeID,\n dim: usize,\n indices: IntTensor<B>,\n value_id: NodeID,\n }\n\n impl<B: Backend> RetroForward for RetroSelectAssign<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);\n let value = states.get_state::<B::FloatTensorPrimitive>(&self.value_id);\n 
let out = B::float_select_assign(tensor, self.dim, self.indices.clone(), value);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 2> for IndexSelectDimAssign {\n type State = (usize, IntTensor<B>);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, indices) = ops.state;\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| grad,\n |grad| B::float_select(grad, dim, indices),\n );\n }\n }\n\n match IndexSelectDimAssign\n .prepare::<C>([tensor.node.clone(), value.node.clone()])\n .memory_bound()\n .retro_forward(RetroSelectAssign::<B>::new(\n tensor.node.id,\n dim,\n indices.clone(),\n value.node.id,\n ))\n .parents([&tensor, &value])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (dim, indices.clone()),\n B::float_select_assign(tensor.primitive, dim, indices, value.primitive),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_select_assign(\n tensor.primitive,\n dim,\n indices,\n value.primitive,\n )),\n }\n }\n\n fn float_slice(\n tensor: FloatTensor<Self>,\n ranges: &[core::ops::Range<usize>],\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Index;\n\n #[derive(new, Debug)]\n struct RetroSlice<B: Backend> {\n tensor_id: NodeID,\n ranges: Vec<core::ops::Range<usize>>,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroSlice<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);\n let out = B::float_slice(tensor, &self.ranges);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for Index {\n type State = (Vec<core::ops::Range<usize>>, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (ranges, shape, device) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let zeros = 
B::float_zeros(shape, &device);\n B::float_slice_assign(zeros, &ranges, grad)\n });\n }\n }\n\n match Index\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSlice::<B>::new(tensor.node.id, ranges.to_vec()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n ranges.to_vec(),\n tensor.primitive.shape(),\n B::float_device(&tensor.primitive),\n ),\n B::float_slice(tensor.primitive, ranges),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_slice(tensor.primitive, ranges)),\n }\n }\n\n fn float_slice_assign(\n tensor: FloatTensor<Self>,\n ranges: &[core::ops::Range<usize>],\n value: FloatTensor<Self>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct SliceAssign;\n\n #[derive(new, Debug)]\n struct RetroSliceAssign<B: Backend> {\n tensor_id: NodeID,\n ranges: Vec<core::ops::Range<usize>>,\n value_id: NodeID,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroSliceAssign<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);\n let value = states.get_state::<B::FloatTensorPrimitive>(&self.value_id);\n let out = B::float_slice_assign(tensor, &self.ranges, value);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 2> for SliceAssign {\n type State = (Vec<core::ops::Range<usize>>, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (ranges, shape_rhs, device) = ops.state;\n let [ranges_4lhs, ranges_4rhs] = duplicate(&ops.parents, Some(ranges));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let zeros = B::float_zeros(shape_rhs, &device);\n B::float_slice_assign(grad, &ranges_4lhs.unwrap(), zeros)\n },\n |grad| B::float_slice(grad, &ranges_4rhs.unwrap()),\n );\n }\n }\n\n match SliceAssign\n .prepare::<C>([tensor.node.clone(), value.node.clone()])\n 
.memory_bound()\n .retro_forward(RetroSliceAssign::<B>::new(\n tensor.node.id,\n ranges.to_vec(),\n value.node.id,\n ))\n .parents([&tensor, &value])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n ranges.to_vec(),\n value.primitive.shape(),\n B::float_device(&value.primitive),\n ),\n B::float_slice_assign(tensor.primitive, ranges, value.primitive),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_slice_assign(\n tensor.primitive,\n ranges,\n value.primitive,\n )),\n }\n }\n\n fn float_mask_where(\n tensor: FloatTensor<Self>,\n mask: BoolTensor<Self>,\n source: FloatTensor<Self>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct MaskWhere;\n\n impl<B: Backend> Backward<B, 2> for MaskWhere {\n type State = (BoolTensor<B>, Shape, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (mask, shape_lhs, shape_rhs, device) = ops.state;\n let [mask_4lhs, mask_4rhs] = duplicate(&ops.parents, Some(mask));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let zeros = B::float_zeros(shape_lhs.clone(), &device);\n let grad = B::float_mask_where(grad, mask_4lhs.unwrap(), zeros);\n\n broadcast_shape::<B>(grad, &shape_lhs)\n },\n |grad| {\n let zeros = B::float_zeros(shape_rhs.clone(), &device);\n let grad = B::float_mask_where(zeros, mask_4rhs.unwrap(), grad);\n\n broadcast_shape::<B>(grad, &shape_rhs)\n },\n );\n }\n }\n\n match MaskWhere\n .prepare::<C>([tensor.node, source.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n mask.clone(),\n tensor.primitive.shape(),\n source.primitive.shape(),\n B::float_device(&source.primitive),\n ),\n B::float_mask_where(tensor.primitive, mask, source.primitive),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_mask_where(\n tensor.primitive,\n mask,\n source.primitive,\n )),\n }\n }\n\n fn float_mask_fill(\n tensor: FloatTensor<Self>,\n mask: 
        BoolTensor<B>,
        value: FloatElem<B>,
    ) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct MaskFill;

        impl<B: Backend> Backward<B, 1> for MaskFill {
            // State: the boolean mask captured during the forward pass.
            type State = BoolTensor<B>;

            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                // Positions overwritten by the constant fill value do not depend on
                // the input, so their incoming gradient is zeroed before propagating.
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    B::float_mask_fill(grad, ops.state, 0.elem())
                });
            }
        }

        match MaskFill
            .prepare::<C>([tensor.node])
            .compute_bound()
            .stateful()
        {
            // Tracked: keep a clone of the mask so backward can zero out the
            // filled positions.
            OpsKind::Tracked(prep) => prep.finish(
                mask.clone(),
                B::float_mask_fill(tensor.primitive, mask, value),
            ),
            OpsKind::UnTracked(prep) => {
                prep.finish(B::float_mask_fill(tensor.primitive, mask, value))
            }
        }
    }

    // Comparison operators are not registered on the autodiff graph: they forward
    // directly to the inner backend and return plain bool tensors, so no gradient
    // flows through them.

    fn float_equal(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {
        B::float_equal(lhs.primitive, rhs.primitive)
    }

    fn float_equal_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {
        B::float_equal_elem(lhs.primitive, rhs)
    }

    fn float_greater(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {
        B::float_greater(lhs.primitive, rhs.primitive)
    }

    fn float_greater_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {
        B::float_greater_elem(lhs.primitive, rhs)
    }

    fn float_greater_equal(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {
        B::float_greater_equal(lhs.primitive, rhs.primitive)
    }

    fn float_greater_equal_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {
        B::float_greater_equal_elem(lhs.primitive, rhs)
    }

    fn float_lower(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {
        B::float_lower(lhs.primitive, rhs.primitive)
    }

    fn float_lower_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {
        B::float_lower_elem(lhs.primitive, rhs)
    }

    fn float_lower_equal(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {
        B::float_lower_equal(lhs.primitive, rhs.primitive)
    }

    fn float_lower_equal_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {
        B::float_lower_equal_elem(lhs.primitive, rhs)
    }

    fn float_detach(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        // When we detach a tensor, we remove it from the graph, but we still want to keep the
        // `require_grad` setting.
        let is_require_grad = Self::float_is_require_grad(&tensor);
        // Re-wrapping the primitive creates a fresh node with no parents.
        let tensor = AutodiffTensor::new(tensor.primitive);

        match is_require_grad {
            true => tensor.require_grad(),
            false => tensor,
        }
    }

    fn float_set_require_grad(tensor: FloatTensor<Self>, require_grad: bool) -> FloatTensor<Self> {
        if require_grad {
            return tensor.require_grad();
        }

        // Dropping the requirement also detaches the tensor from its history.
        AutodiffTensor::new(tensor.primitive)
    }

    fn float_is_require_grad(tensor: &FloatTensor<Self>) -> bool {
        matches!(tensor.node.requirement, Requirement::Grad)
    }

    fn float_mean(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Mean;

        impl<B: Backend> Backward<B, 1> for Mean {
            // State: the shape of the input tensor, needed to rebuild the gradient.
            type State = Shape;

            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    let shape = ops.state;
                    // Each input element contributes 1/N to the mean, so the
                    // gradient is the incoming grad scaled by 1/N, broadcast
                    // back to the input shape.
                    let val = 1_f64 / shape.num_elements() as f64;
                    let ones = B::float_ones(shape, &B::float_device(&grad));
                    let val = B::float_mul_scalar(ones, val.elem());

                    let grad = unsqueeze_like::<B>(grad, val.shape());
                    B::float_mul(val, grad)
                });
            }
        }

        match Mean.prepare::<C>([tensor.node]).compute_bound().stateful() {
            OpsKind::Tracked(prep) => {
                prep.finish(tensor.primitive.shape(), B::float_mean(tensor.primitive))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_mean(tensor.primitive)),
        }
    }

    fn float_sum(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Sum;

        impl<B: Backend> Backward<B, 1> for Sum {
            // State: the input shape; the sum's gradient is the incoming grad
            // broadcast to that shape.
            type State = Shape;

            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut
Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let val = B::float_ones(ops.state, &B::float_device(&grad));\n\n let grad = unsqueeze_like::<B>(grad, val.shape());\n B::float_mul(val, grad)\n });\n }\n }\n\n match Sum.prepare::<C>([tensor.node]).compute_bound().stateful() {\n OpsKind::Tracked(prep) => {\n prep.finish(tensor.primitive.shape(), B::float_sum(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_sum(tensor.primitive)),\n }\n }\n\n fn float_mean_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct MeanDim;\n\n impl<B: Backend> Backward<B, 1> for MeanDim {\n type State = (Shape, usize);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape, dim) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let val = 1_f64 / shape.dims[dim] as f64;\n let ones = B::float_ones(shape, &B::float_device(&grad));\n let val = B::float_mul_scalar(ones, B::FloatElem::from_elem(val));\n\n let grad = B::float_sum_dim(grad, dim);\n B::float_mul(val, grad)\n });\n }\n }\n\n match MeanDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (tensor.primitive.shape(), dim),\n B::float_mean_dim(tensor.primitive, dim),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_mean_dim(tensor.primitive, dim)),\n }\n }\n\n fn float_sum_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct SumDim;\n\n impl<B: Backend> Backward<B, 1> for SumDim {\n type State = (Shape, usize);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape, dim) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let ones = B::float_ones(shape, &B::float_device(&grad));\n let grad = 
B::float_sum_dim(grad, dim);\n\n B::float_mul(ones, grad)\n });\n }\n }\n\n match SumDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (tensor.primitive.shape(), dim),\n B::float_sum_dim(tensor.primitive, dim),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_sum_dim(tensor.primitive, dim)),\n }\n }\n\n fn float_argmax(tensor: FloatTensor<Self>, dim: usize) -> IntTensor<B> {\n B::float_argmax(tensor.primitive, dim)\n }\n\n fn float_argmin(tensor: FloatTensor<Self>, dim: usize) -> IntTensor<B> {\n B::float_argmin(tensor.primitive, dim)\n }\n\n fn float_exp(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Exp;\n\n retro_unary!(RetroExp, B::float_exp);\n\n impl<B: Backend> Backward<B, 1> for Exp {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n let output = B::float_exp(input);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_mul(grad, output)\n });\n }\n }\n\n match Exp\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroExp::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_exp(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_exp(tensor.primitive)),\n }\n }\n\n fn float_log(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Log;\n\n retro_unary!(RetroLog, B::float_log);\n\n impl<B: Backend> Backward<B, 1> for Log {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = 
B::float_powf_scalar(input, -1.0);\n B::float_mul(grad, value)\n });\n }\n }\n\n match Log\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroLog::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_log(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_log(tensor.primitive)),\n }\n }\n\n fn float_log1p(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Log1P;\n\n retro_unary!(RetroLog1P, B::float_log1p);\n\n impl<B: Backend> Backward<B, 1> for Log1P {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_add_scalar(input, 1.elem());\n let value = B::float_powf_scalar(value, -1.0);\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Log1P\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroLog1P::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_log1p(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_log1p(tensor.primitive)),\n }\n }\n\n fn float_powf_scalar(tensor: FloatTensor<Self>, value: f32) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct PowfScalar;\n\n #[derive(new, Debug)]\n struct RetroPowfScalar<B: Backend> {\n lhs_id: NodeID,\n rhs: f32,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroPowfScalar<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let lhs = states.get_state::<B::FloatTensorPrimitive>(&self.lhs_id);\n let out = B::float_powf_scalar(lhs, self.rhs);\n states.save(out_node, out)\n }\n }\n\n impl<B: 
Backend> Backward<B, 1> for PowfScalar {\n type State = (NodeID, f32);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (tensor_id, value) = ops.state;\n let tensor = checkpointer.retrieve_node_output(tensor_id);\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let tmp = B::float_powf_scalar(tensor, value - 1.0);\n let value = B::float_mul_scalar(tmp, value.elem());\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match PowfScalar\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroPowfScalar::<B>::new(tensor.node.id, value))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = (prep.checkpoint(&tensor), value);\n prep.finish(state, B::float_powf_scalar(tensor.primitive, value))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_powf_scalar(tensor.primitive, value)),\n }\n }\n\n fn float_sqrt(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sqrt;\n\n retro_unary!(RetroSqrt, B::float_sqrt);\n\n impl<B: Backend> Backward<B, 1> for Sqrt {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_div_scalar(B::float_powf_scalar(input, -0.5), 2.elem());\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Sqrt\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSqrt::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_sqrt(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_sqrt(tensor.primitive)),\n }\n }\n\n fn float_abs(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Abs;\n\n 
retro_unary!(RetroAbs, B::float_abs);\n\n impl<B: Backend> Backward<B, 1> for Abs {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let tensor: B::FloatTensorPrimitive = checkpointer.retrieve_node_output(ops.state);\n let state = B::float_sign(tensor);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_mul(grad, state)\n });\n }\n }\n\n match Abs\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroAbs::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_abs(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_abs(tensor.primitive)),\n }\n }\n\n fn float_cos(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Cos;\n\n retro_unary!(RetroCos, B::float_cos);\n\n impl<B: Backend> Backward<B, 1> for Cos {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_neg(B::float_sin(input));\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Cos\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroCos::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_cos(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_cos(tensor.primitive)),\n }\n }\n\n fn float_sin(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sin;\n\n retro_unary!(RetroSin, B::float_sin);\n\n impl<B: Backend> Backward<B, 1> for Sin {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 
1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let state = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_cos(state);\n B::float_mul(grad, value)\n });\n }\n }\n\n match Sin\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSin::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_sin(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_sin(tensor.primitive)),\n }\n }\n\n fn float_tanh(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Tanh;\n\n retro_unary!(RetroTanh, B::float_tanh);\n\n impl<B: Backend> Backward<B, 1> for Tanh {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n let state = B::float_tanh(input);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_add_scalar(\n B::float_neg(B::float_powf_scalar(state, 2.0)),\n 1.elem(),\n );\n B::float_mul(grad, value)\n });\n }\n }\n\n match Tanh\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroTanh::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_tanh(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_tanh(tensor.primitive)),\n }\n }\n\n fn float_round(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Round;\n retro_unary!(RetroRound, B::float_round);\n\n impl<B: Backend> Backward<B, 1> for Round {\n type State = (Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n 
) {\n let (shape, device) = ops.state;\n unary::<B, _>(ops.parents, ops.node, grads, |_grad| {\n B::float_zeros(shape, &device)\n })\n }\n }\n\n match Round\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroRound::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (tensor.primitive.shape(), B::float_device(&tensor.primitive)),\n B::float_round(tensor.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_round(tensor.primitive)),\n }\n }\n\n fn float_floor(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Floor;\n retro_unary!(RetroFloor, B::float_floor);\n\n impl<B: Backend> Backward<B, 1> for Floor {\n type State = (Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape, device) = ops.state;\n unary::<B, _>(ops.parents, ops.node, grads, |_grad| {\n B::float_zeros(shape, &device)\n })\n }\n }\n\n match Floor\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroFloor::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (tensor.primitive.shape(), B::float_device(&tensor.primitive)),\n B::float_floor(tensor.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_floor(tensor.primitive)),\n }\n }\n\n fn float_ceil(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Ceil;\n retro_unary!(RetroCeil, B::float_ceil);\n\n impl<B: Backend> Backward<B, 1> for Ceil {\n type State = (Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape, device) = ops.state;\n unary::<B, _>(ops.parents, ops.node, grads, |_grad| {\n B::float_zeros(shape, &device)\n })\n }\n }\n\n match Ceil\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n 
.retro_forward(RetroCeil::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (tensor.primitive.shape(), B::float_device(&tensor.primitive)),\n B::float_floor(tensor.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_floor(tensor.primitive)),\n }\n }\n\n fn float_erf(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Erf;\n\n retro_unary!(RetroErf, B::float_erf);\n\n impl<B: Backend> Backward<B, 1> for Erf {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let ops = checkpointer.retrieve_node_output(ops.state);\n let exponent = B::float_neg(B::float_powf_scalar(ops, 2.0));\n let numerator = B::float_mul_scalar(B::float_exp(exponent), 2.0.elem());\n let denominator = core::f64::consts::PI.sqrt().elem();\n let value = B::float_div_scalar(numerator, denominator);\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Erf\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroErf::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_erf(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_erf(tensor.primitive)),\n }\n }\n\n fn float_cat(tensors: Vec<FloatTensor<Self>>, dim: usize) -> FloatTensor<Self> {\n #[derive(new, Debug)]\n struct CatStep<B: Backend> {\n nodes: Vec<Option<NodeRef>>,\n // The dimension of each tensor along the dim dimension.\n // This indicates the number of dimension concatenated for each tensor.\n dim_sizes: Vec<usize>,\n output: NodeRef,\n phantom: PhantomData<B>,\n dim: usize,\n }\n\n impl<B: Backend> Step for CatStep<B> {\n fn step(self: Box<Self>, grads: &mut Gradients, _checkpointer: &mut Checkpointer) {\n let grad = 
grads.consume::<B>(&self.output);\n let ranges: Vec<_> = grad.shape().dims.iter().map(|v| 0..*v).collect();\n\n let mut current_index = 0;\n\n self.nodes\n .into_iter()\n .zip(self.dim_sizes)\n .filter_map(|(node, dim_size)| node.map(|node| (node, dim_size)))\n .for_each(|(node, dim_size)| {\n let mut ranges = ranges.clone();\n ranges[self.dim] = current_index..dim_size + current_index;\n current_index += dim_size;\n grads.register::<B>(node.id, B::float_slice(grad.clone(), &ranges));\n });\n }\n\n fn node(&self) -> NodeID {\n self.output.id\n }\n\n fn parents(&self) -> Vec<NodeID> {\n self.nodes\n .iter()\n .filter_map(|node| node.clone())\n .map(|node| node.id)\n .collect()\n }\n fn depth(&self) -> usize {\n self.output.order\n }\n }\n\n let mut nodes = Vec::with_capacity(tensors.len());\n let mut primitives = Vec::with_capacity(tensors.len());\n let mut dim_sizes = Vec::with_capacity(tensors.len());\n\n tensors.into_iter().for_each(|tensor| {\n dim_sizes.push(tensor.primitive.shape().dims[dim]);\n nodes.push(tensor.node);\n primitives.push(tensor.primitive);\n });\n\n let requirement = Requirement::from_nodes(&nodes);\n\n // For simplicity, this operation does not checkpoint anything\n let cat_computing_property = ComputingProperty::Ambiguous;\n let checkpointer_builder = CheckpointerBuilder::default();\n\n let output = B::float_cat(primitives, dim);\n if requirement.is_none() {\n return AutodiffTensor::from_parents(\n output,\n &nodes,\n requirement,\n cat_computing_property,\n );\n }\n\n let output =\n AutodiffTensor::from_parents(output, &nodes, requirement, cat_computing_property);\n let nodes = nodes\n .into_iter()\n .map(|node| node.clone_if_require_grad())\n .collect::<Vec<_>>();\n\n let ops = CatStep::<B>::new(nodes, dim_sizes, output.node.clone(), dim);\n output.register_step(ops, checkpointer_builder)\n }\n\n fn float_max_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {\n match MaxMinDim\n .prepare::<C>([tensor.node])\n 
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => {
                // Save the argmax indices plus the input shape; MaxMinDim's
                // Backward impl (defined elsewhere in this file) uses them to
                // route the gradient — presumably back to the selected
                // positions; see that impl for the exact scheme.
                let shape = tensor.primitive.shape();
                let (tensor, index) = B::float_max_dim_with_indices(tensor.primitive, dim);
                prep.finish((index, shape), tensor)
            }
            // Untracked: no backward pass, so skip computing indices entirely.
            OpsKind::UnTracked(prep) => prep.finish(B::float_max_dim(tensor.primitive, dim)),
        }
    }
    fn float_max_dim_with_indices(
        tensor: FloatTensor<Self>,
        dim: usize,
    ) -> (FloatTensor<Self>, IntTensor<B>) {
        match MaxMinDim
            .prepare::<C>([tensor.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => {
                let shape = tensor.primitive.shape();
                let (tensor, index) = B::float_max_dim_with_indices(tensor.primitive, dim);
                // The index tensor is both stored as autodiff state and
                // returned to the caller, hence the clone.
                let tensor = prep.finish((index.clone(), shape), tensor);

                (tensor, index)
            }
            OpsKind::UnTracked(prep) => {
                let (tensor, index) = B::float_max_dim_with_indices(tensor.primitive, dim);
                let tensor = prep.finish(tensor);

                (tensor, index)
            }
        }
    }
    fn float_min_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {
        // Mirror of float_max_dim, using the min-with-indices kernel.
        match MaxMinDim
            .prepare::<C>([tensor.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => {
                let shape = tensor.primitive.shape();
                let (tensor, index) = B::float_min_dim_with_indices(tensor.primitive, dim);
                prep.finish((index, shape), tensor)
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_min_dim(tensor.primitive, dim)),
        }
    }
    fn float_min_dim_with_indices(
        tensor: FloatTensor<Self>,
        dim: usize,
    ) -> (FloatTensor<Self>, IntTensor<B>) {
        // Mirror of float_max_dim_with_indices for the minimum.
        match MaxMinDim
            .prepare::<C>([tensor.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => {
                let shape = tensor.primitive.shape();
                let (tensor, index) = B::float_min_dim_with_indices(tensor.primitive, dim);
                let tensor = prep.finish((index.clone(), shape), tensor);

                (tensor, index)
            }
            OpsKind::UnTracked(prep) => {
                let (tensor, index) = B::float_min_dim_with_indices(tensor.primitive, dim);
                let tensor = prep.finish(tensor);

                (tensor, index)
            }
        }
    }
fn float_into_int(tensor: FloatTensor<Self>) -> <Autodiff<B> as Backend>::IntTensorPrimitive {\n B::float_into_int(tensor.primitive)\n }\n\n fn float_powf(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct PowF;\n\n retro_binary!(RetroPowf, B::float_powf);\n\n impl<B: Backend> Backward<B, 2> for PowF {\n type State = (NodeID, NodeID, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (lhs_id, rhs_id, broadcast) = ops.state;\n let lhs: B::FloatTensorPrimitive = checkpointer.retrieve_node_output(lhs_id);\n let rhs: B::FloatTensorPrimitive = checkpointer.retrieve_node_output(rhs_id);\n\n // Both lhs and rhs are needed for both lhs and rhs gradients, but we clone them\n // the number of times required by the parents specification.\n let [rhs_4lhs, rhs_4rhs] = duplicate(&ops.parents, Some(rhs));\n let [lhs_4lhs, lhs_4rhs] = duplicate(&ops.parents, Some(lhs));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n //rhs*(lhs.val**(rhs-1))*grad\n let rhs1 = rhs_4lhs.unwrap();\n let rhs2 = rhs1.clone();\n let lhs = lhs_4lhs.unwrap();\n\n let tmp = B::float_powf(\n lhs,\n B::float_sub_scalar(rhs1, B::FloatElem::from_elem(1.0)),\n );\n let value = B::float_mul(tmp, rhs2);\n let grad = B::float_mul(grad, value);\n\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n //lhs**rhs * ln(lhs) * grad\n let rhs = rhs_4rhs.unwrap();\n let lhs1 = lhs_4rhs.unwrap();\n let lhs2 = lhs1.clone();\n let tmp = B::float_powf(lhs1, rhs);\n let value = B::float_mul(tmp, B::float_log(lhs2));\n let grad = B::float_mul(grad, value);\n\n broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match PowF\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroPowf::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, 
&rhs])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = prep.checkpoint(&lhs);\n let rhs_state = prep.checkpoint(&rhs);\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n B::float_powf(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_powf(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_sign(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sign;\n\n retro_unary!(RetroSign, B::float_sign);\n\n impl<B: Backend> Backward<B, 1> for Sign {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad|\n // Always return 0 because the derivative of the sign function\n // does not contribute to gradient updates in a meaningful way.\n B::float_mul_scalar(grad, 0.elem()));\n }\n }\n\n Sign.prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSign::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateless(B::float_sign(tensor.primitive))\n }\n\n fn float_expand(tensor: FloatTensor<Self>, shape: Shape) -> FloatTensor<Self> {\n // D1: tensor, D2: shape\n #[derive(Debug)]\n struct ExpandDim;\n\n #[derive(new, Debug)]\n struct RetroExpand<B: Backend> {\n input_id: NodeID,\n shape: Shape,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroExpand<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_expand(input, self.shape.clone());\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for ExpandDim {\n type State = (Shape, Shape);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape_in, shape_out) = ops.state;\n let ndims_in = shape_in.num_dims();\n let ndims_out = shape_out.num_dims();\n\n let 
mut shape_expanded = vec![1; ndims_out];\n\n debug_assert!(ndims_out >= ndims_in);\n\n for i in 0..ndims_in {\n shape_expanded[i + (ndims_out - ndims_in)] = shape_in.dims[i];\n }\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let shape_grad = grad.shape();\n let mut grad = grad;\n\n #[allow(clippy::needless_range_loop)]\n for i in 0..ndims_out {\n if shape_expanded[i] == 1 && shape_grad.dims[i] != 1 {\n grad = B::float_sum_dim(grad, i);\n }\n }\n\n B::float_reshape(grad, shape_in)\n });\n }\n }\n\n match ExpandDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroExpand::<B>::new(tensor.node.id, shape.clone()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (tensor.primitive.shape(), shape.clone()),\n B::float_expand(tensor.primitive, shape),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_expand(tensor.primitive, shape)),\n }\n }\n\n fn float_sort(tensor: FloatTensor<Self>, dim: usize, descending: bool) -> FloatTensor<Self> {\n match super::sort::SortDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let shape = tensor.primitive.shape();\n let (tensor, indices) =\n B::float_sort_with_indices(tensor.primitive, dim, descending);\n prep.finish((indices, shape), tensor)\n }\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_sort(tensor.primitive, dim, descending))\n }\n }\n }\n\n fn float_sort_with_indices(\n tensor: FloatTensor<Self>,\n dim: usize,\n descending: bool,\n ) -> (FloatTensor<Self>, IntTensor<B>) {\n match super::sort::SortDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let shape = tensor.primitive.shape();\n let (tensor, indices) =\n B::float_sort_with_indices(tensor.primitive, dim, descending);\n let tensor = prep.finish((indices.clone(), shape), tensor);\n\n (tensor, indices)\n }\n OpsKind::UnTracked(prep) => {\n let (tensor, indices) =\n 
B::float_sort_with_indices(tensor.primitive, dim, descending);\n let tensor = prep.finish(tensor);\n\n (tensor, indices)\n }\n }\n }\n\n fn float_argsort(tensor: FloatTensor<Self>, dim: usize, descending: bool) -> IntTensor<B> {\n B::float_argsort(tensor.primitive, dim, descending)\n }\n\n fn float_repeat_dim(tensor: FloatTensor<Self>, dim: usize, times: usize) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Repeat;\n\n #[derive(new, Debug)]\n struct RetroRepeat<B: Backend> {\n tensor_id: NodeID,\n dim: usize,\n times: usize,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroRepeat<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);\n let out = B::float_repeat_dim(tensor, self.dim, self.times);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for Repeat {\n type State = (usize, usize);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, times) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let mut dims = grad.shape().dims;\n let orig_dim_size = dims[dim] / times;\n if orig_dim_size > 1 {\n dims[dim] = orig_dim_size;\n let orig_dims = dims.clone();\n dims.insert(dim + 1, times); // shape [..., orig_dim_size, times, ...]\n let grad = B::float_reshape(grad, Shape::from(dims));\n let grad = B::float_sum_dim(grad, dim + 1); // sum over repeat times\n B::float_reshape(grad, Shape::from(orig_dims))\n } else {\n B::float_sum_dim(grad, dim)\n }\n });\n }\n }\n\n match Repeat\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroRepeat::<B>::new(tensor.node.id, dim, times))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (dim, times),\n B::float_repeat_dim(tensor.primitive, dim, times),\n ),\n OpsKind::UnTracked(prep) => {\n 
prep.finish(B::float_repeat_dim(tensor.primitive, dim, times))\n }\n }\n }\n\n fn float_cast(tensor: FloatTensor<Self>, dtype: burn_tensor::FloatDType) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_cast(tensor.primitive, dtype))\n }\n\n // TODO: Implement float_prod and float_sum\n // https://github.com/tracel-ai/burn/issues/1458\n}"
],
"name": "lhs",
"type": "FloatTensor<Self>"
}
],
"end_line": 350,
"name": "float_mul_scalar",
"signature": "fn float_mul_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self>",
"start_line": 319
} | {
"class_name": "impl<B: Backend, C: CheckpointStrategy> FloatTensorOps<Self> for Autodiff<B, C> {\n fn float_from_data(data: TensorData, device: &Device<Self>) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_from_data(data, device))\n }\n\n fn float_random(\n shape: Shape,\n distribution: burn_tensor::Distribution,\n device: &Device<Self>,\n ) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_random(shape, distribution, device))\n }\n\n fn float_zeros(shape: Shape, device: &Device<Self>) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_zeros(shape, device))\n }\n\n fn float_ones(shape: Shape, device: &Device<Self>) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_ones(shape, device))\n }\n\n async fn float_into_data(tensor: FloatTensor<Self>) -> TensorData {\n B::float_into_data(tensor.primitive).await\n }\n\n fn float_device(tensor: &FloatTensor<Self>) -> Device<Self> {\n B::float_device(&tensor.primitive)\n }\n\n fn float_to_device(tensor: FloatTensor<Self>, device: &Device<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct ToDevice;\n\n impl<B: Backend> Backward<B, 1> for ToDevice {\n type State = B::Device;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_to_device(grad, &ops.state)\n });\n }\n }\n\n match ToDevice\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let device_old = B::float_device(&tensor.primitive);\n prep.finish(device_old, B::float_to_device(tensor.primitive, device))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_to_device(tensor.primitive, device)),\n }\n }\n\n fn float_empty(shape: Shape, device: &Device<Self>) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_empty(shape, device))\n }\n\n fn float_add(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Add;\n\n 
retro_binary!(RetroAdd, B::float_add);\n\n impl<B: Backend> Backward<B, 2> for Add {\n type State = (Shape, Shape);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape_lhs, shape_rhs) = ops.state;\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| broadcast_shape::<B>(grad, &shape_lhs),\n |grad| broadcast_shape::<B>(grad, &shape_rhs),\n );\n }\n }\n\n match Add\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroAdd::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (lhs.primitive.shape(), rhs.primitive.shape()),\n B::float_add(lhs.primitive, rhs.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_add(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_add_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct AddScalar;\n\n retro_unary_scalar!(RetroAddScalar, B::float_add_scalar);\n\n impl<B: Backend> Backward<B, 1> for AddScalar {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| grad);\n }\n }\n\n AddScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroAddScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateless(B::float_add_scalar(lhs.primitive, rhs))\n }\n\n fn float_sub(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sub;\n\n retro_binary!(RetroSub, B::float_sub);\n\n impl<B: Backend> Backward<B, 2> for Sub {\n type State = (Shape, Shape);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape_lhs, shape_rhs) = ops.state;\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n 
grads,\n |grad| broadcast_shape::<B>(grad, &shape_lhs),\n |grad| broadcast_shape::<B>(B::float_neg(grad), &shape_rhs),\n );\n }\n }\n\n match Sub\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroSub::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (lhs.primitive.shape(), rhs.primitive.shape()),\n B::float_sub(lhs.primitive, rhs.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_sub(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_sub_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct SubScalar;\n\n retro_unary_scalar!(RetroSubScalar, B::float_sub_scalar);\n\n impl<B: Backend> Backward<B, 1> for SubScalar {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| grad);\n }\n }\n\n SubScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroSubScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateless(B::float_sub_scalar(lhs.primitive, rhs))\n }\n\n fn float_mul(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Mul;\n\n retro_binary!(RetroMul, B::float_mul);\n\n impl<B: Backend> Backward<B, 2> for Mul {\n type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (lhs, rhs, broadcast) = ops.state;\n let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));\n let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let grad = B::float_mul(grad, rhs.unwrap());\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n let grad = B::float_mul(grad, 
lhs.unwrap());\n broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let lhs_tracked = lhs.is_tracked();\n let rhs_tracked = rhs.is_tracked();\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match Mul\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroMul::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));\n let rhs_state = lhs_tracked.then(|| prep.checkpoint(&rhs));\n\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n B::float_mul(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_mul(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_mul_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct MulScalar;\n\n retro_unary_scalar!(RetroMulScalar, B::float_mul_scalar);\n\n impl<B: Backend> Backward<B, 1> for MulScalar {\n type State = FloatElem<B>;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_mul_scalar(grad, ops.state)\n });\n }\n }\n\n match MulScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroMulScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(rhs, B::float_mul_scalar(lhs.primitive, rhs)),\n OpsKind::UnTracked(prep) => prep.finish(B::float_mul_scalar(lhs.primitive, rhs)),\n }\n }\n\n fn float_div(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Div;\n\n retro_binary!(RetroDiv, B::float_div);\n\n impl<B: Backend> Backward<B, 2> for Div {\n type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n 
checkpointer: &mut Checkpointer,\n ) {\n let (lhs, rhs, broadcast) = ops.state;\n let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));\n let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));\n let [rhs_4lhs, rhs_4rhs] = duplicate(&ops.parents, rhs);\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let rhs = rhs_4lhs.unwrap();\n let value = B::float_powf_scalar(rhs, -1.0);\n let grad = B::float_mul(grad, value);\n\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n let rhs = rhs_4rhs.unwrap();\n let lhs = lhs.unwrap();\n let value = B::float_div(B::float_neg(lhs), B::float_powf_scalar(rhs, 2.0));\n let grad = B::float_mul(grad, value);\n\n broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let lhs_tracked = lhs.is_tracked();\n let rhs_tracked = rhs.is_tracked();\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match Div\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroDiv::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));\n let rhs_state = (lhs_tracked || rhs_tracked).then(|| prep.checkpoint(&rhs));\n\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n B::float_div(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_div(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_div_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct DivScalar;\n\n retro_unary_scalar!(RetroDivScalar, B::float_div_scalar);\n\n impl<B: Backend> Backward<B, 1> for DivScalar {\n type State = FloatElem<B>;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let tmp = 1.0 / ops.state.elem::<f32>();\n B::float_mul_scalar(grad, 
tmp.elem())\n });\n }\n }\n\n match DivScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroDivScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(rhs, B::float_div_scalar(lhs.primitive, rhs)),\n OpsKind::UnTracked(prep) => prep.finish(B::float_div_scalar(lhs.primitive, rhs)),\n }\n }\n\n fn float_remainder(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Rem;\n\n retro_binary!(RetroRem, B::float_remainder);\n\n impl<B: Backend> Backward<B, 2> for Rem {\n type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (lhs, rhs, broadcast) = ops.state;\n let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));\n let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n // remainder(x, y) = x - floor(x / y) * y\n // partial(x - floor(x / y) * y, x) = 1\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n // partial(x - floor(x / y) * y, y) = - floor(x / y)\n let rhs = rhs.unwrap();\n let lhs = lhs.unwrap();\n let value = B::float_neg(B::float_floor(B::float_div(lhs, rhs)));\n let grad = B::float_mul(grad, value);\n broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let lhs_tracked = lhs.is_tracked();\n let rhs_tracked = rhs.is_tracked();\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match Rem\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroRem::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));\n let rhs_state = (lhs_tracked || rhs_tracked).then(|| prep.checkpoint(&rhs));\n\n prep.finish(\n (lhs_state, 
rhs_state, broadcast),\n B::float_remainder(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_remainder(lhs.primitive, rhs.primitive))\n }\n }\n }\n\n fn float_remainder_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct RemainderScalar;\n\n retro_unary_scalar!(RetroRemainderScalar, B::float_remainder_scalar);\n\n impl<B: Backend> Backward<B, 1> for RemainderScalar {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| grad);\n }\n }\n\n RemainderScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroRemainderScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateless(B::float_remainder_scalar(lhs.primitive, rhs))\n }\n\n fn float_matmul(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Matmul;\n\n impl<B: Backend> Backward<B, 2> for Matmul {\n type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (lhs, rhs, broadcast) = ops.state;\n let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));\n let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let rhs = B::float_transpose(rhs.unwrap());\n let grad = B::float_matmul(grad, rhs);\n\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n let lhs = B::float_transpose(lhs.unwrap());\n let grad = B::float_matmul(lhs, grad);\n\n broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let lhs_tracked = lhs.is_tracked();\n let rhs_tracked = rhs.is_tracked();\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match Matmul\n .prepare::<C>([lhs.node.clone(), 
rhs.node.clone()])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));\n let rhs_state = lhs_tracked.then(|| prep.checkpoint(&rhs));\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n B::float_matmul(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_matmul(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_neg(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Neg;\n\n retro_unary!(RetroNeg, B::float_neg);\n\n impl<B: Backend> Backward<B, 1> for Neg {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| B::float_neg(grad));\n }\n }\n\n Neg.prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroNeg::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateless(B::float_neg(tensor.primitive))\n }\n\n fn float_recip(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Recip;\n\n retro_unary!(RetroRecip, B::float_recip);\n\n impl<B: Backend> Backward<B, 1> for Recip {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let tensor = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let tmp = B::float_powf_scalar(tensor, -2.0);\n let value = B::float_neg(tmp);\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Recip\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroRecip::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_recip(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_recip(tensor.primitive)),\n }\n }\n\n fn 
float_swap_dims(tensor: FloatTensor<Self>, dim1: usize, dim2: usize) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct SwapDim;\n\n #[derive(new, Debug)]\n struct RetroSwapDims<B: Backend> {\n input_id: NodeID,\n dim1: usize,\n dim2: usize,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroSwapDims<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_swap_dims(input, self.dim1, self.dim2);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for SwapDim {\n type State = (usize, usize);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim1, dim2) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_swap_dims(grad, dim2, dim1)\n });\n }\n }\n\n match SwapDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSwapDims::<B>::new(tensor.node.id, dim1, dim2))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (dim1, dim2),\n B::float_swap_dims(tensor.primitive, dim1, dim2),\n ),\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_swap_dims(tensor.primitive, dim1, dim2))\n }\n }\n }\n\n fn float_permute(tensor: FloatTensor<Self>, axes: &[usize]) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct PermuteDim;\n\n #[derive(new, Debug)]\n struct RetroPermuteDims<B: Backend> {\n input_id: NodeID,\n axes: Vec<usize>,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroPermuteDims<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_permute(input, &self.axes);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for PermuteDim {\n type State = Vec<usize>;\n\n fn backward(\n self,\n ops: 
Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let axes = ops.state;\n\n let mut inverse = vec![0usize; axes.len()];\n axes.iter()\n .enumerate()\n .for_each(|(i, &axis)| inverse[axis] = i);\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_permute(grad, &inverse)\n });\n }\n }\n\n match PermuteDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroPermuteDims::<B>::new(tensor.node.id, axes.to_vec()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n prep.finish(axes.to_vec(), B::float_permute(tensor.primitive, axes))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_permute(tensor.primitive, axes)),\n }\n }\n\n fn float_flip(tensor: FloatTensor<Self>, axes: &[usize]) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct FlipDim;\n\n #[derive(new, Debug)]\n struct RetroFlipDims<B: Backend> {\n input_id: NodeID,\n axes: Vec<usize>,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroFlipDims<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_flip(input, &self.axes);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for FlipDim {\n type State = Vec<usize>;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let axes = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_flip(grad, &axes)\n });\n }\n }\n\n match FlipDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroFlipDims::<B>::new(tensor.node.id, axes.to_vec()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n prep.finish(axes.to_vec(), B::float_flip(tensor.primitive, axes))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_flip(tensor.primitive, axes)),\n }\n }\n\n fn 
float_reshape(tensor: FloatTensor<Self>, shape: Shape) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct ReshapeDim;\n\n #[derive(new, Debug)]\n struct RetroReshape<B: Backend> {\n input_id: NodeID,\n shape: Shape,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroReshape<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_reshape(input, self.shape.clone());\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for ReshapeDim {\n type State = (Shape, Shape);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape_original, shape) = ops.state;\n let ndims_out = shape.num_dims();\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let shape_grad = grad.shape();\n let mut grad = grad;\n\n for i in 0..ndims_out {\n if shape.dims[i] == 1 && shape_grad.dims[i] != 1 {\n grad = B::float_sum_dim(grad, i);\n }\n }\n\n B::float_reshape(grad, shape_original)\n });\n }\n }\n\n match ReshapeDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroReshape::<B>::new(tensor.node.id, shape.clone()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (tensor.primitive.shape(), shape.clone()),\n B::float_reshape(tensor.primitive, shape),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_reshape(tensor.primitive, shape)),\n }\n }\n\n fn float_gather(\n dim: usize,\n tensor: FloatTensor<Self>,\n indices: IntTensor<B>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Gather;\n\n impl<B: Backend> Backward<B, 1> for Gather {\n type State = (usize, IntTensor<B>, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, indices, shape, device) = ops.state;\n\n unary::<B, _>(ops.parents, 
ops.node, grads, |grad| {\n let zeros = B::float_zeros(shape, &device);\n B::float_scatter(dim, zeros, indices, grad)\n });\n }\n }\n\n match Gather\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n dim,\n indices.clone(),\n tensor.primitive.shape(),\n B::float_device(&tensor.primitive),\n ),\n B::float_gather(dim, tensor.primitive, indices),\n ),\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_gather(dim, tensor.primitive, indices))\n }\n }\n }\n\n fn float_scatter(\n dim: usize,\n tensor: FloatTensor<Self>,\n indices: IntTensor<B>,\n value: FloatTensor<Self>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Scatter;\n\n impl<B: Backend> Backward<B, 2> for Scatter {\n type State = (usize, IntTensor<B>, Shape, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, indices, shape_lhs, shape_rhs, device) = ops.state;\n let [indices_4lhs, indices_4rhs] = duplicate(&ops.parents, Some(indices));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let zeros = B::float_zeros(shape_lhs, &device);\n B::float_scatter(dim, grad, indices_4lhs.unwrap(), zeros)\n },\n |grad| {\n let zeros = B::float_zeros(shape_rhs, &device);\n B::float_scatter(dim, zeros, indices_4rhs.unwrap(), grad)\n },\n );\n }\n }\n\n match Scatter\n .prepare::<C>([tensor.node, value.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n dim,\n indices.clone(),\n tensor.primitive.shape(),\n value.primitive.shape(),\n B::float_device(&value.primitive),\n ),\n B::float_scatter(dim, tensor.primitive, indices, value.primitive),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_scatter(\n dim,\n tensor.primitive,\n indices,\n value.primitive,\n )),\n }\n }\n\n fn float_select(\n tensor: FloatTensor<Self>,\n dim: usize,\n indices: IntTensor<B>,\n ) -> FloatTensor<Self> {\n 
#[derive(Debug)]\n struct Select;\n\n #[derive(new, Debug)]\n struct RetroSelect<B: Backend> {\n input_id: NodeID,\n dim: usize,\n indices: IntTensor<B>,\n }\n\n impl<B: Backend> RetroForward for RetroSelect<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_select(input, self.dim, self.indices.clone());\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for Select {\n type State = (usize, IntTensor<B>, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, indices, shape, device) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let zeros = B::float_zeros(shape, &device);\n B::float_select_assign(zeros, dim, indices, grad)\n });\n }\n }\n\n match Select\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSelect::<B>::new(tensor.node.id, dim, indices.clone()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n dim,\n indices.clone(),\n tensor.primitive.shape(),\n B::float_device(&tensor.primitive),\n ),\n B::float_select(tensor.primitive, dim, indices),\n ),\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_select(tensor.primitive, dim, indices))\n }\n }\n }\n\n fn float_select_assign(\n tensor: FloatTensor<Self>,\n dim: usize,\n indices: IntTensor<B>,\n value: FloatTensor<Self>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct IndexSelectDimAssign;\n\n #[derive(new, Debug)]\n struct RetroSelectAssign<B: Backend> {\n tensor_id: NodeID,\n dim: usize,\n indices: IntTensor<B>,\n value_id: NodeID,\n }\n\n impl<B: Backend> RetroForward for RetroSelectAssign<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);\n let value = 
states.get_state::<B::FloatTensorPrimitive>(&self.value_id);\n let out = B::float_select_assign(tensor, self.dim, self.indices.clone(), value);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 2> for IndexSelectDimAssign {\n type State = (usize, IntTensor<B>);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, indices) = ops.state;\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| grad,\n |grad| B::float_select(grad, dim, indices),\n );\n }\n }\n\n match IndexSelectDimAssign\n .prepare::<C>([tensor.node.clone(), value.node.clone()])\n .memory_bound()\n .retro_forward(RetroSelectAssign::<B>::new(\n tensor.node.id,\n dim,\n indices.clone(),\n value.node.id,\n ))\n .parents([&tensor, &value])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (dim, indices.clone()),\n B::float_select_assign(tensor.primitive, dim, indices, value.primitive),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_select_assign(\n tensor.primitive,\n dim,\n indices,\n value.primitive,\n )),\n }\n }\n\n fn float_slice(\n tensor: FloatTensor<Self>,\n ranges: &[core::ops::Range<usize>],\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Index;\n\n #[derive(new, Debug)]\n struct RetroSlice<B: Backend> {\n tensor_id: NodeID,\n ranges: Vec<core::ops::Range<usize>>,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroSlice<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);\n let out = B::float_slice(tensor, &self.ranges);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for Index {\n type State = (Vec<core::ops::Range<usize>>, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (ranges, shape, device) = ops.state;\n\n 
unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let zeros = B::float_zeros(shape, &device);\n B::float_slice_assign(zeros, &ranges, grad)\n });\n }\n }\n\n match Index\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSlice::<B>::new(tensor.node.id, ranges.to_vec()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n ranges.to_vec(),\n tensor.primitive.shape(),\n B::float_device(&tensor.primitive),\n ),\n B::float_slice(tensor.primitive, ranges),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_slice(tensor.primitive, ranges)),\n }\n }\n\n fn float_slice_assign(\n tensor: FloatTensor<Self>,\n ranges: &[core::ops::Range<usize>],\n value: FloatTensor<Self>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct SliceAssign;\n\n #[derive(new, Debug)]\n struct RetroSliceAssign<B: Backend> {\n tensor_id: NodeID,\n ranges: Vec<core::ops::Range<usize>>,\n value_id: NodeID,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroSliceAssign<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);\n let value = states.get_state::<B::FloatTensorPrimitive>(&self.value_id);\n let out = B::float_slice_assign(tensor, &self.ranges, value);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 2> for SliceAssign {\n type State = (Vec<core::ops::Range<usize>>, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (ranges, shape_rhs, device) = ops.state;\n let [ranges_4lhs, ranges_4rhs] = duplicate(&ops.parents, Some(ranges));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let zeros = B::float_zeros(shape_rhs, &device);\n B::float_slice_assign(grad, &ranges_4lhs.unwrap(), zeros)\n },\n |grad| B::float_slice(grad, &ranges_4rhs.unwrap()),\n );\n }\n }\n\n match 
SliceAssign\n .prepare::<C>([tensor.node.clone(), value.node.clone()])\n .memory_bound()\n .retro_forward(RetroSliceAssign::<B>::new(\n tensor.node.id,\n ranges.to_vec(),\n value.node.id,\n ))\n .parents([&tensor, &value])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n ranges.to_vec(),\n value.primitive.shape(),\n B::float_device(&value.primitive),\n ),\n B::float_slice_assign(tensor.primitive, ranges, value.primitive),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_slice_assign(\n tensor.primitive,\n ranges,\n value.primitive,\n )),\n }\n }\n\n fn float_mask_where(\n tensor: FloatTensor<Self>,\n mask: BoolTensor<Self>,\n source: FloatTensor<Self>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct MaskWhere;\n\n impl<B: Backend> Backward<B, 2> for MaskWhere {\n type State = (BoolTensor<B>, Shape, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (mask, shape_lhs, shape_rhs, device) = ops.state;\n let [mask_4lhs, mask_4rhs] = duplicate(&ops.parents, Some(mask));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let zeros = B::float_zeros(shape_lhs.clone(), &device);\n let grad = B::float_mask_where(grad, mask_4lhs.unwrap(), zeros);\n\n broadcast_shape::<B>(grad, &shape_lhs)\n },\n |grad| {\n let zeros = B::float_zeros(shape_rhs.clone(), &device);\n let grad = B::float_mask_where(zeros, mask_4rhs.unwrap(), grad);\n\n broadcast_shape::<B>(grad, &shape_rhs)\n },\n );\n }\n }\n\n match MaskWhere\n .prepare::<C>([tensor.node, source.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n mask.clone(),\n tensor.primitive.shape(),\n source.primitive.shape(),\n B::float_device(&source.primitive),\n ),\n B::float_mask_where(tensor.primitive, mask, source.primitive),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_mask_where(\n tensor.primitive,\n mask,\n source.primitive,\n )),\n }\n 
}\n\n fn float_mask_fill(\n tensor: FloatTensor<Self>,\n mask: BoolTensor<B>,\n value: FloatElem<B>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct MaskFill;\n\n impl<B: Backend> Backward<B, 1> for MaskFill {\n type State = BoolTensor<B>;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_mask_fill(grad, ops.state, 0.elem())\n });\n }\n }\n\n match MaskFill\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n mask.clone(),\n B::float_mask_fill(tensor.primitive, mask, value),\n ),\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_mask_fill(tensor.primitive, mask, value))\n }\n }\n }\n\n fn float_equal(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {\n B::float_equal(lhs.primitive, rhs.primitive)\n }\n\n fn float_equal_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {\n B::float_equal_elem(lhs.primitive, rhs)\n }\n\n fn float_greater(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {\n B::float_greater(lhs.primitive, rhs.primitive)\n }\n\n fn float_greater_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {\n B::float_greater_elem(lhs.primitive, rhs)\n }\n\n fn float_greater_equal(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {\n B::float_greater_equal(lhs.primitive, rhs.primitive)\n }\n\n fn float_greater_equal_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {\n B::float_greater_equal_elem(lhs.primitive, rhs)\n }\n\n fn float_lower(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {\n B::float_lower(lhs.primitive, rhs.primitive)\n }\n\n fn float_lower_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {\n B::float_lower_elem(lhs.primitive, rhs)\n }\n\n fn float_lower_equal(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> 
BoolTensor<B> {\n B::float_lower_equal(lhs.primitive, rhs.primitive)\n }\n\n fn float_lower_equal_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {\n B::float_lower_equal_elem(lhs.primitive, rhs)\n }\n\n fn float_detach(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n // When we detach a tensor, we remove it from the graph, but we still want to keep the\n // `require_grad` setting.\n let is_require_grad = Self::float_is_require_grad(&tensor);\n let tensor = AutodiffTensor::new(tensor.primitive);\n\n match is_require_grad {\n true => tensor.require_grad(),\n false => tensor,\n }\n }\n\n fn float_set_require_grad(tensor: FloatTensor<Self>, require_grad: bool) -> FloatTensor<Self> {\n if require_grad {\n return tensor.require_grad();\n }\n\n AutodiffTensor::new(tensor.primitive)\n }\n\n fn float_is_require_grad(tensor: &FloatTensor<Self>) -> bool {\n matches!(tensor.node.requirement, Requirement::Grad)\n }\n\n fn float_mean(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Mean;\n\n impl<B: Backend> Backward<B, 1> for Mean {\n type State = Shape;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let shape = ops.state;\n let val = 1_f64 / shape.num_elements() as f64;\n let ones = B::float_ones(shape, &B::float_device(&grad));\n let val = B::float_mul_scalar(ones, val.elem());\n\n let grad = unsqueeze_like::<B>(grad, val.shape());\n B::float_mul(val, grad)\n });\n }\n }\n\n match Mean.prepare::<C>([tensor.node]).compute_bound().stateful() {\n OpsKind::Tracked(prep) => {\n prep.finish(tensor.primitive.shape(), B::float_mean(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_mean(tensor.primitive)),\n }\n }\n\n fn float_sum(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sum;\n\n impl<B: Backend> Backward<B, 1> for Sum {\n type State = Shape;\n\n 
fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let val = B::float_ones(ops.state, &B::float_device(&grad));\n\n let grad = unsqueeze_like::<B>(grad, val.shape());\n B::float_mul(val, grad)\n });\n }\n }\n\n match Sum.prepare::<C>([tensor.node]).compute_bound().stateful() {\n OpsKind::Tracked(prep) => {\n prep.finish(tensor.primitive.shape(), B::float_sum(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_sum(tensor.primitive)),\n }\n }\n\n fn float_mean_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct MeanDim;\n\n impl<B: Backend> Backward<B, 1> for MeanDim {\n type State = (Shape, usize);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape, dim) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let val = 1_f64 / shape.dims[dim] as f64;\n let ones = B::float_ones(shape, &B::float_device(&grad));\n let val = B::float_mul_scalar(ones, B::FloatElem::from_elem(val));\n\n let grad = B::float_sum_dim(grad, dim);\n B::float_mul(val, grad)\n });\n }\n }\n\n match MeanDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (tensor.primitive.shape(), dim),\n B::float_mean_dim(tensor.primitive, dim),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_mean_dim(tensor.primitive, dim)),\n }\n }\n\n fn float_sum_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct SumDim;\n\n impl<B: Backend> Backward<B, 1> for SumDim {\n type State = (Shape, usize);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape, dim) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let ones = 
B::float_ones(shape, &B::float_device(&grad));\n let grad = B::float_sum_dim(grad, dim);\n\n B::float_mul(ones, grad)\n });\n }\n }\n\n match SumDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (tensor.primitive.shape(), dim),\n B::float_sum_dim(tensor.primitive, dim),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_sum_dim(tensor.primitive, dim)),\n }\n }\n\n fn float_argmax(tensor: FloatTensor<Self>, dim: usize) -> IntTensor<B> {\n B::float_argmax(tensor.primitive, dim)\n }\n\n fn float_argmin(tensor: FloatTensor<Self>, dim: usize) -> IntTensor<B> {\n B::float_argmin(tensor.primitive, dim)\n }\n\n fn float_exp(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Exp;\n\n retro_unary!(RetroExp, B::float_exp);\n\n impl<B: Backend> Backward<B, 1> for Exp {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n let output = B::float_exp(input);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_mul(grad, output)\n });\n }\n }\n\n match Exp\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroExp::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_exp(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_exp(tensor.primitive)),\n }\n }\n\n fn float_log(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Log;\n\n retro_unary!(RetroLog, B::float_log);\n\n impl<B: Backend> Backward<B, 1> for Log {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, 
ops.node, grads, |grad| {\n let value = B::float_powf_scalar(input, -1.0);\n B::float_mul(grad, value)\n });\n }\n }\n\n match Log\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroLog::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_log(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_log(tensor.primitive)),\n }\n }\n\n fn float_log1p(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Log1P;\n\n retro_unary!(RetroLog1P, B::float_log1p);\n\n impl<B: Backend> Backward<B, 1> for Log1P {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_add_scalar(input, 1.elem());\n let value = B::float_powf_scalar(value, -1.0);\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Log1P\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroLog1P::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_log1p(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_log1p(tensor.primitive)),\n }\n }\n\n fn float_powf_scalar(tensor: FloatTensor<Self>, value: f32) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct PowfScalar;\n\n #[derive(new, Debug)]\n struct RetroPowfScalar<B: Backend> {\n lhs_id: NodeID,\n rhs: f32,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroPowfScalar<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let lhs = states.get_state::<B::FloatTensorPrimitive>(&self.lhs_id);\n let out = B::float_powf_scalar(lhs, self.rhs);\n 
states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for PowfScalar {\n type State = (NodeID, f32);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (tensor_id, value) = ops.state;\n let tensor = checkpointer.retrieve_node_output(tensor_id);\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let tmp = B::float_powf_scalar(tensor, value - 1.0);\n let value = B::float_mul_scalar(tmp, value.elem());\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match PowfScalar\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroPowfScalar::<B>::new(tensor.node.id, value))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = (prep.checkpoint(&tensor), value);\n prep.finish(state, B::float_powf_scalar(tensor.primitive, value))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_powf_scalar(tensor.primitive, value)),\n }\n }\n\n fn float_sqrt(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sqrt;\n\n retro_unary!(RetroSqrt, B::float_sqrt);\n\n impl<B: Backend> Backward<B, 1> for Sqrt {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_div_scalar(B::float_powf_scalar(input, -0.5), 2.elem());\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Sqrt\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSqrt::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_sqrt(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_sqrt(tensor.primitive)),\n }\n }\n\n fn float_abs(tensor: FloatTensor<Self>) -> 
FloatTensor<Self> {\n #[derive(Debug)]\n struct Abs;\n\n retro_unary!(RetroAbs, B::float_abs);\n\n impl<B: Backend> Backward<B, 1> for Abs {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let tensor: B::FloatTensorPrimitive = checkpointer.retrieve_node_output(ops.state);\n let state = B::float_sign(tensor);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_mul(grad, state)\n });\n }\n }\n\n match Abs\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroAbs::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_abs(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_abs(tensor.primitive)),\n }\n }\n\n fn float_cos(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Cos;\n\n retro_unary!(RetroCos, B::float_cos);\n\n impl<B: Backend> Backward<B, 1> for Cos {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_neg(B::float_sin(input));\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Cos\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroCos::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_cos(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_cos(tensor.primitive)),\n }\n }\n\n fn float_sin(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sin;\n\n retro_unary!(RetroSin, B::float_sin);\n\n impl<B: Backend> Backward<B, 1> for Sin {\n type State = 
NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let state = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_cos(state);\n B::float_mul(grad, value)\n });\n }\n }\n\n match Sin\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSin::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_sin(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_sin(tensor.primitive)),\n }\n }\n\n fn float_tanh(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Tanh;\n\n retro_unary!(RetroTanh, B::float_tanh);\n\n impl<B: Backend> Backward<B, 1> for Tanh {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n let state = B::float_tanh(input);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_add_scalar(\n B::float_neg(B::float_powf_scalar(state, 2.0)),\n 1.elem(),\n );\n B::float_mul(grad, value)\n });\n }\n }\n\n match Tanh\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroTanh::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_tanh(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_tanh(tensor.primitive)),\n }\n }\n\n fn float_round(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Round;\n retro_unary!(RetroRound, B::float_round);\n\n impl<B: Backend> Backward<B, 1> for Round {\n type State = (Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n 
grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape, device) = ops.state;\n unary::<B, _>(ops.parents, ops.node, grads, |_grad| {\n B::float_zeros(shape, &device)\n })\n }\n }\n\n match Round\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroRound::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (tensor.primitive.shape(), B::float_device(&tensor.primitive)),\n B::float_round(tensor.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_round(tensor.primitive)),\n }\n }\n\n fn float_floor(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Floor;\n retro_unary!(RetroFloor, B::float_floor);\n\n impl<B: Backend> Backward<B, 1> for Floor {\n type State = (Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape, device) = ops.state;\n unary::<B, _>(ops.parents, ops.node, grads, |_grad| {\n B::float_zeros(shape, &device)\n })\n }\n }\n\n match Floor\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroFloor::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (tensor.primitive.shape(), B::float_device(&tensor.primitive)),\n B::float_floor(tensor.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_floor(tensor.primitive)),\n }\n }\n\n fn float_ceil(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Ceil;\n retro_unary!(RetroCeil, B::float_ceil);\n\n impl<B: Backend> Backward<B, 1> for Ceil {\n type State = (Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape, device) = ops.state;\n unary::<B, _>(ops.parents, ops.node, grads, |_grad| {\n B::float_zeros(shape, &device)\n })\n }\n }\n\n match Ceil\n 
.prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroCeil::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (tensor.primitive.shape(), B::float_device(&tensor.primitive)),\n B::float_floor(tensor.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_floor(tensor.primitive)),\n }\n }\n\n fn float_erf(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Erf;\n\n retro_unary!(RetroErf, B::float_erf);\n\n impl<B: Backend> Backward<B, 1> for Erf {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let ops = checkpointer.retrieve_node_output(ops.state);\n let exponent = B::float_neg(B::float_powf_scalar(ops, 2.0));\n let numerator = B::float_mul_scalar(B::float_exp(exponent), 2.0.elem());\n let denominator = core::f64::consts::PI.sqrt().elem();\n let value = B::float_div_scalar(numerator, denominator);\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Erf\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroErf::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_erf(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_erf(tensor.primitive)),\n }\n }\n\n fn float_cat(tensors: Vec<FloatTensor<Self>>, dim: usize) -> FloatTensor<Self> {\n #[derive(new, Debug)]\n struct CatStep<B: Backend> {\n nodes: Vec<Option<NodeRef>>,\n // The dimension of each tensor along the dim dimension.\n // This indicates the number of dimension concatenated for each tensor.\n dim_sizes: Vec<usize>,\n output: NodeRef,\n phantom: PhantomData<B>,\n dim: usize,\n }\n\n impl<B: Backend> Step for CatStep<B> {\n fn step(self: Box<Self>, grads: &mut Gradients, 
_checkpointer: &mut Checkpointer) {\n let grad = grads.consume::<B>(&self.output);\n let ranges: Vec<_> = grad.shape().dims.iter().map(|v| 0..*v).collect();\n\n let mut current_index = 0;\n\n self.nodes\n .into_iter()\n .zip(self.dim_sizes)\n .filter_map(|(node, dim_size)| node.map(|node| (node, dim_size)))\n .for_each(|(node, dim_size)| {\n let mut ranges = ranges.clone();\n ranges[self.dim] = current_index..dim_size + current_index;\n current_index += dim_size;\n grads.register::<B>(node.id, B::float_slice(grad.clone(), &ranges));\n });\n }\n\n fn node(&self) -> NodeID {\n self.output.id\n }\n\n fn parents(&self) -> Vec<NodeID> {\n self.nodes\n .iter()\n .filter_map(|node| node.clone())\n .map(|node| node.id)\n .collect()\n }\n fn depth(&self) -> usize {\n self.output.order\n }\n }\n\n let mut nodes = Vec::with_capacity(tensors.len());\n let mut primitives = Vec::with_capacity(tensors.len());\n let mut dim_sizes = Vec::with_capacity(tensors.len());\n\n tensors.into_iter().for_each(|tensor| {\n dim_sizes.push(tensor.primitive.shape().dims[dim]);\n nodes.push(tensor.node);\n primitives.push(tensor.primitive);\n });\n\n let requirement = Requirement::from_nodes(&nodes);\n\n // For simplicity, this operation does not checkpoint anything\n let cat_computing_property = ComputingProperty::Ambiguous;\n let checkpointer_builder = CheckpointerBuilder::default();\n\n let output = B::float_cat(primitives, dim);\n if requirement.is_none() {\n return AutodiffTensor::from_parents(\n output,\n &nodes,\n requirement,\n cat_computing_property,\n );\n }\n\n let output =\n AutodiffTensor::from_parents(output, &nodes, requirement, cat_computing_property);\n let nodes = nodes\n .into_iter()\n .map(|node| node.clone_if_require_grad())\n .collect::<Vec<_>>();\n\n let ops = CatStep::<B>::new(nodes, dim_sizes, output.node.clone(), dim);\n output.register_step(ops, checkpointer_builder)\n }\n\n fn float_max_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {\n match 
MaxMinDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let shape = tensor.primitive.shape();\n let (tensor, index) = B::float_max_dim_with_indices(tensor.primitive, dim);\n prep.finish((index, shape), tensor)\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_max_dim(tensor.primitive, dim)),\n }\n }\n fn float_max_dim_with_indices(\n tensor: FloatTensor<Self>,\n dim: usize,\n ) -> (FloatTensor<Self>, IntTensor<B>) {\n match MaxMinDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let shape = tensor.primitive.shape();\n let (tensor, index) = B::float_max_dim_with_indices(tensor.primitive, dim);\n let tensor = prep.finish((index.clone(), shape), tensor);\n\n (tensor, index)\n }\n OpsKind::UnTracked(prep) => {\n let (tensor, index) = B::float_max_dim_with_indices(tensor.primitive, dim);\n let tensor = prep.finish(tensor);\n\n (tensor, index)\n }\n }\n }\n fn float_min_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {\n match MaxMinDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let shape = tensor.primitive.shape();\n let (tensor, index) = B::float_min_dim_with_indices(tensor.primitive, dim);\n prep.finish((index, shape), tensor)\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_min_dim(tensor.primitive, dim)),\n }\n }\n fn float_min_dim_with_indices(\n tensor: FloatTensor<Self>,\n dim: usize,\n ) -> (FloatTensor<Self>, IntTensor<B>) {\n match MaxMinDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let shape = tensor.primitive.shape();\n let (tensor, index) = B::float_min_dim_with_indices(tensor.primitive, dim);\n let tensor = prep.finish((index.clone(), shape), tensor);\n\n (tensor, index)\n }\n OpsKind::UnTracked(prep) => {\n let (tensor, index) = B::float_min_dim_with_indices(tensor.primitive, dim);\n let tensor = 
prep.finish(tensor);\n\n (tensor, index)\n }\n }\n }\n\n fn float_into_int(tensor: FloatTensor<Self>) -> <Autodiff<B> as Backend>::IntTensorPrimitive {\n B::float_into_int(tensor.primitive)\n }\n\n fn float_powf(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct PowF;\n\n retro_binary!(RetroPowf, B::float_powf);\n\n impl<B: Backend> Backward<B, 2> for PowF {\n type State = (NodeID, NodeID, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (lhs_id, rhs_id, broadcast) = ops.state;\n let lhs: B::FloatTensorPrimitive = checkpointer.retrieve_node_output(lhs_id);\n let rhs: B::FloatTensorPrimitive = checkpointer.retrieve_node_output(rhs_id);\n\n // Both lhs and rhs are needed for both lhs and rhs gradients, but we clone them\n // the number of times required by the parents specification.\n let [rhs_4lhs, rhs_4rhs] = duplicate(&ops.parents, Some(rhs));\n let [lhs_4lhs, lhs_4rhs] = duplicate(&ops.parents, Some(lhs));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n //rhs*(lhs.val**(rhs-1))*grad\n let rhs1 = rhs_4lhs.unwrap();\n let rhs2 = rhs1.clone();\n let lhs = lhs_4lhs.unwrap();\n\n let tmp = B::float_powf(\n lhs,\n B::float_sub_scalar(rhs1, B::FloatElem::from_elem(1.0)),\n );\n let value = B::float_mul(tmp, rhs2);\n let grad = B::float_mul(grad, value);\n\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n //lhs**rhs * ln(lhs) * grad\n let rhs = rhs_4rhs.unwrap();\n let lhs1 = lhs_4rhs.unwrap();\n let lhs2 = lhs1.clone();\n let tmp = B::float_powf(lhs1, rhs);\n let value = B::float_mul(tmp, B::float_log(lhs2));\n let grad = B::float_mul(grad, value);\n\n broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match PowF\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n 
.retro_forward(RetroPowf::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = prep.checkpoint(&lhs);\n let rhs_state = prep.checkpoint(&rhs);\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n B::float_powf(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_powf(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_sign(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sign;\n\n retro_unary!(RetroSign, B::float_sign);\n\n impl<B: Backend> Backward<B, 1> for Sign {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad|\n // Always return 0 because the derivative of the sign function\n // does not contribute to gradient updates in a meaningful way.\n B::float_mul_scalar(grad, 0.elem()));\n }\n }\n\n Sign.prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSign::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateless(B::float_sign(tensor.primitive))\n }\n\n fn float_expand(tensor: FloatTensor<Self>, shape: Shape) -> FloatTensor<Self> {\n // D1: tensor, D2: shape\n #[derive(Debug)]\n struct ExpandDim;\n\n #[derive(new, Debug)]\n struct RetroExpand<B: Backend> {\n input_id: NodeID,\n shape: Shape,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroExpand<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_expand(input, self.shape.clone());\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for ExpandDim {\n type State = (Shape, Shape);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape_in, shape_out) = ops.state;\n let 
ndims_in = shape_in.num_dims();\n let ndims_out = shape_out.num_dims();\n\n let mut shape_expanded = vec![1; ndims_out];\n\n debug_assert!(ndims_out >= ndims_in);\n\n for i in 0..ndims_in {\n shape_expanded[i + (ndims_out - ndims_in)] = shape_in.dims[i];\n }\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let shape_grad = grad.shape();\n let mut grad = grad;\n\n #[allow(clippy::needless_range_loop)]\n for i in 0..ndims_out {\n if shape_expanded[i] == 1 && shape_grad.dims[i] != 1 {\n grad = B::float_sum_dim(grad, i);\n }\n }\n\n B::float_reshape(grad, shape_in)\n });\n }\n }\n\n match ExpandDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroExpand::<B>::new(tensor.node.id, shape.clone()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (tensor.primitive.shape(), shape.clone()),\n B::float_expand(tensor.primitive, shape),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_expand(tensor.primitive, shape)),\n }\n }\n\n fn float_sort(tensor: FloatTensor<Self>, dim: usize, descending: bool) -> FloatTensor<Self> {\n match super::sort::SortDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let shape = tensor.primitive.shape();\n let (tensor, indices) =\n B::float_sort_with_indices(tensor.primitive, dim, descending);\n prep.finish((indices, shape), tensor)\n }\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_sort(tensor.primitive, dim, descending))\n }\n }\n }\n\n fn float_sort_with_indices(\n tensor: FloatTensor<Self>,\n dim: usize,\n descending: bool,\n ) -> (FloatTensor<Self>, IntTensor<B>) {\n match super::sort::SortDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let shape = tensor.primitive.shape();\n let (tensor, indices) =\n B::float_sort_with_indices(tensor.primitive, dim, descending);\n let tensor = prep.finish((indices.clone(), shape), tensor);\n\n (tensor, indices)\n 
}\n OpsKind::UnTracked(prep) => {\n let (tensor, indices) =\n B::float_sort_with_indices(tensor.primitive, dim, descending);\n let tensor = prep.finish(tensor);\n\n (tensor, indices)\n }\n }\n }\n\n fn float_argsort(tensor: FloatTensor<Self>, dim: usize, descending: bool) -> IntTensor<B> {\n B::float_argsort(tensor.primitive, dim, descending)\n }\n\n fn float_repeat_dim(tensor: FloatTensor<Self>, dim: usize, times: usize) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Repeat;\n\n #[derive(new, Debug)]\n struct RetroRepeat<B: Backend> {\n tensor_id: NodeID,\n dim: usize,\n times: usize,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroRepeat<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);\n let out = B::float_repeat_dim(tensor, self.dim, self.times);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for Repeat {\n type State = (usize, usize);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, times) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let mut dims = grad.shape().dims;\n let orig_dim_size = dims[dim] / times;\n if orig_dim_size > 1 {\n dims[dim] = orig_dim_size;\n let orig_dims = dims.clone();\n dims.insert(dim + 1, times); // shape [..., orig_dim_size, times, ...]\n let grad = B::float_reshape(grad, Shape::from(dims));\n let grad = B::float_sum_dim(grad, dim + 1); // sum over repeat times\n B::float_reshape(grad, Shape::from(orig_dims))\n } else {\n B::float_sum_dim(grad, dim)\n }\n });\n }\n }\n\n match Repeat\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroRepeat::<B>::new(tensor.node.id, dim, times))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (dim, times),\n B::float_repeat_dim(tensor.primitive, dim, times),\n ),\n 
OpsKind::UnTracked(prep) => {\n prep.finish(B::float_repeat_dim(tensor.primitive, dim, times))\n }\n }\n }\n\n fn float_cast(tensor: FloatTensor<Self>, dtype: burn_tensor::FloatDType) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_cast(tensor.primitive, dtype))\n }\n\n // TODO: Implement float_prod and float_sum\n // https://github.com/tracel-ai/burn/issues/1458\n}",
"class_signature": "impl<B: Backend, C: CheckpointStrategy> FloatTensorOps<Self> for Autodiff<B, C>"
} |
float_div | burn-main/crates/burn-autodiff/src/ops/tensor.rs | fn float_div(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {
#[derive(Debug)]
struct Div;
retro_binary!(RetroDiv, B::float_div);
impl<B: Backend> Backward<B, 2> for Div {
type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);
fn backward(
self,
ops: Ops<Self::State, 2>,
grads: &mut Gradients,
checkpointer: &mut Checkpointer,
) {
let (lhs, rhs, broadcast) = ops.state;
let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));
let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));
let [rhs_4lhs, rhs_4rhs] = duplicate(&ops.parents, rhs);
binary::<B, _, _>(
ops.parents,
ops.node,
grads,
|grad| {
let rhs = rhs_4lhs.unwrap();
let value = B::float_powf_scalar(rhs, -1.0);
let grad = B::float_mul(grad, value);
broadcast.backward_lhs::<B>(grad)
},
|grad| {
let rhs = rhs_4rhs.unwrap();
let lhs = lhs.unwrap();
let value = B::float_div(B::float_neg(lhs), B::float_powf_scalar(rhs, 2.0));
let grad = B::float_mul(grad, value);
broadcast.backward_rhs::<B>(grad)
},
);
}
}
let lhs_tracked = lhs.is_tracked();
let rhs_tracked = rhs.is_tracked();
let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);
match Div
.prepare::<C>([lhs.node.clone(), rhs.node.clone()])
.memory_bound()
.retro_forward(RetroDiv::<B>::new(lhs.node.id, rhs.node.id))
.parents([&lhs, &rhs])
.stateful()
{
OpsKind::Tracked(mut prep) => {
let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));
let rhs_state = (lhs_tracked || rhs_tracked).then(|| prep.checkpoint(&rhs));
prep.finish(
(lhs_state, rhs_state, broadcast),
B::float_div(lhs.primitive, rhs.primitive),
)
}
OpsKind::UnTracked(prep) => prep.finish(B::float_div(lhs.primitive, rhs.primitive)),
}
} | use alloc::{boxed::Box, vec, vec::Vec};
use core::marker::PhantomData;
#[cfg(not(feature = "std"))]
#[allow(unused_imports, reason = "required on aarch64, unused on x86_64")]
use num_traits::float::Float;
use crate::{
Autodiff,
checkpoint::{
base::Checkpointer, builder::CheckpointerBuilder, retro_forward::RetroForward,
state::BackwardStates, strategy::CheckpointStrategy,
},
grads::Gradients,
graph::{ComputingProperty, NodeID, NodeRef, Requirement, Step},
ops::{Backward, Ops, OpsKind, binary, broadcast_shape, unary},
retro_binary, retro_unary, retro_unary_scalar,
tensor::AutodiffTensor,
utils::duplicate,
};
use burn_tensor::{
Device, ElementConversion, Shape, TensorData, TensorMetadata,
backend::Backend,
ops::{BoolTensor, FloatElem, FloatTensor, FloatTensorOps, IntTensor},
};
use super::maxmin::MaxMinDim;
// Unsqueeze op on primitive.
/// Reshapes `tensor` so it has the same rank as `shape` by prepending
/// size-1 dimensions, keeping the original dimensions right-aligned
/// (e.g. `[a, b]` unsqueezed against a 4-D shape becomes `[1, 1, a, b]`).
///
/// `shape` must have at least as many dimensions as `tensor`; otherwise the
/// `ndims_out - ndims_in` subtraction below underflows and panics.
fn unsqueeze_like<B: Backend>(
    tensor: B::FloatTensorPrimitive,
    shape: Shape,
) -> B::FloatTensorPrimitive {
    let ndims_out = shape.num_dims();
    let shape_in = tensor.shape();
    let ndims_in = shape_in.num_dims();

    // Start from an all-ones shape of the target rank, then copy the input
    // dimensions into the trailing slots so they stay right-aligned.
    let mut dims = vec![1; ndims_out];
    let num_ones = ndims_out - ndims_in;
    dims[num_ones..(ndims_in + num_ones)].copy_from_slice(&shape_in.dims[..ndims_in]);

    B::float_reshape(tensor, Shape::from(dims))
}
impl<B: Backend, C: CheckpointStrategy> FloatTensorOps<Self> for Autodiff<B, C> {
fn float_from_data(data: TensorData, device: &Device<Self>) -> FloatTensor<Self> {
AutodiffTensor::new(B::float_from_data(data, device))
}
fn float_random(
shape: Shape,
distribution: burn_tensor::Distribution,
device: &Device<Self>,
) -> FloatTensor<Self> {
AutodiffTensor::new(B::float_random(shape, distribution, device))
}
fn float_zeros(shape: Shape, device: &Device<Self>) -> FloatTensor<Self> {
AutodiffTensor::new(B::float_zeros(shape, device))
}
fn float_ones(shape: Shape, device: &Device<Self>) -> FloatTensor<Self> {
AutodiffTensor::new(B::float_ones(shape, device))
}
async fn float_into_data(tensor: FloatTensor<Self>) -> TensorData {
B::float_into_data(tensor.primitive).await
}
fn float_device(tensor: &FloatTensor<Self>) -> Device<Self> {
B::float_device(&tensor.primitive)
}
    fn float_to_device(tensor: FloatTensor<Self>, device: &Device<Self>) -> FloatTensor<Self> {
        // Moves the tensor to `device` while registering a backward step that
        // moves the incoming gradient back to the tensor's original device.
        #[derive(Debug)]
        struct ToDevice;
        impl<B: Backend> Backward<B, 1> for ToDevice {
            // The device the input tensor lived on before the move.
            type State = B::Device;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                // The gradient of a device transfer is the transfer back.
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    B::float_to_device(grad, &ops.state)
                });
            }
        }
        match ToDevice
            .prepare::<C>([tensor.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => {
                // Capture the source device before consuming the primitive so the
                // backward pass knows where to send the gradient.
                let device_old = B::float_device(&tensor.primitive);
                prep.finish(device_old, B::float_to_device(tensor.primitive, device))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_to_device(tensor.primitive, device)),
        }
    }
fn float_empty(shape: Shape, device: &Device<Self>) -> FloatTensor<Self> {
AutodiffTensor::new(B::float_empty(shape, device))
}
    fn float_add(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {
        // Element-wise addition. d(lhs + rhs)/dlhs = d/drhs = 1, so each side's
        // gradient is the incoming gradient reduced back to that operand's shape.
        #[derive(Debug)]
        struct Add;
        retro_binary!(RetroAdd, B::float_add);
        impl<B: Backend> Backward<B, 2> for Add {
            // Shapes of both operands, needed to undo broadcasting on the gradient.
            type State = (Shape, Shape);
            fn backward(
                self,
                ops: Ops<Self::State, 2>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (shape_lhs, shape_rhs) = ops.state;
                binary::<B, _, _>(
                    ops.parents,
                    ops.node,
                    grads,
                    |grad| broadcast_shape::<B>(grad, &shape_lhs),
                    |grad| broadcast_shape::<B>(grad, &shape_rhs),
                );
            }
        }
        match Add
            .prepare::<C>([lhs.node.clone(), rhs.node.clone()])
            .memory_bound()
            .retro_forward(RetroAdd::<B>::new(lhs.node.id, rhs.node.id))
            .parents([&lhs, &rhs])
            .stateful()
        {
            OpsKind::Tracked(preps) => preps.finish(
                (lhs.primitive.shape(), rhs.primitive.shape()),
                B::float_add(lhs.primitive, rhs.primitive),
            ),
            OpsKind::UnTracked(preps) => preps.finish(B::float_add(lhs.primitive, rhs.primitive)),
        }
    }
    fn float_add_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {
        // Adding a scalar: d(lhs + c)/dlhs = 1, so the gradient passes through
        // unchanged and no state needs to be stored (stateless registration).
        #[derive(Debug)]
        struct AddScalar;
        retro_unary_scalar!(RetroAddScalar, B::float_add_scalar);
        impl<B: Backend> Backward<B, 1> for AddScalar {
            type State = ();
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                // Identity gradient.
                unary::<B, _>(ops.parents, ops.node, grads, |grad| grad);
            }
        }
        AddScalar
            .prepare::<C>([lhs.node.clone()])
            .memory_bound()
            .retro_forward(RetroAddScalar::<B>::new(lhs.node.id, rhs))
            .parents([&lhs])
            .stateless(B::float_add_scalar(lhs.primitive, rhs))
    }
    fn float_sub(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {
        // Element-wise subtraction. d(lhs - rhs)/dlhs = 1 and d/drhs = -1, so the
        // rhs gradient is negated; both are then reduced to their operand shapes.
        #[derive(Debug)]
        struct Sub;
        retro_binary!(RetroSub, B::float_sub);
        impl<B: Backend> Backward<B, 2> for Sub {
            // Shapes of both operands, needed to undo broadcasting on the gradient.
            type State = (Shape, Shape);
            fn backward(
                self,
                ops: Ops<Self::State, 2>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (shape_lhs, shape_rhs) = ops.state;
                binary::<B, _, _>(
                    ops.parents,
                    ops.node,
                    grads,
                    |grad| broadcast_shape::<B>(grad, &shape_lhs),
                    // Negative sign from differentiating the subtrahend.
                    |grad| broadcast_shape::<B>(B::float_neg(grad), &shape_rhs),
                );
            }
        }
        match Sub
            .prepare::<C>([lhs.node.clone(), rhs.node.clone()])
            .memory_bound()
            .retro_forward(RetroSub::<B>::new(lhs.node.id, rhs.node.id))
            .parents([&lhs, &rhs])
            .stateful()
        {
            OpsKind::Tracked(preps) => preps.finish(
                (lhs.primitive.shape(), rhs.primitive.shape()),
                B::float_sub(lhs.primitive, rhs.primitive),
            ),
            OpsKind::UnTracked(preps) => preps.finish(B::float_sub(lhs.primitive, rhs.primitive)),
        }
    }
    fn float_sub_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {
        // Subtracting a scalar: d(lhs - c)/dlhs = 1, so the gradient passes
        // through unchanged and the op is registered stateless.
        #[derive(Debug)]
        struct SubScalar;
        retro_unary_scalar!(RetroSubScalar, B::float_sub_scalar);
        impl<B: Backend> Backward<B, 1> for SubScalar {
            type State = ();
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                // Identity gradient.
                unary::<B, _>(ops.parents, ops.node, grads, |grad| grad);
            }
        }
        SubScalar
            .prepare::<C>([lhs.node.clone()])
            .memory_bound()
            .retro_forward(RetroSubScalar::<B>::new(lhs.node.id, rhs))
            .parents([&lhs])
            .stateless(B::float_sub_scalar(lhs.primitive, rhs))
    }
    fn float_mul(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {
        // Element-wise multiplication. d(lhs*rhs)/dlhs = rhs and d/drhs = lhs,
        // so each operand's gradient needs the *other* operand's forward value.
        #[derive(Debug)]
        struct Mul;
        retro_binary!(RetroMul, B::float_mul);
        impl<B: Backend> Backward<B, 2> for Mul {
            // Optional checkpoint ids for lhs/rhs (None when the value is not
            // needed because the other side is untracked) plus broadcast info.
            type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);
            fn backward(
                self,
                ops: Ops<Self::State, 2>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                let (lhs, rhs, broadcast) = ops.state;
                let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));
                let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));
                binary::<B, _, _>(
                    ops.parents,
                    ops.node,
                    grads,
                    |grad| {
                        // dL/dlhs = grad * rhs, reduced to lhs's shape.
                        let grad = B::float_mul(grad, rhs.unwrap());
                        broadcast.backward_lhs::<B>(grad)
                    },
                    |grad| {
                        // dL/drhs = grad * lhs, reduced to rhs's shape.
                        let grad = B::float_mul(grad, lhs.unwrap());
                        broadcast.backward_rhs::<B>(grad)
                    },
                );
            }
        }
        let lhs_tracked = lhs.is_tracked();
        let rhs_tracked = rhs.is_tracked();
        let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);
        match Mul
            .prepare::<C>([lhs.node.clone(), rhs.node.clone()])
            .memory_bound()
            .retro_forward(RetroMul::<B>::new(lhs.node.id, rhs.node.id))
            .parents([&lhs, &rhs])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                // Cross-gated checkpointing: lhs's value is only needed to compute
                // rhs's gradient (and vice versa), so each is saved only when the
                // other operand is tracked.
                let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));
                let rhs_state = lhs_tracked.then(|| prep.checkpoint(&rhs));
                prep.finish(
                    (lhs_state, rhs_state, broadcast),
                    B::float_mul(lhs.primitive, rhs.primitive),
                )
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_mul(lhs.primitive, rhs.primitive)),
        }
    }
    fn float_mul_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {
        // Multiplying by a scalar: d(lhs * c)/dlhs = c, so the backward step just
        // scales the incoming gradient by the stored scalar.
        #[derive(Debug)]
        struct MulScalar;
        retro_unary_scalar!(RetroMulScalar, B::float_mul_scalar);
        impl<B: Backend> Backward<B, 1> for MulScalar {
            // The scalar factor, needed to scale the gradient.
            type State = FloatElem<B>;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    B::float_mul_scalar(grad, ops.state)
                });
            }
        }
        match MulScalar
            .prepare::<C>([lhs.node.clone()])
            .memory_bound()
            .retro_forward(RetroMulScalar::<B>::new(lhs.node.id, rhs))
            .parents([&lhs])
            .stateful()
        {
            OpsKind::Tracked(prep) => prep.finish(rhs, B::float_mul_scalar(lhs.primitive, rhs)),
            OpsKind::UnTracked(prep) => prep.finish(B::float_mul_scalar(lhs.primitive, rhs)),
        }
    }
    fn float_div(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {
        // Element-wise division. d(lhs/rhs)/dlhs = 1/rhs and
        // d(lhs/rhs)/drhs = -lhs/rhs^2.
        #[derive(Debug)]
        struct Div;
        retro_binary!(RetroDiv, B::float_div);
        impl<B: Backend> Backward<B, 2> for Div {
            // Optional checkpoint ids for lhs/rhs (None when that value is not
            // needed by any tracked parent's gradient) plus broadcast info.
            type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);
            fn backward(
                self,
                ops: Ops<Self::State, 2>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                let (lhs, rhs, broadcast) = ops.state;
                let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));
                let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));
                // rhs is consumed by both gradient closures, so clone it only as
                // many times as the tracked parents require.
                let [rhs_4lhs, rhs_4rhs] = duplicate(&ops.parents, rhs);
                binary::<B, _, _>(
                    ops.parents,
                    ops.node,
                    grads,
                    |grad| {
                        // dL/dlhs = grad * rhs^-1, reduced to lhs's shape.
                        let rhs = rhs_4lhs.unwrap();
                        let value = B::float_powf_scalar(rhs, -1.0);
                        let grad = B::float_mul(grad, value);
                        broadcast.backward_lhs::<B>(grad)
                    },
                    |grad| {
                        // dL/drhs = grad * (-lhs / rhs^2), reduced to rhs's shape.
                        let rhs = rhs_4rhs.unwrap();
                        let lhs = lhs.unwrap();
                        let value = B::float_div(B::float_neg(lhs), B::float_powf_scalar(rhs, 2.0));
                        let grad = B::float_mul(grad, value);
                        broadcast.backward_rhs::<B>(grad)
                    },
                );
            }
        }
        let lhs_tracked = lhs.is_tracked();
        let rhs_tracked = rhs.is_tracked();
        let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);
        match Div
            .prepare::<C>([lhs.node.clone(), rhs.node.clone()])
            .memory_bound()
            .retro_forward(RetroDiv::<B>::new(lhs.node.id, rhs.node.id))
            .parents([&lhs, &rhs])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                // lhs's value is only read by the rhs gradient, so checkpoint it
                // only when rhs is tracked. rhs's value is read by both gradient
                // closures, so checkpoint it when either side is tracked.
                let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));
                let rhs_state = (lhs_tracked || rhs_tracked).then(|| prep.checkpoint(&rhs));
                prep.finish(
                    (lhs_state, rhs_state, broadcast),
                    B::float_div(lhs.primitive, rhs.primitive),
                )
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_div(lhs.primitive, rhs.primitive)),
        }
    }
    fn float_div_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {
        // Dividing by a scalar: d(lhs / c)/dlhs = 1/c, so the backward step
        // scales the gradient by the reciprocal of the stored scalar.
        #[derive(Debug)]
        struct DivScalar;
        retro_unary_scalar!(RetroDivScalar, B::float_div_scalar);
        impl<B: Backend> Backward<B, 1> for DivScalar {
            // The scalar divisor.
            type State = FloatElem<B>;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    // Reciprocal computed in f32, then converted back to the
                    // backend's float element type.
                    let tmp = 1.0 / ops.state.elem::<f32>();
                    B::float_mul_scalar(grad, tmp.elem())
                });
            }
        }
        match DivScalar
            .prepare::<C>([lhs.node.clone()])
            .memory_bound()
            .retro_forward(RetroDivScalar::<B>::new(lhs.node.id, rhs))
            .parents([&lhs])
            .stateful()
        {
            OpsKind::Tracked(prep) => prep.finish(rhs, B::float_div_scalar(lhs.primitive, rhs)),
            OpsKind::UnTracked(prep) => prep.finish(B::float_div_scalar(lhs.primitive, rhs)),
        }
    }
    fn float_remainder(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {
        // Element-wise remainder, differentiated via the identity
        // remainder(x, y) = x - floor(x / y) * y.
        #[derive(Debug)]
        struct Rem;
        retro_binary!(RetroRem, B::float_remainder);
        impl<B: Backend> Backward<B, 2> for Rem {
            // Optional checkpoint ids for lhs/rhs plus broadcast info.
            type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);
            fn backward(
                self,
                ops: Ops<Self::State, 2>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                let (lhs, rhs, broadcast) = ops.state;
                let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));
                let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));
                binary::<B, _, _>(
                    ops.parents,
                    ops.node,
                    grads,
                    |grad| {
                        // remainder(x, y) = x - floor(x / y) * y
                        // partial(x - floor(x / y) * y, x) = 1
                        broadcast.backward_lhs::<B>(grad)
                    },
                    |grad| {
                        // partial(x - floor(x / y) * y, y) = - floor(x / y)
                        let rhs = rhs.unwrap();
                        let lhs = lhs.unwrap();
                        let value = B::float_neg(B::float_floor(B::float_div(lhs, rhs)));
                        let grad = B::float_mul(grad, value);
                        broadcast.backward_rhs::<B>(grad)
                    },
                );
            }
        }
        let lhs_tracked = lhs.is_tracked();
        let rhs_tracked = rhs.is_tracked();
        let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);
        match Rem
            .prepare::<C>([lhs.node.clone(), rhs.node.clone()])
            .memory_bound()
            .retro_forward(RetroRem::<B>::new(lhs.node.id, rhs.node.id))
            .parents([&lhs, &rhs])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                // Both lhs and rhs values are needed only by the rhs gradient
                // closure (the lhs gradient is the identity).
                // NOTE(review): unlike `lhs_state`, `rhs_state` is gated on
                // (lhs_tracked || rhs_tracked) even though rhs is only read when
                // rhs is tracked — possibly a conservative over-checkpoint;
                // confirm against `binary`'s contract before tightening.
                let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));
                let rhs_state = (lhs_tracked || rhs_tracked).then(|| prep.checkpoint(&rhs));
                prep.finish(
                    (lhs_state, rhs_state, broadcast),
                    B::float_remainder(lhs.primitive, rhs.primitive),
                )
            }
            OpsKind::UnTracked(prep) => {
                prep.finish(B::float_remainder(lhs.primitive, rhs.primitive))
            }
        }
    }
    fn float_remainder_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {
        // Remainder with a scalar divisor: since remainder(x, c) = x - floor(x/c)*c,
        // the derivative w.r.t. x is 1 (almost everywhere), so the gradient
        // passes through unchanged and the op is registered stateless.
        #[derive(Debug)]
        struct RemainderScalar;
        retro_unary_scalar!(RetroRemainderScalar, B::float_remainder_scalar);
        impl<B: Backend> Backward<B, 1> for RemainderScalar {
            type State = ();
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                // Identity gradient.
                unary::<B, _>(ops.parents, ops.node, grads, |grad| grad);
            }
        }
        RemainderScalar
            .prepare::<C>([lhs.node.clone()])
            .memory_bound()
            .retro_forward(RetroRemainderScalar::<B>::new(lhs.node.id, rhs))
            .parents([&lhs])
            .stateless(B::float_remainder_scalar(lhs.primitive, rhs))
    }
    fn float_matmul(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {
        // Matrix multiplication. Gradients follow the standard rules:
        // dL/dlhs = grad @ rhs^T and dL/drhs = lhs^T @ grad.
        // Marked compute-bound (no retro-forward): recomputing a matmul during
        // checkpointing would be more expensive than storing the output.
        #[derive(Debug)]
        struct Matmul;
        impl<B: Backend> Backward<B, 2> for Matmul {
            // Optional checkpoint ids for lhs/rhs plus broadcast info.
            type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);
            fn backward(
                self,
                ops: Ops<Self::State, 2>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                let (lhs, rhs, broadcast) = ops.state;
                let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));
                let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));
                binary::<B, _, _>(
                    ops.parents,
                    ops.node,
                    grads,
                    |grad| {
                        // dL/dlhs = grad @ rhs^T, reduced to lhs's batch shape.
                        let rhs = B::float_transpose(rhs.unwrap());
                        let grad = B::float_matmul(grad, rhs);
                        broadcast.backward_lhs::<B>(grad)
                    },
                    |grad| {
                        // dL/drhs = lhs^T @ grad, reduced to rhs's batch shape.
                        let lhs = B::float_transpose(lhs.unwrap());
                        let grad = B::float_matmul(lhs, grad);
                        broadcast.backward_rhs::<B>(grad)
                    },
                );
            }
        }
        let lhs_tracked = lhs.is_tracked();
        let rhs_tracked = rhs.is_tracked();
        let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);
        match Matmul
            .prepare::<C>([lhs.node.clone(), rhs.node.clone()])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                // Each operand is checkpointed only when the other side is
                // tracked, since its value is only read by the other's gradient.
                let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));
                let rhs_state = lhs_tracked.then(|| prep.checkpoint(&rhs));
                prep.finish(
                    (lhs_state, rhs_state, broadcast),
                    B::float_matmul(lhs.primitive, rhs.primitive),
                )
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_matmul(lhs.primitive, rhs.primitive)),
        }
    }
    fn float_neg(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        // Negation: d(-x)/dx = -1, so the backward step negates the gradient.
        // No state is needed, hence the stateless registration.
        #[derive(Debug)]
        struct Neg;
        retro_unary!(RetroNeg, B::float_neg);
        impl<B: Backend> Backward<B, 1> for Neg {
            type State = ();
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                unary::<B, _>(ops.parents, ops.node, grads, |grad| B::float_neg(grad));
            }
        }
        Neg.prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroNeg::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateless(B::float_neg(tensor.primitive))
    }
    fn float_recip(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        // Reciprocal: d(1/x)/dx = -x^-2, so the backward step needs the input
        // value, which is checkpointed when the node is tracked.
        #[derive(Debug)]
        struct Recip;
        retro_unary!(RetroRecip, B::float_recip);
        impl<B: Backend> Backward<B, 1> for Recip {
            // Checkpoint id of the input tensor.
            type State = NodeID;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                let tensor = checkpointer.retrieve_node_output(ops.state);
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    // grad * (-x^-2)
                    let tmp = B::float_powf_scalar(tensor, -2.0);
                    let value = B::float_neg(tmp);
                    B::float_mul(grad, value)
                });
            }
        }
        match Recip
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroRecip::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                let state = prep.checkpoint(&tensor);
                prep.finish(state, B::float_recip(tensor.primitive))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_recip(tensor.primitive)),
        }
    }
    fn float_swap_dims(tensor: FloatTensor<Self>, dim1: usize, dim2: usize) -> FloatTensor<Self> {
        // Swapping two dimensions: the backward step applies the same swap to
        // the gradient (swapping is its own inverse).
        #[derive(Debug)]
        struct SwapDim;
        // Lazily recomputes the swapped tensor from its checkpointed input.
        #[derive(new, Debug)]
        struct RetroSwapDims<B: Backend> {
            input_id: NodeID,
            dim1: usize,
            dim2: usize,
            _backend: PhantomData<B>,
        }
        impl<B: Backend> RetroForward for RetroSwapDims<B> {
            fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
                let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);
                let out = B::float_swap_dims(input, self.dim1, self.dim2);
                states.save(out_node, out)
            }
        }
        impl<B: Backend> Backward<B, 1> for SwapDim {
            // The two swapped dimension indices.
            type State = (usize, usize);
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (dim1, dim2) = ops.state;
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    B::float_swap_dims(grad, dim2, dim1)
                });
            }
        }
        match SwapDim
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroSwapDims::<B>::new(tensor.node.id, dim1, dim2))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(prep) => prep.finish(
                (dim1, dim2),
                B::float_swap_dims(tensor.primitive, dim1, dim2),
            ),
            OpsKind::UnTracked(prep) => {
                prep.finish(B::float_swap_dims(tensor.primitive, dim1, dim2))
            }
        }
    }
    fn float_permute(tensor: FloatTensor<Self>, axes: &[usize]) -> FloatTensor<Self> {
        // Axis permutation: the backward step applies the inverse permutation
        // to the gradient.
        #[derive(Debug)]
        struct PermuteDim;
        // Lazily recomputes the permuted tensor from its checkpointed input.
        #[derive(new, Debug)]
        struct RetroPermuteDims<B: Backend> {
            input_id: NodeID,
            axes: Vec<usize>,
            _backend: PhantomData<B>,
        }
        impl<B: Backend> RetroForward for RetroPermuteDims<B> {
            fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
                let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);
                let out = B::float_permute(input, &self.axes);
                states.save(out_node, out)
            }
        }
        impl<B: Backend> Backward<B, 1> for PermuteDim {
            // The forward permutation, inverted during backward.
            type State = Vec<usize>;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let axes = ops.state;
                // Build the inverse permutation: if axis `axis` moved to
                // position `i`, then position `axis` of the inverse maps to `i`.
                let mut inverse = vec![0usize; axes.len()];
                axes.iter()
                    .enumerate()
                    .for_each(|(i, &axis)| inverse[axis] = i);
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    B::float_permute(grad, &inverse)
                });
            }
        }
        match PermuteDim
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroPermuteDims::<B>::new(tensor.node.id, axes.to_vec()))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(prep) => {
                prep.finish(axes.to_vec(), B::float_permute(tensor.primitive, axes))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_permute(tensor.primitive, axes)),
        }
    }
    fn float_flip(tensor: FloatTensor<Self>, axes: &[usize]) -> FloatTensor<Self> {
        // Flipping along axes: flipping is its own inverse, so the backward step
        // re-flips the gradient along the same axes.
        #[derive(Debug)]
        struct FlipDim;
        // Lazily recomputes the flipped tensor from its checkpointed input.
        #[derive(new, Debug)]
        struct RetroFlipDims<B: Backend> {
            input_id: NodeID,
            axes: Vec<usize>,
            _backend: PhantomData<B>,
        }
        impl<B: Backend> RetroForward for RetroFlipDims<B> {
            fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
                let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);
                let out = B::float_flip(input, &self.axes);
                states.save(out_node, out)
            }
        }
        impl<B: Backend> Backward<B, 1> for FlipDim {
            // The flipped axes, reused verbatim in backward.
            type State = Vec<usize>;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let axes = ops.state;
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    B::float_flip(grad, &axes)
                });
            }
        }
        match FlipDim
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroFlipDims::<B>::new(tensor.node.id, axes.to_vec()))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(prep) => {
                prep.finish(axes.to_vec(), B::float_flip(tensor.primitive, axes))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_flip(tensor.primitive, axes)),
        }
    }
    fn float_reshape(tensor: FloatTensor<Self>, shape: Shape) -> FloatTensor<Self> {
        // Reshape: the backward step restores the gradient to the original shape,
        // first summing over any dimension that was 1 in the reshape output but
        // grew in the gradient (i.e. was broadcast by a downstream op).
        #[derive(Debug)]
        struct ReshapeDim;
        // Lazily recomputes the reshaped tensor from its checkpointed input.
        #[derive(new, Debug)]
        struct RetroReshape<B: Backend> {
            input_id: NodeID,
            shape: Shape,
            _backend: PhantomData<B>,
        }
        impl<B: Backend> RetroForward for RetroReshape<B> {
            fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
                let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);
                let out = B::float_reshape(input, self.shape.clone());
                states.save(out_node, out)
            }
        }
        impl<B: Backend> Backward<B, 1> for ReshapeDim {
            // (original input shape, requested output shape)
            type State = (Shape, Shape);
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (shape_original, shape) = ops.state;
                let ndims_out = shape.num_dims();
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    let shape_grad = grad.shape();
                    let mut grad = grad;
                    // Collapse broadcast dimensions before reshaping back.
                    for i in 0..ndims_out {
                        if shape.dims[i] == 1 && shape_grad.dims[i] != 1 {
                            grad = B::float_sum_dim(grad, i);
                        }
                    }
                    B::float_reshape(grad, shape_original)
                });
            }
        }
        match ReshapeDim
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroReshape::<B>::new(tensor.node.id, shape.clone()))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(prep) => prep.finish(
                (tensor.primitive.shape(), shape.clone()),
                B::float_reshape(tensor.primitive, shape),
            ),
            OpsKind::UnTracked(prep) => prep.finish(B::float_reshape(tensor.primitive, shape)),
        }
    }
    fn float_gather(
        dim: usize,
        tensor: FloatTensor<Self>,
        indices: IntTensor<B>,
    ) -> FloatTensor<Self> {
        // Gather along `dim`: the backward step scatters the gradient back into
        // a zero tensor of the input's shape at the same indices.
        #[derive(Debug)]
        struct Gather;
        impl<B: Backend> Backward<B, 1> for Gather {
            // (dim, gather indices, input shape, input device)
            type State = (usize, IntTensor<B>, Shape, B::Device);
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (dim, indices, shape, device) = ops.state;
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    let zeros = B::float_zeros(shape, &device);
                    B::float_scatter(dim, zeros, indices, grad)
                });
            }
        }
        match Gather
            .prepare::<C>([tensor.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => prep.finish(
                (
                    dim,
                    indices.clone(),
                    tensor.primitive.shape(),
                    B::float_device(&tensor.primitive),
                ),
                B::float_gather(dim, tensor.primitive, indices),
            ),
            OpsKind::UnTracked(prep) => {
                prep.finish(B::float_gather(dim, tensor.primitive, indices))
            }
        }
    }
    fn float_scatter(
        dim: usize,
        tensor: FloatTensor<Self>,
        indices: IntTensor<B>,
        value: FloatTensor<Self>,
    ) -> FloatTensor<Self> {
        // Scatter `value` into `tensor` along `dim` at `indices`. The backward
        // step routes the gradient to both inputs (see closures below).
        #[derive(Debug)]
        struct Scatter;
        impl<B: Backend> Backward<B, 2> for Scatter {
            // (dim, indices, tensor shape, value shape, device)
            type State = (usize, IntTensor<B>, Shape, Shape, B::Device);
            fn backward(
                self,
                ops: Ops<Self::State, 2>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (dim, indices, shape_lhs, shape_rhs, device) = ops.state;
                // `indices` is consumed by both closures; clone only as many
                // times as the tracked parents require.
                let [indices_4lhs, indices_4rhs] = duplicate(&ops.parents, Some(indices));
                binary::<B, _, _>(
                    ops.parents,
                    ops.node,
                    grads,
                    |grad| {
                        // Tensor gradient: scatter zeros into `grad` at `indices`.
                        let zeros = B::float_zeros(shape_lhs, &device);
                        B::float_scatter(dim, grad, indices_4lhs.unwrap(), zeros)
                    },
                    |grad| {
                        // Value gradient: scatter `grad` into zeros of the value
                        // shape at `indices`.
                        let zeros = B::float_zeros(shape_rhs, &device);
                        B::float_scatter(dim, zeros, indices_4rhs.unwrap(), grad)
                    },
                );
            }
        }
        match Scatter
            .prepare::<C>([tensor.node, value.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => prep.finish(
                (
                    dim,
                    indices.clone(),
                    tensor.primitive.shape(),
                    value.primitive.shape(),
                    B::float_device(&value.primitive),
                ),
                B::float_scatter(dim, tensor.primitive, indices, value.primitive),
            ),
            OpsKind::UnTracked(prep) => prep.finish(B::float_scatter(
                dim,
                tensor.primitive,
                indices,
                value.primitive,
            )),
        }
    }
    fn float_select(
        tensor: FloatTensor<Self>,
        dim: usize,
        indices: IntTensor<B>,
    ) -> FloatTensor<Self> {
        // Select rows/slices along `dim` by `indices`: the backward step assigns
        // the gradient back into a zero tensor of the input's shape at the same
        // indices via `select_assign`.
        #[derive(Debug)]
        struct Select;
        // Lazily recomputes the selection from its checkpointed input.
        #[derive(new, Debug)]
        struct RetroSelect<B: Backend> {
            input_id: NodeID,
            dim: usize,
            indices: IntTensor<B>,
        }
        impl<B: Backend> RetroForward for RetroSelect<B> {
            fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
                let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);
                let out = B::float_select(input, self.dim, self.indices.clone());
                states.save(out_node, out)
            }
        }
        impl<B: Backend> Backward<B, 1> for Select {
            // (dim, selection indices, input shape, input device)
            type State = (usize, IntTensor<B>, Shape, B::Device);
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (dim, indices, shape, device) = ops.state;
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    let zeros = B::float_zeros(shape, &device);
                    B::float_select_assign(zeros, dim, indices, grad)
                });
            }
        }
        match Select
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroSelect::<B>::new(tensor.node.id, dim, indices.clone()))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(prep) => prep.finish(
                (
                    dim,
                    indices.clone(),
                    tensor.primitive.shape(),
                    B::float_device(&tensor.primitive),
                ),
                B::float_select(tensor.primitive, dim, indices),
            ),
            OpsKind::UnTracked(prep) => {
                prep.finish(B::float_select(tensor.primitive, dim, indices))
            }
        }
    }
    // Assign `value` into `tensor` along `dim` at `indices` (autodiff-aware).
    fn float_select_assign(
        tensor: FloatTensor<Self>,
        dim: usize,
        indices: IntTensor<B>,
        value: FloatTensor<Self>,
    ) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct IndexSelectDimAssign;
        // Recomputes the forward op from both saved inputs for memory-bound
        // checkpointing.
        #[derive(new, Debug)]
        struct RetroSelectAssign<B: Backend> {
            tensor_id: NodeID,
            dim: usize,
            indices: IntTensor<B>,
            value_id: NodeID,
        }
        impl<B: Backend> RetroForward for RetroSelectAssign<B> {
            fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
                let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);
                let value = states.get_state::<B::FloatTensorPrimitive>(&self.value_id);
                let out = B::float_select_assign(tensor, self.dim, self.indices.clone(), value);
                states.save(out_node, out)
            }
        }
        impl<B: Backend> Backward<B, 2> for IndexSelectDimAssign {
            type State = (usize, IntTensor<B>);
            fn backward(
                self,
                ops: Ops<Self::State, 2>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (dim, indices) = ops.state;
                binary::<B, _, _>(
                    ops.parents,
                    ops.node,
                    grads,
                    // Grad w.r.t. `tensor`: passes through unchanged.
                    |grad| grad,
                    // Grad w.r.t. `value`: the grad entries at the assigned indices.
                    |grad| B::float_select(grad, dim, indices),
                );
            }
        }
        match IndexSelectDimAssign
            .prepare::<C>([tensor.node.clone(), value.node.clone()])
            .memory_bound()
            .retro_forward(RetroSelectAssign::<B>::new(
                tensor.node.id,
                dim,
                indices.clone(),
                value.node.id,
            ))
            .parents([&tensor, &value])
            .stateful()
        {
            OpsKind::Tracked(prep) => prep.finish(
                (dim, indices.clone()),
                B::float_select_assign(tensor.primitive, dim, indices, value.primitive),
            ),
            OpsKind::UnTracked(prep) => prep.finish(B::float_select_assign(
                tensor.primitive,
                dim,
                indices,
                value.primitive,
            )),
        }
    }
    // Slice `tensor` by per-dimension ranges (autodiff-aware).
    fn float_slice(
        tensor: FloatTensor<Self>,
        ranges: &[core::ops::Range<usize>],
    ) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Index;
        // Recomputes the forward slice from the saved input for memory-bound
        // checkpointing.
        #[derive(new, Debug)]
        struct RetroSlice<B: Backend> {
            tensor_id: NodeID,
            ranges: Vec<core::ops::Range<usize>>,
            _backend: PhantomData<B>,
        }
        impl<B: Backend> RetroForward for RetroSlice<B> {
            fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
                let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);
                let out = B::float_slice(tensor, &self.ranges);
                states.save(out_node, out)
            }
        }
        impl<B: Backend> Backward<B, 1> for Index {
            type State = (Vec<core::ops::Range<usize>>, Shape, B::Device);
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (ranges, shape, device) = ops.state;
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    // Paste the grad of the slice back into a zero tensor of the
                    // full input shape.
                    let zeros = B::float_zeros(shape, &device);
                    B::float_slice_assign(zeros, &ranges, grad)
                });
            }
        }
        match Index
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroSlice::<B>::new(tensor.node.id, ranges.to_vec()))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(prep) => prep.finish(
                (
                    ranges.to_vec(),
                    tensor.primitive.shape(),
                    B::float_device(&tensor.primitive),
                ),
                B::float_slice(tensor.primitive, ranges),
            ),
            OpsKind::UnTracked(prep) => prep.finish(B::float_slice(tensor.primitive, ranges)),
        }
    }
    // Assign `value` into the `ranges` region of `tensor` (autodiff-aware).
    fn float_slice_assign(
        tensor: FloatTensor<Self>,
        ranges: &[core::ops::Range<usize>],
        value: FloatTensor<Self>,
    ) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct SliceAssign;
        // Recomputes the forward op from both saved inputs for memory-bound
        // checkpointing.
        #[derive(new, Debug)]
        struct RetroSliceAssign<B: Backend> {
            tensor_id: NodeID,
            ranges: Vec<core::ops::Range<usize>>,
            value_id: NodeID,
            _backend: PhantomData<B>,
        }
        impl<B: Backend> RetroForward for RetroSliceAssign<B> {
            fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
                let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);
                let value = states.get_state::<B::FloatTensorPrimitive>(&self.value_id);
                let out = B::float_slice_assign(tensor, &self.ranges, value);
                states.save(out_node, out)
            }
        }
        impl<B: Backend> Backward<B, 2> for SliceAssign {
            type State = (Vec<core::ops::Range<usize>>, Shape, B::Device);
            fn backward(
                self,
                ops: Ops<Self::State, 2>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (ranges, shape_rhs, device) = ops.state;
                // Clone the ranges only if both parents are tracked.
                let [ranges_4lhs, ranges_4rhs] = duplicate(&ops.parents, Some(ranges));
                binary::<B, _, _>(
                    ops.parents,
                    ops.node,
                    grads,
                    |grad| {
                        // Grad w.r.t. `tensor`: the assigned region was overwritten,
                        // so zero it out in the incoming grad.
                        let zeros = B::float_zeros(shape_rhs, &device);
                        B::float_slice_assign(grad, &ranges_4lhs.unwrap(), zeros)
                    },
                    // Grad w.r.t. `value`: the grad restricted to the assigned region.
                    |grad| B::float_slice(grad, &ranges_4rhs.unwrap()),
                );
            }
        }
        match SliceAssign
            .prepare::<C>([tensor.node.clone(), value.node.clone()])
            .memory_bound()
            .retro_forward(RetroSliceAssign::<B>::new(
                tensor.node.id,
                ranges.to_vec(),
                value.node.id,
            ))
            .parents([&tensor, &value])
            .stateful()
        {
            OpsKind::Tracked(prep) => prep.finish(
                (
                    ranges.to_vec(),
                    value.primitive.shape(),
                    B::float_device(&value.primitive),
                ),
                B::float_slice_assign(tensor.primitive, ranges, value.primitive),
            ),
            OpsKind::UnTracked(prep) => prep.finish(B::float_slice_assign(
                tensor.primitive,
                ranges,
                value.primitive,
            )),
        }
    }
    // Element-wise select: take `source` where `mask` is set, `tensor` elsewhere
    // (autodiff-aware, supports broadcasting).
    fn float_mask_where(
        tensor: FloatTensor<Self>,
        mask: BoolTensor<Self>,
        source: FloatTensor<Self>,
    ) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct MaskWhere;
        impl<B: Backend> Backward<B, 2> for MaskWhere {
            // (mask, tensor shape, source shape, device) captured at forward time.
            type State = (BoolTensor<B>, Shape, Shape, B::Device);
            fn backward(
                self,
                ops: Ops<Self::State, 2>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (mask, shape_lhs, shape_rhs, device) = ops.state;
                // Clone the mask only if both parents are tracked.
                let [mask_4lhs, mask_4rhs] = duplicate(&ops.parents, Some(mask));
                binary::<B, _, _>(
                    ops.parents,
                    ops.node,
                    grads,
                    |grad| {
                        // Grad w.r.t. `tensor`: zero where the mask picked `source`,
                        // then reduce broadcast dimensions back to `shape_lhs`.
                        let zeros = B::float_zeros(shape_lhs.clone(), &device);
                        let grad = B::float_mask_where(grad, mask_4lhs.unwrap(), zeros);
                        broadcast_shape::<B>(grad, &shape_lhs)
                    },
                    |grad| {
                        // Grad w.r.t. `source`: keep only masked positions, then
                        // reduce broadcast dimensions back to `shape_rhs`.
                        let zeros = B::float_zeros(shape_rhs.clone(), &device);
                        let grad = B::float_mask_where(zeros, mask_4rhs.unwrap(), grad);
                        broadcast_shape::<B>(grad, &shape_rhs)
                    },
                );
            }
        }
        match MaskWhere
            .prepare::<C>([tensor.node, source.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => prep.finish(
                (
                    mask.clone(),
                    tensor.primitive.shape(),
                    source.primitive.shape(),
                    B::float_device(&source.primitive),
                ),
                B::float_mask_where(tensor.primitive, mask, source.primitive),
            ),
            OpsKind::UnTracked(prep) => prep.finish(B::float_mask_where(
                tensor.primitive,
                mask,
                source.primitive,
            )),
        }
    }
    // Fill masked positions of `tensor` with the scalar `value` (autodiff-aware).
    fn float_mask_fill(
        tensor: FloatTensor<Self>,
        mask: BoolTensor<B>,
        value: FloatElem<B>,
    ) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct MaskFill;
        impl<B: Backend> Backward<B, 1> for MaskFill {
            // Only the mask is needed to zero out grads at filled positions.
            type State = BoolTensor<B>;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    // Filled positions are constants, so their grad contribution is 0.
                    B::float_mask_fill(grad, ops.state, 0.elem())
                });
            }
        }
        match MaskFill
            .prepare::<C>([tensor.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => prep.finish(
                mask.clone(),
                B::float_mask_fill(tensor.primitive, mask, value),
            ),
            OpsKind::UnTracked(prep) => {
                prep.finish(B::float_mask_fill(tensor.primitive, mask, value))
            }
        }
    }
    // The comparison operators below are not differentiable, so they bypass the
    // autodiff graph entirely and delegate to the inner backend's primitives,
    // returning plain bool tensors.
    fn float_equal(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {
        B::float_equal(lhs.primitive, rhs.primitive)
    }
    fn float_equal_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {
        B::float_equal_elem(lhs.primitive, rhs)
    }
    fn float_greater(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {
        B::float_greater(lhs.primitive, rhs.primitive)
    }
    fn float_greater_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {
        B::float_greater_elem(lhs.primitive, rhs)
    }
    fn float_greater_equal(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {
        B::float_greater_equal(lhs.primitive, rhs.primitive)
    }
    fn float_greater_equal_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {
        B::float_greater_equal_elem(lhs.primitive, rhs)
    }
    fn float_lower(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {
        B::float_lower(lhs.primitive, rhs.primitive)
    }
    fn float_lower_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {
        B::float_lower_elem(lhs.primitive, rhs)
    }
    fn float_lower_equal(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {
        B::float_lower_equal(lhs.primitive, rhs.primitive)
    }
    fn float_lower_equal_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {
        B::float_lower_equal_elem(lhs.primitive, rhs)
    }
fn float_detach(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
// When we detach a tensor, we remove it from the graph, but we still want to keep the
// `require_grad` setting.
let is_require_grad = Self::float_is_require_grad(&tensor);
let tensor = AutodiffTensor::new(tensor.primitive);
match is_require_grad {
true => tensor.require_grad(),
false => tensor,
}
}
fn float_set_require_grad(tensor: FloatTensor<Self>, require_grad: bool) -> FloatTensor<Self> {
if require_grad {
return tensor.require_grad();
}
AutodiffTensor::new(tensor.primitive)
}
    // A tensor requires grad iff its node carries the `Grad` requirement.
    fn float_is_require_grad(tensor: &FloatTensor<Self>) -> bool {
        matches!(tensor.node.requirement, Requirement::Grad)
    }
    // Global mean with autodiff: each input element receives grad / num_elements.
    fn float_mean(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Mean;
        impl<B: Backend> Backward<B, 1> for Mean {
            // The input shape, needed to rebuild a full-size gradient.
            type State = Shape;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    let shape = ops.state;
                    // Scale factor 1/N spread over a full-size ones tensor.
                    let val = 1_f64 / shape.num_elements() as f64;
                    let ones = B::float_ones(shape, &B::float_device(&grad));
                    let val = B::float_mul_scalar(ones, val.elem());
                    // Unsqueeze the scalar grad so it broadcasts over the input shape.
                    let grad = unsqueeze_like::<B>(grad, val.shape());
                    B::float_mul(val, grad)
                });
            }
        }
        match Mean.prepare::<C>([tensor.node]).compute_bound().stateful() {
            OpsKind::Tracked(prep) => {
                prep.finish(tensor.primitive.shape(), B::float_mean(tensor.primitive))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_mean(tensor.primitive)),
        }
    }
    // Global sum with autodiff: each input element receives the output grad.
    fn float_sum(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Sum;
        impl<B: Backend> Backward<B, 1> for Sum {
            // The input shape, needed to rebuild a full-size gradient.
            type State = Shape;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    let val = B::float_ones(ops.state, &B::float_device(&grad));
                    // Unsqueeze the scalar grad so it broadcasts over the input shape.
                    let grad = unsqueeze_like::<B>(grad, val.shape());
                    B::float_mul(val, grad)
                });
            }
        }
        match Sum.prepare::<C>([tensor.node]).compute_bound().stateful() {
            OpsKind::Tracked(prep) => {
                prep.finish(tensor.primitive.shape(), B::float_sum(tensor.primitive))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_sum(tensor.primitive)),
        }
    }
    // Mean over one dimension with autodiff: grad is broadcast back along `dim`
    // and scaled by 1 / dim_size.
    fn float_mean_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct MeanDim;
        impl<B: Backend> Backward<B, 1> for MeanDim {
            // (input shape, reduced dim) captured at forward time.
            type State = (Shape, usize);
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (shape, dim) = ops.state;
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    // 1 / dim_size scaling spread over the full input shape.
                    let val = 1_f64 / shape.dims[dim] as f64;
                    let ones = B::float_ones(shape, &B::float_device(&grad));
                    let val = B::float_mul_scalar(ones, B::FloatElem::from_elem(val));
                    // Collapse grad along `dim` so the multiply broadcasts it back.
                    let grad = B::float_sum_dim(grad, dim);
                    B::float_mul(val, grad)
                });
            }
        }
        match MeanDim
            .prepare::<C>([tensor.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => prep.finish(
                (tensor.primitive.shape(), dim),
                B::float_mean_dim(tensor.primitive, dim),
            ),
            OpsKind::UnTracked(prep) => prep.finish(B::float_mean_dim(tensor.primitive, dim)),
        }
    }
    // Sum over one dimension with autodiff: grad is broadcast back along `dim`.
    fn float_sum_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct SumDim;
        impl<B: Backend> Backward<B, 1> for SumDim {
            // (input shape, reduced dim) captured at forward time.
            type State = (Shape, usize);
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (shape, dim) = ops.state;
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    let ones = B::float_ones(shape, &B::float_device(&grad));
                    // Collapse grad along `dim` so the multiply broadcasts it back.
                    let grad = B::float_sum_dim(grad, dim);
                    B::float_mul(ones, grad)
                });
            }
        }
        match SumDim
            .prepare::<C>([tensor.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => prep.finish(
                (tensor.primitive.shape(), dim),
                B::float_sum_dim(tensor.primitive, dim),
            ),
            OpsKind::UnTracked(prep) => prep.finish(B::float_sum_dim(tensor.primitive, dim)),
        }
    }
    // Argmax/argmin return integer indices, which carry no gradient; they
    // operate directly on the inner primitive.
    fn float_argmax(tensor: FloatTensor<Self>, dim: usize) -> IntTensor<B> {
        B::float_argmax(tensor.primitive, dim)
    }
    fn float_argmin(tensor: FloatTensor<Self>, dim: usize) -> IntTensor<B> {
        B::float_argmin(tensor.primitive, dim)
    }
    // Exponential with autodiff: d/dx exp(x) = exp(x).
    fn float_exp(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Exp;
        retro_unary!(RetroExp, B::float_exp);
        impl<B: Backend> Backward<B, 1> for Exp {
            // The checkpointed input node id; the output is recomputed in backward.
            type State = NodeID;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                let input = checkpointer.retrieve_node_output(ops.state);
                // Recompute exp(x) instead of storing it (memory-bound strategy).
                let output = B::float_exp(input);
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    B::float_mul(grad, output)
                });
            }
        }
        match Exp
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroExp::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                // Checkpoint the input so backward can retrieve it later.
                let state = prep.checkpoint(&tensor);
                prep.finish(state, B::float_exp(tensor.primitive))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_exp(tensor.primitive)),
        }
    }
    // Natural log with autodiff: d/dx ln(x) = 1/x.
    fn float_log(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Log;
        retro_unary!(RetroLog, B::float_log);
        impl<B: Backend> Backward<B, 1> for Log {
            // The checkpointed input node id.
            type State = NodeID;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                let input = checkpointer.retrieve_node_output(ops.state);
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    // x^(-1) == 1/x
                    let value = B::float_powf_scalar(input, -1.0);
                    B::float_mul(grad, value)
                });
            }
        }
        match Log
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroLog::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                let state = prep.checkpoint(&tensor);
                prep.finish(state, B::float_log(tensor.primitive))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_log(tensor.primitive)),
        }
    }
    // log(1 + x) with autodiff: d/dx log1p(x) = 1 / (x + 1).
    fn float_log1p(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Log1P;
        retro_unary!(RetroLog1P, B::float_log1p);
        impl<B: Backend> Backward<B, 1> for Log1P {
            // The checkpointed input node id.
            type State = NodeID;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                let input = checkpointer.retrieve_node_output(ops.state);
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    // (x + 1)^(-1)
                    let value = B::float_add_scalar(input, 1.elem());
                    let value = B::float_powf_scalar(value, -1.0);
                    B::float_mul(grad, value)
                });
            }
        }
        match Log1P
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroLog1P::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                let state = prep.checkpoint(&tensor);
                prep.finish(state, B::float_log1p(tensor.primitive))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_log1p(tensor.primitive)),
        }
    }
    // x^v for a scalar exponent, with autodiff: d/dx x^v = v * x^(v-1).
    fn float_powf_scalar(tensor: FloatTensor<Self>, value: f32) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct PowfScalar;
        // Recomputes the forward pow from the saved input for memory-bound
        // checkpointing.
        #[derive(new, Debug)]
        struct RetroPowfScalar<B: Backend> {
            lhs_id: NodeID,
            rhs: f32,
            _backend: PhantomData<B>,
        }
        impl<B: Backend> RetroForward for RetroPowfScalar<B> {
            fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
                let lhs = states.get_state::<B::FloatTensorPrimitive>(&self.lhs_id);
                let out = B::float_powf_scalar(lhs, self.rhs);
                states.save(out_node, out)
            }
        }
        impl<B: Backend> Backward<B, 1> for PowfScalar {
            // (checkpointed input node id, exponent).
            type State = (NodeID, f32);
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                let (tensor_id, value) = ops.state;
                let tensor = checkpointer.retrieve_node_output(tensor_id);
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    // v * x^(v - 1)
                    let tmp = B::float_powf_scalar(tensor, value - 1.0);
                    let value = B::float_mul_scalar(tmp, value.elem());
                    B::float_mul(grad, value)
                });
            }
        }
        match PowfScalar
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroPowfScalar::<B>::new(tensor.node.id, value))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                let state = (prep.checkpoint(&tensor), value);
                prep.finish(state, B::float_powf_scalar(tensor.primitive, value))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_powf_scalar(tensor.primitive, value)),
        }
    }
    // Square root with autodiff: d/dx sqrt(x) = x^(-1/2) / 2.
    fn float_sqrt(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Sqrt;
        retro_unary!(RetroSqrt, B::float_sqrt);
        impl<B: Backend> Backward<B, 1> for Sqrt {
            // The checkpointed input node id.
            type State = NodeID;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                let input = checkpointer.retrieve_node_output(ops.state);
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    // x^(-0.5) / 2
                    let value = B::float_div_scalar(B::float_powf_scalar(input, -0.5), 2.elem());
                    B::float_mul(grad, value)
                });
            }
        }
        match Sqrt
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroSqrt::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                let state = prep.checkpoint(&tensor);
                prep.finish(state, B::float_sqrt(tensor.primitive))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_sqrt(tensor.primitive)),
        }
    }
    // Absolute value with autodiff: d/dx |x| = sign(x).
    fn float_abs(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Abs;
        retro_unary!(RetroAbs, B::float_abs);
        impl<B: Backend> Backward<B, 1> for Abs {
            // The checkpointed input node id.
            type State = NodeID;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                let tensor: B::FloatTensorPrimitive = checkpointer.retrieve_node_output(ops.state);
                // sign(x), recomputed from the checkpointed input.
                let state = B::float_sign(tensor);
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    B::float_mul(grad, state)
                });
            }
        }
        match Abs
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroAbs::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                let state = prep.checkpoint(&tensor);
                prep.finish(state, B::float_abs(tensor.primitive))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_abs(tensor.primitive)),
        }
    }
    // Cosine with autodiff: d/dx cos(x) = -sin(x).
    fn float_cos(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Cos;
        retro_unary!(RetroCos, B::float_cos);
        impl<B: Backend> Backward<B, 1> for Cos {
            // The checkpointed input node id.
            type State = NodeID;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                let input = checkpointer.retrieve_node_output(ops.state);
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    let value = B::float_neg(B::float_sin(input));
                    B::float_mul(grad, value)
                });
            }
        }
        match Cos
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroCos::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                let state = prep.checkpoint(&tensor);
                prep.finish(state, B::float_cos(tensor.primitive))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_cos(tensor.primitive)),
        }
    }
    // Sine with autodiff: d/dx sin(x) = cos(x).
    fn float_sin(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Sin;
        retro_unary!(RetroSin, B::float_sin);
        impl<B: Backend> Backward<B, 1> for Sin {
            // The checkpointed input node id.
            type State = NodeID;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                let state = checkpointer.retrieve_node_output(ops.state);
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    let value = B::float_cos(state);
                    B::float_mul(grad, value)
                });
            }
        }
        match Sin
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroSin::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                let state = prep.checkpoint(&tensor);
                prep.finish(state, B::float_sin(tensor.primitive))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_sin(tensor.primitive)),
        }
    }
    // Hyperbolic tangent with autodiff: d/dx tanh(x) = 1 - tanh(x)^2.
    fn float_tanh(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Tanh;
        retro_unary!(RetroTanh, B::float_tanh);
        impl<B: Backend> Backward<B, 1> for Tanh {
            // The checkpointed input node id; tanh is recomputed in backward.
            type State = NodeID;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                let input = checkpointer.retrieve_node_output(ops.state);
                let state = B::float_tanh(input);
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    // 1 - tanh(x)^2
                    let value = B::float_add_scalar(
                        B::float_neg(B::float_powf_scalar(state, 2.0)),
                        1.elem(),
                    );
                    B::float_mul(grad, value)
                });
            }
        }
        match Tanh
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroTanh::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                let state = prep.checkpoint(&tensor);
                prep.finish(state, B::float_tanh(tensor.primitive))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_tanh(tensor.primitive)),
        }
    }
    // Rounding with autodiff: round is piecewise constant, so its gradient is
    // zero everywhere.
    fn float_round(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Round;
        retro_unary!(RetroRound, B::float_round);
        impl<B: Backend> Backward<B, 1> for Round {
            // Only (shape, device) are needed to build the zero gradient.
            type State = (Shape, B::Device);
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (shape, device) = ops.state;
                unary::<B, _>(ops.parents, ops.node, grads, |_grad| {
                    B::float_zeros(shape, &device)
                })
            }
        }
        match Round
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroRound::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(preps) => preps.finish(
                (tensor.primitive.shape(), B::float_device(&tensor.primitive)),
                B::float_round(tensor.primitive),
            ),
            OpsKind::UnTracked(preps) => preps.finish(B::float_round(tensor.primitive)),
        }
    }
    // Floor with autodiff: floor is piecewise constant, so its gradient is zero
    // everywhere.
    fn float_floor(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Floor;
        retro_unary!(RetroFloor, B::float_floor);
        impl<B: Backend> Backward<B, 1> for Floor {
            // Only (shape, device) are needed to build the zero gradient.
            type State = (Shape, B::Device);
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (shape, device) = ops.state;
                unary::<B, _>(ops.parents, ops.node, grads, |_grad| {
                    B::float_zeros(shape, &device)
                })
            }
        }
        match Floor
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroFloor::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(preps) => preps.finish(
                (tensor.primitive.shape(), B::float_device(&tensor.primitive)),
                B::float_floor(tensor.primitive),
            ),
            OpsKind::UnTracked(preps) => preps.finish(B::float_floor(tensor.primitive)),
        }
    }
fn float_ceil(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
#[derive(Debug)]
struct Ceil;
retro_unary!(RetroCeil, B::float_ceil);
impl<B: Backend> Backward<B, 1> for Ceil {
type State = (Shape, B::Device);
fn backward(
self,
ops: Ops<Self::State, 1>,
grads: &mut Gradients,
_checkpointer: &mut Checkpointer,
) {
let (shape, device) = ops.state;
unary::<B, _>(ops.parents, ops.node, grads, |_grad| {
B::float_zeros(shape, &device)
})
}
}
match Ceil
.prepare::<C>([tensor.node.clone()])
.memory_bound()
.retro_forward(RetroCeil::<B>::new(tensor.node.id))
.parents([&tensor])
.stateful()
{
OpsKind::Tracked(preps) => preps.finish(
(tensor.primitive.shape(), B::float_device(&tensor.primitive)),
B::float_floor(tensor.primitive),
),
OpsKind::UnTracked(preps) => preps.finish(B::float_floor(tensor.primitive)),
}
}
    // Error function with autodiff: d/dx erf(x) = (2 / sqrt(pi)) * exp(-x^2).
    fn float_erf(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Erf;
        retro_unary!(RetroErf, B::float_erf);
        impl<B: Backend> Backward<B, 1> for Erf {
            // The checkpointed input node id.
            type State = NodeID;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    let ops = checkpointer.retrieve_node_output(ops.state);
                    // 2 * exp(-x^2) / sqrt(pi)
                    let exponent = B::float_neg(B::float_powf_scalar(ops, 2.0));
                    let numerator = B::float_mul_scalar(B::float_exp(exponent), 2.0.elem());
                    let denominator = core::f64::consts::PI.sqrt().elem();
                    let value = B::float_div_scalar(numerator, denominator);
                    B::float_mul(grad, value)
                });
            }
        }
        match Erf
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroErf::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                let state = prep.checkpoint(&tensor);
                prep.finish(state, B::float_erf(tensor.primitive))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_erf(tensor.primitive)),
        }
    }
fn float_cat(tensors: Vec<FloatTensor<Self>>, dim: usize) -> FloatTensor<Self> {
#[derive(new, Debug)]
struct CatStep<B: Backend> {
nodes: Vec<Option<NodeRef>>,
// The dimension of each tensor along the dim dimension.
// This indicates the number of dimension concatenated for each tensor.
dim_sizes: Vec<usize>,
output: NodeRef,
phantom: PhantomData<B>,
dim: usize,
}
impl<B: Backend> Step for CatStep<B> {
fn step(self: Box<Self>, grads: &mut Gradients, _checkpointer: &mut Checkpointer) {
let grad = grads.consume::<B>(&self.output);
let ranges: Vec<_> = grad.shape().dims.iter().map(|v| 0..*v).collect();
let mut current_index = 0;
self.nodes
.into_iter()
.zip(self.dim_sizes)
.filter_map(|(node, dim_size)| node.map(|node| (node, dim_size)))
.for_each(|(node, dim_size)| {
let mut ranges = ranges.clone();
ranges[self.dim] = current_index..dim_size + current_index;
current_index += dim_size;
grads.register::<B>(node.id, B::float_slice(grad.clone(), &ranges));
});
}
fn node(&self) -> NodeID {
self.output.id
}
fn parents(&self) -> Vec<NodeID> {
self.nodes
.iter()
.filter_map(|node| node.clone())
.map(|node| node.id)
.collect()
}
fn depth(&self) -> usize {
self.output.order
}
}
let mut nodes = Vec::with_capacity(tensors.len());
let mut primitives = Vec::with_capacity(tensors.len());
let mut dim_sizes = Vec::with_capacity(tensors.len());
tensors.into_iter().for_each(|tensor| {
dim_sizes.push(tensor.primitive.shape().dims[dim]);
nodes.push(tensor.node);
primitives.push(tensor.primitive);
});
let requirement = Requirement::from_nodes(&nodes);
// For simplicity, this operation does not checkpoint anything
let cat_computing_property = ComputingProperty::Ambiguous;
let checkpointer_builder = CheckpointerBuilder::default();
let output = B::float_cat(primitives, dim);
if requirement.is_none() {
return AutodiffTensor::from_parents(
output,
&nodes,
requirement,
cat_computing_property,
);
}
let output =
AutodiffTensor::from_parents(output, &nodes, requirement, cat_computing_property);
let nodes = nodes
.into_iter()
.map(|node| node.clone_if_require_grad())
.collect::<Vec<_>>();
let ops = CatStep::<B>::new(nodes, dim_sizes, output.node.clone(), dim);
output.register_step(ops, checkpointer_builder)
}
    // Max over one dimension; backward is provided by the shared `MaxMinDim`
    // op (defined elsewhere in this module), which consumes (indices, shape).
    fn float_max_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {
        match MaxMinDim
            .prepare::<C>([tensor.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => {
                // Tracked: use the with-indices variant so backward knows which
                // elements were selected.
                let shape = tensor.primitive.shape();
                let (tensor, index) = B::float_max_dim_with_indices(tensor.primitive, dim);
                prep.finish((index, shape), tensor)
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_max_dim(tensor.primitive, dim)),
        }
    }
    // Max over one dimension, also returning the winning indices; only the float
    // output participates in autodiff.
    fn float_max_dim_with_indices(
        tensor: FloatTensor<Self>,
        dim: usize,
    ) -> (FloatTensor<Self>, IntTensor<B>) {
        match MaxMinDim
            .prepare::<C>([tensor.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => {
                let shape = tensor.primitive.shape();
                let (tensor, index) = B::float_max_dim_with_indices(tensor.primitive, dim);
                // The indices are both returned and saved as backward state.
                let tensor = prep.finish((index.clone(), shape), tensor);
                (tensor, index)
            }
            OpsKind::UnTracked(prep) => {
                let (tensor, index) = B::float_max_dim_with_indices(tensor.primitive, dim);
                let tensor = prep.finish(tensor);
                (tensor, index)
            }
        }
    }
    // Min over one dimension; backward is provided by the shared `MaxMinDim`
    // op (defined elsewhere in this module), which consumes (indices, shape).
    fn float_min_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {
        match MaxMinDim
            .prepare::<C>([tensor.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => {
                // Tracked: use the with-indices variant so backward knows which
                // elements were selected.
                let shape = tensor.primitive.shape();
                let (tensor, index) = B::float_min_dim_with_indices(tensor.primitive, dim);
                prep.finish((index, shape), tensor)
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_min_dim(tensor.primitive, dim)),
        }
    }
    // Min over one dimension, also returning the winning indices; only the float
    // output participates in autodiff.
    fn float_min_dim_with_indices(
        tensor: FloatTensor<Self>,
        dim: usize,
    ) -> (FloatTensor<Self>, IntTensor<B>) {
        match MaxMinDim
            .prepare::<C>([tensor.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => {
                let shape = tensor.primitive.shape();
                let (tensor, index) = B::float_min_dim_with_indices(tensor.primitive, dim);
                // The indices are both returned and saved as backward state.
                let tensor = prep.finish((index.clone(), shape), tensor);
                (tensor, index)
            }
            OpsKind::UnTracked(prep) => {
                let (tensor, index) = B::float_min_dim_with_indices(tensor.primitive, dim);
                let tensor = prep.finish(tensor);
                (tensor, index)
            }
        }
    }
    // Casting to int is not differentiable; delegate directly to the inner backend.
    fn float_into_int(tensor: FloatTensor<Self>) -> <Autodiff<B> as Backend>::IntTensorPrimitive {
        B::float_into_int(tensor.primitive)
    }
    // Element-wise lhs^rhs with autodiff for both operands:
    // d/dlhs = rhs * lhs^(rhs-1), d/drhs = lhs^rhs * ln(lhs).
    fn float_powf(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct PowF;
        retro_binary!(RetroPowf, B::float_powf);
        impl<B: Backend> Backward<B, 2> for PowF {
            // (checkpointed lhs id, checkpointed rhs id, broadcast bookkeeping).
            type State = (NodeID, NodeID, BinaryOpsBroadcast);
            fn backward(
                self,
                ops: Ops<Self::State, 2>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                let (lhs_id, rhs_id, broadcast) = ops.state;
                let lhs: B::FloatTensorPrimitive = checkpointer.retrieve_node_output(lhs_id);
                let rhs: B::FloatTensorPrimitive = checkpointer.retrieve_node_output(rhs_id);
                // Both lhs and rhs are needed for both lhs and rhs gradients, but we clone them
                // the number of times required by the parents specification.
                let [rhs_4lhs, rhs_4rhs] = duplicate(&ops.parents, Some(rhs));
                let [lhs_4lhs, lhs_4rhs] = duplicate(&ops.parents, Some(lhs));
                binary::<B, _, _>(
                    ops.parents,
                    ops.node,
                    grads,
                    |grad| {
                        //rhs*(lhs.val**(rhs-1))*grad
                        let rhs1 = rhs_4lhs.unwrap();
                        let rhs2 = rhs1.clone();
                        let lhs = lhs_4lhs.unwrap();
                        let tmp = B::float_powf(
                            lhs,
                            B::float_sub_scalar(rhs1, B::FloatElem::from_elem(1.0)),
                        );
                        let value = B::float_mul(tmp, rhs2);
                        let grad = B::float_mul(grad, value);
                        // Reduce any broadcast dimensions back to the lhs shape.
                        broadcast.backward_lhs::<B>(grad)
                    },
                    |grad| {
                        //lhs**rhs * ln(lhs) * grad
                        let rhs = rhs_4rhs.unwrap();
                        let lhs1 = lhs_4rhs.unwrap();
                        let lhs2 = lhs1.clone();
                        let tmp = B::float_powf(lhs1, rhs);
                        let value = B::float_mul(tmp, B::float_log(lhs2));
                        let grad = B::float_mul(grad, value);
                        // Reduce any broadcast dimensions back to the rhs shape.
                        broadcast.backward_rhs::<B>(grad)
                    },
                );
            }
        }
        let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);
        match PowF
            .prepare::<C>([lhs.node.clone(), rhs.node.clone()])
            .memory_bound()
            .retro_forward(RetroPowf::<B>::new(lhs.node.id, rhs.node.id))
            .parents([&lhs, &rhs])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                // Checkpoint both inputs so backward can retrieve them.
                let lhs_state = prep.checkpoint(&lhs);
                let rhs_state = prep.checkpoint(&rhs);
                prep.finish(
                    (lhs_state, rhs_state, broadcast),
                    B::float_powf(lhs.primitive, rhs.primitive),
                )
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_powf(lhs.primitive, rhs.primitive)),
        }
    }
    // Sign function with autodiff; the gradient is defined as zero everywhere.
    fn float_sign(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Sign;
        retro_unary!(RetroSign, B::float_sign);
        impl<B: Backend> Backward<B, 1> for Sign {
            // No state is needed to produce a zero gradient.
            type State = ();
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                unary::<B, _>(ops.parents, ops.node, grads, |grad|
                // Always return 0 because the derivative of the sign function
                // does not contribute to gradient updates in a meaningful way.
                B::float_mul_scalar(grad, 0.elem()));
            }
        }
        // Stateless path: no data beyond the graph structure is saved.
        Sign.prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroSign::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateless(B::float_sign(tensor.primitive))
    }
    // Broadcast `tensor` to `shape` (autodiff-aware). Backward sums the grad over
    // every broadcast dimension and reshapes back to the input shape.
    fn float_expand(tensor: FloatTensor<Self>, shape: Shape) -> FloatTensor<Self> {
        // D1: tensor, D2: shape
        #[derive(Debug)]
        struct ExpandDim;
        // Recomputes the forward expand from the saved input for memory-bound
        // checkpointing.
        #[derive(new, Debug)]
        struct RetroExpand<B: Backend> {
            input_id: NodeID,
            shape: Shape,
            _backend: PhantomData<B>,
        }
        impl<B: Backend> RetroForward for RetroExpand<B> {
            fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
                let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);
                let out = B::float_expand(input, self.shape.clone());
                states.save(out_node, out)
            }
        }
        impl<B: Backend> Backward<B, 1> for ExpandDim {
            // (input shape, expanded output shape).
            type State = (Shape, Shape);
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (shape_in, shape_out) = ops.state;
                let ndims_in = shape_in.num_dims();
                let ndims_out = shape_out.num_dims();
                // Right-align the input shape inside the output rank, padding the
                // leading (newly added) dimensions with size 1.
                let mut shape_expanded = vec![1; ndims_out];
                debug_assert!(ndims_out >= ndims_in);
                for i in 0..ndims_in {
                    shape_expanded[i + (ndims_out - ndims_in)] = shape_in.dims[i];
                }
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    let shape_grad = grad.shape();
                    let mut grad = grad;
                    #[allow(clippy::needless_range_loop)]
                    for i in 0..ndims_out {
                        // Sum over every dimension that was broadcast from size 1.
                        if shape_expanded[i] == 1 && shape_grad.dims[i] != 1 {
                            grad = B::float_sum_dim(grad, i);
                        }
                    }
                    B::float_reshape(grad, shape_in)
                });
            }
        }
        match ExpandDim
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroExpand::<B>::new(tensor.node.id, shape.clone()))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(prep) => prep.finish(
                (tensor.primitive.shape(), shape.clone()),
                B::float_expand(tensor.primitive, shape),
            ),
            OpsKind::UnTracked(prep) => prep.finish(B::float_expand(tensor.primitive, shape)),
        }
    }
fn float_sort(tensor: FloatTensor<Self>, dim: usize, descending: bool) -> FloatTensor<Self> {
match super::sort::SortDim
.prepare::<C>([tensor.node])
.compute_bound()
.stateful()
{
OpsKind::Tracked(prep) => {
let shape = tensor.primitive.shape();
let (tensor, indices) =
B::float_sort_with_indices(tensor.primitive, dim, descending);
prep.finish((indices, shape), tensor)
}
OpsKind::UnTracked(prep) => {
prep.finish(B::float_sort(tensor.primitive, dim, descending))
}
}
}
fn float_sort_with_indices(
tensor: FloatTensor<Self>,
dim: usize,
descending: bool,
) -> (FloatTensor<Self>, IntTensor<B>) {
match super::sort::SortDim
.prepare::<C>([tensor.node])
.compute_bound()
.stateful()
{
OpsKind::Tracked(prep) => {
let shape = tensor.primitive.shape();
let (tensor, indices) =
B::float_sort_with_indices(tensor.primitive, dim, descending);
let tensor = prep.finish((indices.clone(), shape), tensor);
(tensor, indices)
}
OpsKind::UnTracked(prep) => {
let (tensor, indices) =
B::float_sort_with_indices(tensor.primitive, dim, descending);
let tensor = prep.finish(tensor);
(tensor, indices)
}
}
}
    fn float_argsort(tensor: FloatTensor<Self>, dim: usize, descending: bool) -> IntTensor<B> {
        // The result is an integer index tensor, so no backward op is
        // registered here: no gradient flows back to `tensor` through argsort.
        B::float_argsort(tensor.primitive, dim, descending)
    }
    fn float_repeat_dim(tensor: FloatTensor<Self>, dim: usize, times: usize) -> FloatTensor<Self> {
        // Backward op for repeating a tensor `times` along dimension `dim`.
        #[derive(Debug)]
        struct Repeat;
        // Replays the repeat during checkpoint recomputation.
        #[derive(new, Debug)]
        struct RetroRepeat<B: Backend> {
            tensor_id: NodeID,
            dim: usize,
            times: usize,
            _backend: PhantomData<B>,
        }
        impl<B: Backend> RetroForward for RetroRepeat<B> {
            fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
                let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);
                let out = B::float_repeat_dim(tensor, self.dim, self.times);
                states.save(out_node, out)
            }
        }
        impl<B: Backend> Backward<B, 1> for Repeat {
            // State: (dim, times) captured at the forward pass.
            type State = (usize, usize);
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (dim, times) = ops.state;
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    let mut dims = grad.shape().dims;
                    // The gradient's `dim` is `times` larger than the input's.
                    let orig_dim_size = dims[dim] / times;
                    if orig_dim_size > 1 {
                        dims[dim] = orig_dim_size;
                        let orig_dims = dims.clone();
                        // NOTE(review): this reshape assumes repeats are laid
                        // out per input element along `dim` so that the factor
                        // axis sits at `dim + 1`; confirm it matches the
                        // memory layout produced by B::float_repeat_dim.
                        dims.insert(dim + 1, times); // shape [..., orig_dim_size, times, ...]
                        let grad = B::float_reshape(grad, Shape::from(dims));
                        let grad = B::float_sum_dim(grad, dim + 1); // sum over repeat times
                        B::float_reshape(grad, Shape::from(orig_dims))
                    } else {
                        // Input dim had size 1: summing the whole dim (which
                        // keeps it with size 1) reduces all repeats at once.
                        B::float_sum_dim(grad, dim)
                    }
                });
            }
        }
        match Repeat
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroRepeat::<B>::new(tensor.node.id, dim, times))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(prep) => prep.finish(
                (dim, times),
                B::float_repeat_dim(tensor.primitive, dim, times),
            ),
            OpsKind::UnTracked(prep) => {
                prep.finish(B::float_repeat_dim(tensor.primitive, dim, times))
            }
        }
    }
    fn float_cast(tensor: FloatTensor<Self>, dtype: burn_tensor::FloatDType) -> FloatTensor<Self> {
        // NOTE(review): unlike the other ops in this impl, no backward step is
        // registered here — the result is wrapped in a fresh `AutodiffTensor`,
        // detaching the cast output from `tensor`'s autodiff graph. Confirm
        // that gradients are not expected to flow through a dtype cast.
        AutodiffTensor::new(B::float_cast(tensor.primitive, dtype))
    }
// TODO: Implement float_prod and float_sum
// https://github.com/tracel-ai/burn/issues/1458
}
/// Records whether a binary op's operands had differing shapes (i.e. were
/// broadcast together), so the backward pass can reduce each gradient back to
/// its operand's original shape.
#[derive(Debug, Clone)]
enum BinaryOpsBroadcast {
    /// Operand shapes differed: (lhs shape, rhs shape) before broadcasting.
    Broadcasted(Shape, Shape),
    /// Shapes matched exactly; gradients pass through unchanged.
    None,
}
impl BinaryOpsBroadcast {
    /// Compares the two operand shapes dimension by dimension and records
    /// them when any dimension differs (i.e. broadcasting occurred).
    fn new<B: Backend>(lhs: &B::FloatTensorPrimitive, rhs: &B::FloatTensorPrimitive) -> Self {
        let shape_lhs = lhs.shape();
        let shape_rhs = rhs.shape();
        let broadcasted =
            (0..shape_lhs.num_dims()).any(|i| shape_lhs.dims[i] != shape_rhs.dims[i]);
        if broadcasted {
            Self::Broadcasted(shape_lhs, shape_rhs)
        } else {
            Self::None
        }
    }

    /// Reduces `grad` back to the lhs operand's original shape if the op
    /// broadcast its operands; otherwise passes it through untouched.
    fn backward_lhs<B: Backend>(&self, grad: B::FloatTensorPrimitive) -> B::FloatTensorPrimitive {
        if let Self::Broadcasted(shape_lhs, _) = self {
            broadcast_shape::<B>(grad, shape_lhs)
        } else {
            grad
        }
    }

    /// Reduces `grad` back to the rhs operand's original shape if the op
    /// broadcast its operands; otherwise passes it through untouched.
    fn backward_rhs<B: Backend>(&self, grad: B::FloatTensorPrimitive) -> B::FloatTensorPrimitive {
        if let Self::Broadcasted(_, shape_rhs) = self {
            broadcast_shape::<B>(grad, shape_rhs)
        } else {
            grad
        }
    }
}
| rust | {
"argument_definitions": [
{
"definitions": [
"impl<B: Backend, C: CheckpointStrategy> FloatTensorOps<Self> for Autodiff<B, C> {\n fn float_from_data(data: TensorData, device: &Device<Self>) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_from_data(data, device))\n }\n\n fn float_random(\n shape: Shape,\n distribution: burn_tensor::Distribution,\n device: &Device<Self>,\n ) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_random(shape, distribution, device))\n }\n\n fn float_zeros(shape: Shape, device: &Device<Self>) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_zeros(shape, device))\n }\n\n fn float_ones(shape: Shape, device: &Device<Self>) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_ones(shape, device))\n }\n\n async fn float_into_data(tensor: FloatTensor<Self>) -> TensorData {\n B::float_into_data(tensor.primitive).await\n }\n\n fn float_device(tensor: &FloatTensor<Self>) -> Device<Self> {\n B::float_device(&tensor.primitive)\n }\n\n fn float_to_device(tensor: FloatTensor<Self>, device: &Device<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct ToDevice;\n\n impl<B: Backend> Backward<B, 1> for ToDevice {\n type State = B::Device;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_to_device(grad, &ops.state)\n });\n }\n }\n\n match ToDevice\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let device_old = B::float_device(&tensor.primitive);\n prep.finish(device_old, B::float_to_device(tensor.primitive, device))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_to_device(tensor.primitive, device)),\n }\n }\n\n fn float_empty(shape: Shape, device: &Device<Self>) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_empty(shape, device))\n }\n\n fn float_add(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Add;\n\n retro_binary!(RetroAdd, 
B::float_add);\n\n impl<B: Backend> Backward<B, 2> for Add {\n type State = (Shape, Shape);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape_lhs, shape_rhs) = ops.state;\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| broadcast_shape::<B>(grad, &shape_lhs),\n |grad| broadcast_shape::<B>(grad, &shape_rhs),\n );\n }\n }\n\n match Add\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroAdd::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (lhs.primitive.shape(), rhs.primitive.shape()),\n B::float_add(lhs.primitive, rhs.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_add(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_add_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct AddScalar;\n\n retro_unary_scalar!(RetroAddScalar, B::float_add_scalar);\n\n impl<B: Backend> Backward<B, 1> for AddScalar {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| grad);\n }\n }\n\n AddScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroAddScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateless(B::float_add_scalar(lhs.primitive, rhs))\n }\n\n fn float_sub(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sub;\n\n retro_binary!(RetroSub, B::float_sub);\n\n impl<B: Backend> Backward<B, 2> for Sub {\n type State = (Shape, Shape);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape_lhs, shape_rhs) = ops.state;\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| 
broadcast_shape::<B>(grad, &shape_lhs),\n |grad| broadcast_shape::<B>(B::float_neg(grad), &shape_rhs),\n );\n }\n }\n\n match Sub\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroSub::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (lhs.primitive.shape(), rhs.primitive.shape()),\n B::float_sub(lhs.primitive, rhs.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_sub(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_sub_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct SubScalar;\n\n retro_unary_scalar!(RetroSubScalar, B::float_sub_scalar);\n\n impl<B: Backend> Backward<B, 1> for SubScalar {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| grad);\n }\n }\n\n SubScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroSubScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateless(B::float_sub_scalar(lhs.primitive, rhs))\n }\n\n fn float_mul(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Mul;\n\n retro_binary!(RetroMul, B::float_mul);\n\n impl<B: Backend> Backward<B, 2> for Mul {\n type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (lhs, rhs, broadcast) = ops.state;\n let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));\n let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let grad = B::float_mul(grad, rhs.unwrap());\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n let grad = B::float_mul(grad, lhs.unwrap());\n 
broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let lhs_tracked = lhs.is_tracked();\n let rhs_tracked = rhs.is_tracked();\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match Mul\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroMul::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));\n let rhs_state = lhs_tracked.then(|| prep.checkpoint(&rhs));\n\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n B::float_mul(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_mul(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_mul_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct MulScalar;\n\n retro_unary_scalar!(RetroMulScalar, B::float_mul_scalar);\n\n impl<B: Backend> Backward<B, 1> for MulScalar {\n type State = FloatElem<B>;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_mul_scalar(grad, ops.state)\n });\n }\n }\n\n match MulScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroMulScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(rhs, B::float_mul_scalar(lhs.primitive, rhs)),\n OpsKind::UnTracked(prep) => prep.finish(B::float_mul_scalar(lhs.primitive, rhs)),\n }\n }\n\n fn float_div(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Div;\n\n retro_binary!(RetroDiv, B::float_div);\n\n impl<B: Backend> Backward<B, 2> for Div {\n type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut 
Checkpointer,\n ) {\n let (lhs, rhs, broadcast) = ops.state;\n let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));\n let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));\n let [rhs_4lhs, rhs_4rhs] = duplicate(&ops.parents, rhs);\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let rhs = rhs_4lhs.unwrap();\n let value = B::float_powf_scalar(rhs, -1.0);\n let grad = B::float_mul(grad, value);\n\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n let rhs = rhs_4rhs.unwrap();\n let lhs = lhs.unwrap();\n let value = B::float_div(B::float_neg(lhs), B::float_powf_scalar(rhs, 2.0));\n let grad = B::float_mul(grad, value);\n\n broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let lhs_tracked = lhs.is_tracked();\n let rhs_tracked = rhs.is_tracked();\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match Div\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroDiv::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));\n let rhs_state = (lhs_tracked || rhs_tracked).then(|| prep.checkpoint(&rhs));\n\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n B::float_div(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_div(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_div_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct DivScalar;\n\n retro_unary_scalar!(RetroDivScalar, B::float_div_scalar);\n\n impl<B: Backend> Backward<B, 1> for DivScalar {\n type State = FloatElem<B>;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let tmp = 1.0 / ops.state.elem::<f32>();\n B::float_mul_scalar(grad, tmp.elem())\n });\n }\n 
}\n\n match DivScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroDivScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(rhs, B::float_div_scalar(lhs.primitive, rhs)),\n OpsKind::UnTracked(prep) => prep.finish(B::float_div_scalar(lhs.primitive, rhs)),\n }\n }\n\n fn float_remainder(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Rem;\n\n retro_binary!(RetroRem, B::float_remainder);\n\n impl<B: Backend> Backward<B, 2> for Rem {\n type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (lhs, rhs, broadcast) = ops.state;\n let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));\n let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n // remainder(x, y) = x - floor(x / y) * y\n // partial(x - floor(x / y) * y, x) = 1\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n // partial(x - floor(x / y) * y, y) = - floor(x / y)\n let rhs = rhs.unwrap();\n let lhs = lhs.unwrap();\n let value = B::float_neg(B::float_floor(B::float_div(lhs, rhs)));\n let grad = B::float_mul(grad, value);\n broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let lhs_tracked = lhs.is_tracked();\n let rhs_tracked = rhs.is_tracked();\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match Rem\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroRem::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));\n let rhs_state = (lhs_tracked || rhs_tracked).then(|| prep.checkpoint(&rhs));\n\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n 
B::float_remainder(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_remainder(lhs.primitive, rhs.primitive))\n }\n }\n }\n\n fn float_remainder_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct RemainderScalar;\n\n retro_unary_scalar!(RetroRemainderScalar, B::float_remainder_scalar);\n\n impl<B: Backend> Backward<B, 1> for RemainderScalar {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| grad);\n }\n }\n\n RemainderScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroRemainderScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateless(B::float_remainder_scalar(lhs.primitive, rhs))\n }\n\n fn float_matmul(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Matmul;\n\n impl<B: Backend> Backward<B, 2> for Matmul {\n type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (lhs, rhs, broadcast) = ops.state;\n let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));\n let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let rhs = B::float_transpose(rhs.unwrap());\n let grad = B::float_matmul(grad, rhs);\n\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n let lhs = B::float_transpose(lhs.unwrap());\n let grad = B::float_matmul(lhs, grad);\n\n broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let lhs_tracked = lhs.is_tracked();\n let rhs_tracked = rhs.is_tracked();\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match Matmul\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n 
.compute_bound()\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));\n let rhs_state = lhs_tracked.then(|| prep.checkpoint(&rhs));\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n B::float_matmul(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_matmul(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_neg(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Neg;\n\n retro_unary!(RetroNeg, B::float_neg);\n\n impl<B: Backend> Backward<B, 1> for Neg {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| B::float_neg(grad));\n }\n }\n\n Neg.prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroNeg::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateless(B::float_neg(tensor.primitive))\n }\n\n fn float_recip(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Recip;\n\n retro_unary!(RetroRecip, B::float_recip);\n\n impl<B: Backend> Backward<B, 1> for Recip {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let tensor = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let tmp = B::float_powf_scalar(tensor, -2.0);\n let value = B::float_neg(tmp);\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Recip\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroRecip::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_recip(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_recip(tensor.primitive)),\n }\n }\n\n fn float_swap_dims(tensor: 
FloatTensor<Self>, dim1: usize, dim2: usize) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct SwapDim;\n\n #[derive(new, Debug)]\n struct RetroSwapDims<B: Backend> {\n input_id: NodeID,\n dim1: usize,\n dim2: usize,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroSwapDims<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_swap_dims(input, self.dim1, self.dim2);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for SwapDim {\n type State = (usize, usize);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim1, dim2) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_swap_dims(grad, dim2, dim1)\n });\n }\n }\n\n match SwapDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSwapDims::<B>::new(tensor.node.id, dim1, dim2))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (dim1, dim2),\n B::float_swap_dims(tensor.primitive, dim1, dim2),\n ),\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_swap_dims(tensor.primitive, dim1, dim2))\n }\n }\n }\n\n fn float_permute(tensor: FloatTensor<Self>, axes: &[usize]) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct PermuteDim;\n\n #[derive(new, Debug)]\n struct RetroPermuteDims<B: Backend> {\n input_id: NodeID,\n axes: Vec<usize>,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroPermuteDims<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_permute(input, &self.axes);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for PermuteDim {\n type State = Vec<usize>;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: 
&mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let axes = ops.state;\n\n let mut inverse = vec![0usize; axes.len()];\n axes.iter()\n .enumerate()\n .for_each(|(i, &axis)| inverse[axis] = i);\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_permute(grad, &inverse)\n });\n }\n }\n\n match PermuteDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroPermuteDims::<B>::new(tensor.node.id, axes.to_vec()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n prep.finish(axes.to_vec(), B::float_permute(tensor.primitive, axes))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_permute(tensor.primitive, axes)),\n }\n }\n\n fn float_flip(tensor: FloatTensor<Self>, axes: &[usize]) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct FlipDim;\n\n #[derive(new, Debug)]\n struct RetroFlipDims<B: Backend> {\n input_id: NodeID,\n axes: Vec<usize>,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroFlipDims<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_flip(input, &self.axes);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for FlipDim {\n type State = Vec<usize>;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let axes = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_flip(grad, &axes)\n });\n }\n }\n\n match FlipDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroFlipDims::<B>::new(tensor.node.id, axes.to_vec()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n prep.finish(axes.to_vec(), B::float_flip(tensor.primitive, axes))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_flip(tensor.primitive, axes)),\n }\n }\n\n fn float_reshape(tensor: FloatTensor<Self>, shape: 
Shape) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct ReshapeDim;\n\n #[derive(new, Debug)]\n struct RetroReshape<B: Backend> {\n input_id: NodeID,\n shape: Shape,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroReshape<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_reshape(input, self.shape.clone());\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for ReshapeDim {\n type State = (Shape, Shape);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape_original, shape) = ops.state;\n let ndims_out = shape.num_dims();\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let shape_grad = grad.shape();\n let mut grad = grad;\n\n for i in 0..ndims_out {\n if shape.dims[i] == 1 && shape_grad.dims[i] != 1 {\n grad = B::float_sum_dim(grad, i);\n }\n }\n\n B::float_reshape(grad, shape_original)\n });\n }\n }\n\n match ReshapeDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroReshape::<B>::new(tensor.node.id, shape.clone()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (tensor.primitive.shape(), shape.clone()),\n B::float_reshape(tensor.primitive, shape),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_reshape(tensor.primitive, shape)),\n }\n }\n\n fn float_gather(\n dim: usize,\n tensor: FloatTensor<Self>,\n indices: IntTensor<B>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Gather;\n\n impl<B: Backend> Backward<B, 1> for Gather {\n type State = (usize, IntTensor<B>, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, indices, shape, device) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let zeros = 
B::float_zeros(shape, &device);\n B::float_scatter(dim, zeros, indices, grad)\n });\n }\n }\n\n match Gather\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n dim,\n indices.clone(),\n tensor.primitive.shape(),\n B::float_device(&tensor.primitive),\n ),\n B::float_gather(dim, tensor.primitive, indices),\n ),\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_gather(dim, tensor.primitive, indices))\n }\n }\n }\n\n fn float_scatter(\n dim: usize,\n tensor: FloatTensor<Self>,\n indices: IntTensor<B>,\n value: FloatTensor<Self>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Scatter;\n\n impl<B: Backend> Backward<B, 2> for Scatter {\n type State = (usize, IntTensor<B>, Shape, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, indices, shape_lhs, shape_rhs, device) = ops.state;\n let [indices_4lhs, indices_4rhs] = duplicate(&ops.parents, Some(indices));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let zeros = B::float_zeros(shape_lhs, &device);\n B::float_scatter(dim, grad, indices_4lhs.unwrap(), zeros)\n },\n |grad| {\n let zeros = B::float_zeros(shape_rhs, &device);\n B::float_scatter(dim, zeros, indices_4rhs.unwrap(), grad)\n },\n );\n }\n }\n\n match Scatter\n .prepare::<C>([tensor.node, value.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n dim,\n indices.clone(),\n tensor.primitive.shape(),\n value.primitive.shape(),\n B::float_device(&value.primitive),\n ),\n B::float_scatter(dim, tensor.primitive, indices, value.primitive),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_scatter(\n dim,\n tensor.primitive,\n indices,\n value.primitive,\n )),\n }\n }\n\n fn float_select(\n tensor: FloatTensor<Self>,\n dim: usize,\n indices: IntTensor<B>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Select;\n\n 
        // Retro-forward for `select`: replays the forward op from saved states when
        // this memory-bound node's output has to be recomputed during backward.
        #[derive(new, Debug)]
        struct RetroSelect<B: Backend> {
            input_id: NodeID,
            dim: usize,
            indices: IntTensor<B>,
        }

        impl<B: Backend> RetroForward for RetroSelect<B> {
            fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
                let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);
                let out = B::float_select(input, self.dim, self.indices.clone());
                states.save(out_node, out)
            }
        }

        impl<B: Backend> Backward<B, 1> for Select {
            type State = (usize, IntTensor<B>, Shape, B::Device);

            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (dim, indices, shape, device) = ops.state;

                // Scatter the output gradient back into a zero tensor of the
                // input's shape at the indices that were selected.
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    let zeros = B::float_zeros(shape, &device);
                    B::float_select_assign(zeros, dim, indices, grad)
                });
            }
        }

        match Select
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroSelect::<B>::new(tensor.node.id, dim, indices.clone()))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(prep) => prep.finish(
                (
                    dim,
                    indices.clone(),
                    tensor.primitive.shape(),
                    B::float_device(&tensor.primitive),
                ),
                B::float_select(tensor.primitive, dim, indices),
            ),
            OpsKind::UnTracked(prep) => {
                prep.finish(B::float_select(tensor.primitive, dim, indices))
            }
        }
    }

    // Autodiff wrapper over the inner backend's `select_assign`.
    // Gradients: the assigned-into tensor receives the output gradient
    // unchanged; the `value` tensor receives the gradient gathered at
    // `indices` along `dim`.
    fn float_select_assign(
        tensor: FloatTensor<Self>,
        dim: usize,
        indices: IntTensor<B>,
        value: FloatTensor<Self>,
    ) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct IndexSelectDimAssign;

        // Retro-forward: recompute the op from the two parents' saved states.
        #[derive(new, Debug)]
        struct RetroSelectAssign<B: Backend> {
            tensor_id: NodeID,
            dim: usize,
            indices: IntTensor<B>,
            value_id: NodeID,
        }

        impl<B: Backend> RetroForward for RetroSelectAssign<B> {
            fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
                let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);
                let value = states.get_state::<B::FloatTensorPrimitive>(&self.value_id);
                let out = B::float_select_assign(tensor, self.dim, self.indices.clone(), value);
                states.save(out_node, out)
            }
        }

        impl<B: Backend> Backward<B, 2> for IndexSelectDimAssign {
            type State = (usize, IntTensor<B>);

            fn backward(
                self,
                ops: Ops<Self::State, 2>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (dim, indices) = ops.state;

                binary::<B, _, _>(
                    ops.parents,
                    ops.node,
                    grads,
                    // lhs (assigned-into tensor): identity gradient.
                    |grad| grad,
                    // rhs (value): gather the gradient at the assigned indices.
                    |grad| B::float_select(grad, dim, indices),
                );
            }
        }

        match IndexSelectDimAssign
            .prepare::<C>([tensor.node.clone(), value.node.clone()])
            .memory_bound()
            .retro_forward(RetroSelectAssign::<B>::new(
                tensor.node.id,
                dim,
                indices.clone(),
                value.node.id,
            ))
            .parents([&tensor, &value])
            .stateful()
        {
            OpsKind::Tracked(prep) => prep.finish(
                (dim, indices.clone()),
                B::float_select_assign(tensor.primitive, dim, indices, value.primitive),
            ),
            OpsKind::UnTracked(prep) => prep.finish(B::float_select_assign(
                tensor.primitive,
                dim,
                indices,
                value.primitive,
            )),
        }
    }

    // Autodiff wrapper over `slice`. The gradient of a slice is the output
    // gradient scattered back into a zero tensor of the input's shape.
    fn float_slice(
        tensor: FloatTensor<Self>,
        ranges: &[core::ops::Range<usize>],
    ) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Index;

        // Retro-forward: replay the slice from the parent's saved state.
        #[derive(new, Debug)]
        struct RetroSlice<B: Backend> {
            tensor_id: NodeID,
            ranges: Vec<core::ops::Range<usize>>,
            _backend: PhantomData<B>,
        }

        impl<B: Backend> RetroForward for RetroSlice<B> {
            fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
                let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);
                let out = B::float_slice(tensor, &self.ranges);
                states.save(out_node, out)
            }
        }

        impl<B: Backend> Backward<B, 1> for Index {
            type State = (Vec<core::ops::Range<usize>>, Shape, B::Device);

            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (ranges, shape, device) = ops.state;

                // Write the sliced-region gradient into zeros of the full shape.
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    let zeros =
                        B::float_zeros(shape, &device);
                    B::float_slice_assign(zeros, &ranges, grad)
                });
            }
        }

        match Index
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroSlice::<B>::new(tensor.node.id, ranges.to_vec()))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(prep) => prep.finish(
                (
                    ranges.to_vec(),
                    tensor.primitive.shape(),
                    B::float_device(&tensor.primitive),
                ),
                B::float_slice(tensor.primitive, ranges),
            ),
            OpsKind::UnTracked(prep) => prep.finish(B::float_slice(tensor.primitive, ranges)),
        }
    }

    // Autodiff wrapper over `slice_assign`.
    // Gradients: the base tensor gets the output gradient with the assigned
    // region zeroed out; `value` gets the gradient of just that region.
    fn float_slice_assign(
        tensor: FloatTensor<Self>,
        ranges: &[core::ops::Range<usize>],
        value: FloatTensor<Self>,
    ) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct SliceAssign;

        #[derive(new, Debug)]
        struct RetroSliceAssign<B: Backend> {
            tensor_id: NodeID,
            ranges: Vec<core::ops::Range<usize>>,
            value_id: NodeID,
            _backend: PhantomData<B>,
        }

        impl<B: Backend> RetroForward for RetroSliceAssign<B> {
            fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
                let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);
                let value = states.get_state::<B::FloatTensorPrimitive>(&self.value_id);
                let out = B::float_slice_assign(tensor, &self.ranges, value);
                states.save(out_node, out)
            }
        }

        impl<B: Backend> Backward<B, 2> for SliceAssign {
            type State = (Vec<core::ops::Range<usize>>, Shape, B::Device);

            fn backward(
                self,
                ops: Ops<Self::State, 2>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (ranges, shape_rhs, device) = ops.state;
                // `ranges` is needed by both parents; duplicate clones it only
                // for the parents that actually track gradients.
                let [ranges_4lhs, ranges_4rhs] = duplicate(&ops.parents, Some(ranges));

                binary::<B, _, _>(
                    ops.parents,
                    ops.node,
                    grads,
                    |grad| {
                        // lhs: zero out the overwritten region of the gradient.
                        let zeros = B::float_zeros(shape_rhs, &device);
                        B::float_slice_assign(grad, &ranges_4lhs.unwrap(), zeros)
                    },
                    // rhs: gradient of the assigned region only.
                    |grad| B::float_slice(grad, &ranges_4rhs.unwrap()),
                );
            }
        }

        match SliceAssign
            .prepare::<C>([tensor.node.clone(), value.node.clone()])
            .memory_bound()
            .retro_forward(RetroSliceAssign::<B>::new(
                tensor.node.id,
                ranges.to_vec(),
                value.node.id,
            ))
            .parents([&tensor, &value])
            .stateful()
        {
            OpsKind::Tracked(prep) => prep.finish(
                (
                    ranges.to_vec(),
                    value.primitive.shape(),
                    B::float_device(&value.primitive),
                ),
                B::float_slice_assign(tensor.primitive, ranges, value.primitive),
            ),
            OpsKind::UnTracked(prep) => prep.finish(B::float_slice_assign(
                tensor.primitive,
                ranges,
                value.primitive,
            )),
        }
    }

    // Autodiff wrapper over `mask_where`: out = mask ? source : tensor.
    // Each parent receives the output gradient restricted (via the mask) to
    // the positions it contributed, then reduced back to its own shape in
    // case broadcasting happened in the forward pass.
    fn float_mask_where(
        tensor: FloatTensor<Self>,
        mask: BoolTensor<Self>,
        source: FloatTensor<Self>,
    ) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct MaskWhere;

        impl<B: Backend> Backward<B, 2> for MaskWhere {
            type State = (BoolTensor<B>, Shape, Shape, B::Device);

            fn backward(
                self,
                ops: Ops<Self::State, 2>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (mask, shape_lhs, shape_rhs, device) = ops.state;
                let [mask_4lhs, mask_4rhs] = duplicate(&ops.parents, Some(mask));

                binary::<B, _, _>(
                    ops.parents,
                    ops.node,
                    grads,
                    |grad| {
                        // lhs (tensor): gradient flows where the mask is false.
                        let zeros = B::float_zeros(shape_lhs.clone(), &device);
                        let grad = B::float_mask_where(grad, mask_4lhs.unwrap(), zeros);

                        broadcast_shape::<B>(grad, &shape_lhs)
                    },
                    |grad| {
                        // rhs (source): gradient flows where the mask is true.
                        let zeros = B::float_zeros(shape_rhs.clone(), &device);
                        let grad = B::float_mask_where(zeros, mask_4rhs.unwrap(), grad);

                        broadcast_shape::<B>(grad, &shape_rhs)
                    },
                );
            }
        }

        match MaskWhere
            .prepare::<C>([tensor.node, source.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => prep.finish(
                (
                    mask.clone(),
                    tensor.primitive.shape(),
                    source.primitive.shape(),
                    B::float_device(&source.primitive),
                ),
                B::float_mask_where(tensor.primitive, mask, source.primitive),
            ),
            OpsKind::UnTracked(prep) => prep.finish(B::float_mask_where(
                tensor.primitive,
                mask,
                source.primitive,
            )),
        }
    }

    // Autodiff wrapper over `mask_fill`: positions filled with the constant
    // `value` contribute no gradient, so the backward pass zeroes the
    // gradient at masked positions.
    fn float_mask_fill(
        tensor: FloatTensor<Self>,
        mask:
        BoolTensor<B>,
        value: FloatElem<B>,
    ) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct MaskFill;

        impl<B: Backend> Backward<B, 1> for MaskFill {
            type State = BoolTensor<B>;

            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    B::float_mask_fill(grad, ops.state, 0.elem())
                });
            }
        }

        match MaskFill
            .prepare::<C>([tensor.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => prep.finish(
                mask.clone(),
                B::float_mask_fill(tensor.primitive, mask, value),
            ),
            OpsKind::UnTracked(prep) => {
                prep.finish(B::float_mask_fill(tensor.primitive, mask, value))
            }
        }
    }

    // Comparison ops produce boolean tensors, which are not differentiable:
    // they simply delegate to the inner backend and leave the autodiff graph.
    fn float_equal(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {
        B::float_equal(lhs.primitive, rhs.primitive)
    }

    fn float_equal_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {
        B::float_equal_elem(lhs.primitive, rhs)
    }

    fn float_greater(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {
        B::float_greater(lhs.primitive, rhs.primitive)
    }

    fn float_greater_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {
        B::float_greater_elem(lhs.primitive, rhs)
    }

    fn float_greater_equal(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {
        B::float_greater_equal(lhs.primitive, rhs.primitive)
    }

    fn float_greater_equal_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {
        B::float_greater_equal_elem(lhs.primitive, rhs)
    }

    fn float_lower(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {
        B::float_lower(lhs.primitive, rhs.primitive)
    }

    fn float_lower_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {
        B::float_lower_elem(lhs.primitive, rhs)
    }

    fn float_lower_equal(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {
        B::float_lower_equal(lhs.primitive, rhs.primitive)
    }

    fn float_lower_equal_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {
        B::float_lower_equal_elem(lhs.primitive, rhs)
    }

    fn float_detach(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        // When we detach a tensor, we remove it from the graph, but we still want to keep the
        // `require_grad` setting.
        let is_require_grad = Self::float_is_require_grad(&tensor);
        let tensor = AutodiffTensor::new(tensor.primitive);

        match is_require_grad {
            true => tensor.require_grad(),
            false => tensor,
        }
    }

    // Enabling requires-grad marks the existing node; disabling it rebuilds a
    // fresh, graph-free tensor around the same primitive.
    fn float_set_require_grad(tensor: FloatTensor<Self>, require_grad: bool) -> FloatTensor<Self> {
        if require_grad {
            return tensor.require_grad();
        }

        AutodiffTensor::new(tensor.primitive)
    }

    fn float_is_require_grad(tensor: &FloatTensor<Self>) -> bool {
        matches!(tensor.node.requirement, Requirement::Grad)
    }

    // Full-tensor mean. Backward: every input element receives
    // grad / num_elements, broadcast to the input shape.
    fn float_mean(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Mean;

        impl<B: Backend> Backward<B, 1> for Mean {
            type State = Shape;

            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    let shape = ops.state;
                    // 1/N scaling, materialized as a ones tensor of the input shape.
                    let val = 1_f64 / shape.num_elements() as f64;
                    let ones = B::float_ones(shape, &B::float_device(&grad));
                    let val = B::float_mul_scalar(ones, val.elem());

                    // Unsqueeze the scalar-shaped gradient so it broadcasts.
                    let grad = unsqueeze_like::<B>(grad, val.shape());
                    B::float_mul(val, grad)
                });
            }
        }

        match Mean.prepare::<C>([tensor.node]).compute_bound().stateful() {
            OpsKind::Tracked(prep) => {
                prep.finish(tensor.primitive.shape(), B::float_mean(tensor.primitive))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_mean(tensor.primitive)),
        }
    }

    // Full-tensor sum. Backward: the output gradient is broadcast unchanged
    // to every input element.
    fn float_sum(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Sum;

        impl<B: Backend> Backward<B, 1> for Sum {
            type State = Shape;

            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut
                Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    let val = B::float_ones(ops.state, &B::float_device(&grad));

                    let grad = unsqueeze_like::<B>(grad, val.shape());
                    B::float_mul(val, grad)
                });
            }
        }

        match Sum.prepare::<C>([tensor.node]).compute_bound().stateful() {
            OpsKind::Tracked(prep) => {
                prep.finish(tensor.primitive.shape(), B::float_sum(tensor.primitive))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_sum(tensor.primitive)),
        }
    }

    // Mean along one dimension. Backward: grad is summed along `dim`
    // (restoring the reduced axis) and scaled by 1/size(dim).
    fn float_mean_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct MeanDim;

        impl<B: Backend> Backward<B, 1> for MeanDim {
            type State = (Shape, usize);

            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (shape, dim) = ops.state;

                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    let val = 1_f64 / shape.dims[dim] as f64;
                    let ones = B::float_ones(shape, &B::float_device(&grad));
                    let val = B::float_mul_scalar(ones, B::FloatElem::from_elem(val));

                    let grad = B::float_sum_dim(grad, dim);
                    B::float_mul(val, grad)
                });
            }
        }

        match MeanDim
            .prepare::<C>([tensor.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => prep.finish(
                (tensor.primitive.shape(), dim),
                B::float_mean_dim(tensor.primitive, dim),
            ),
            OpsKind::UnTracked(prep) => prep.finish(B::float_mean_dim(tensor.primitive, dim)),
        }
    }

    // Sum along one dimension. Backward: grad is broadcast back along `dim`
    // via multiplication with a ones tensor of the input shape.
    fn float_sum_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct SumDim;

        impl<B: Backend> Backward<B, 1> for SumDim {
            type State = (Shape, usize);

            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (shape, dim) = ops.state;

                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    let ones = B::float_ones(shape, &B::float_device(&grad));
                    let grad =
                        B::float_sum_dim(grad, dim);

                    B::float_mul(ones, grad)
                });
            }
        }

        match SumDim
            .prepare::<C>([tensor.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => prep.finish(
                (tensor.primitive.shape(), dim),
                B::float_sum_dim(tensor.primitive, dim),
            ),
            OpsKind::UnTracked(prep) => prep.finish(B::float_sum_dim(tensor.primitive, dim)),
        }
    }

    // Index-producing reductions are not differentiable; delegate directly.
    fn float_argmax(tensor: FloatTensor<Self>, dim: usize) -> IntTensor<B> {
        B::float_argmax(tensor.primitive, dim)
    }

    fn float_argmin(tensor: FloatTensor<Self>, dim: usize) -> IntTensor<B> {
        B::float_argmin(tensor.primitive, dim)
    }

    // Exponential. Backward: d/dx exp(x) = exp(x), recomputed from the
    // checkpointed input node.
    fn float_exp(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Exp;

        retro_unary!(RetroExp, B::float_exp);

        impl<B: Backend> Backward<B, 1> for Exp {
            type State = NodeID;

            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                let input = checkpointer.retrieve_node_output(ops.state);
                let output = B::float_exp(input);
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    B::float_mul(grad, output)
                });
            }
        }

        match Exp
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroExp::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                let state = prep.checkpoint(&tensor);
                prep.finish(state, B::float_exp(tensor.primitive))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_exp(tensor.primitive)),
        }
    }

    // Natural log. Backward: d/dx ln(x) = 1/x, computed as x^(-1).
    fn float_log(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Log;

        retro_unary!(RetroLog, B::float_log);

        impl<B: Backend> Backward<B, 1> for Log {
            type State = NodeID;

            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                let input = checkpointer.retrieve_node_output(ops.state);
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    let value =
                        B::float_powf_scalar(input, -1.0);
                    B::float_mul(grad, value)
                });
            }
        }

        match Log
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroLog::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                let state = prep.checkpoint(&tensor);
                prep.finish(state, B::float_log(tensor.primitive))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_log(tensor.primitive)),
        }
    }

    // log(1 + x). Backward: d/dx ln(1 + x) = 1/(1 + x).
    fn float_log1p(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Log1P;

        retro_unary!(RetroLog1P, B::float_log1p);

        impl<B: Backend> Backward<B, 1> for Log1P {
            type State = NodeID;

            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                let input = checkpointer.retrieve_node_output(ops.state);
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    let value = B::float_add_scalar(input, 1.elem());
                    let value = B::float_powf_scalar(value, -1.0);

                    B::float_mul(grad, value)
                });
            }
        }

        match Log1P
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroLog1P::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                let state = prep.checkpoint(&tensor);
                prep.finish(state, B::float_log1p(tensor.primitive))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_log1p(tensor.primitive)),
        }
    }

    // x^a for a scalar exponent. Backward: d/dx x^a = a * x^(a-1).
    fn float_powf_scalar(tensor: FloatTensor<Self>, value: f32) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct PowfScalar;

        #[derive(new, Debug)]
        struct RetroPowfScalar<B: Backend> {
            lhs_id: NodeID,
            rhs: f32,
            _backend: PhantomData<B>,
        }

        impl<B: Backend> RetroForward for RetroPowfScalar<B> {
            fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
                let lhs = states.get_state::<B::FloatTensorPrimitive>(&self.lhs_id);
                let out = B::float_powf_scalar(lhs, self.rhs);
                states.save(out_node, out)
            }
        }

        impl<B:
        Backend> Backward<B, 1> for PowfScalar {
            type State = (NodeID, f32);

            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                let (tensor_id, value) = ops.state;
                let tensor = checkpointer.retrieve_node_output(tensor_id);

                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    let tmp = B::float_powf_scalar(tensor, value - 1.0);
                    let value = B::float_mul_scalar(tmp, value.elem());

                    B::float_mul(grad, value)
                });
            }
        }

        match PowfScalar
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroPowfScalar::<B>::new(tensor.node.id, value))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                let state = (prep.checkpoint(&tensor), value);
                prep.finish(state, B::float_powf_scalar(tensor.primitive, value))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_powf_scalar(tensor.primitive, value)),
        }
    }

    // Square root. Backward: d/dx sqrt(x) = x^(-1/2) / 2.
    fn float_sqrt(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Sqrt;

        retro_unary!(RetroSqrt, B::float_sqrt);

        impl<B: Backend> Backward<B, 1> for Sqrt {
            type State = NodeID;

            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                let input = checkpointer.retrieve_node_output(ops.state);
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    let value = B::float_div_scalar(B::float_powf_scalar(input, -0.5), 2.elem());

                    B::float_mul(grad, value)
                });
            }
        }

        match Sqrt
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroSqrt::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                let state = prep.checkpoint(&tensor);
                prep.finish(state, B::float_sqrt(tensor.primitive))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_sqrt(tensor.primitive)),
        }
    }

    // Absolute value. Backward: d/dx |x| = sign(x).
    fn float_abs(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Abs;

        retro_unary!(RetroAbs, B::float_abs);

        impl<B: Backend> Backward<B, 1> for Abs {
            type State = NodeID;

            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                let tensor: B::FloatTensorPrimitive = checkpointer.retrieve_node_output(ops.state);
                let state = B::float_sign(tensor);
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    B::float_mul(grad, state)
                });
            }
        }

        match Abs
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroAbs::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                let state = prep.checkpoint(&tensor);
                prep.finish(state, B::float_abs(tensor.primitive))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_abs(tensor.primitive)),
        }
    }

    // Cosine. Backward: d/dx cos(x) = -sin(x).
    fn float_cos(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Cos;

        retro_unary!(RetroCos, B::float_cos);

        impl<B: Backend> Backward<B, 1> for Cos {
            type State = NodeID;

            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                let input = checkpointer.retrieve_node_output(ops.state);
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    let value = B::float_neg(B::float_sin(input));

                    B::float_mul(grad, value)
                });
            }
        }

        match Cos
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroCos::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                let state = prep.checkpoint(&tensor);
                prep.finish(state, B::float_cos(tensor.primitive))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_cos(tensor.primitive)),
        }
    }

    // Sine. Backward: d/dx sin(x) = cos(x).
    fn float_sin(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Sin;

        retro_unary!(RetroSin, B::float_sin);

        impl<B: Backend> Backward<B, 1> for Sin {
            type State = NodeID;

            fn backward(
                self,
                ops: Ops<Self::State,
                1>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                let state = checkpointer.retrieve_node_output(ops.state);
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    let value = B::float_cos(state);
                    B::float_mul(grad, value)
                });
            }
        }

        match Sin
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroSin::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                let state = prep.checkpoint(&tensor);
                prep.finish(state, B::float_sin(tensor.primitive))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_sin(tensor.primitive)),
        }
    }

    // Hyperbolic tangent. Backward: d/dx tanh(x) = 1 - tanh(x)^2.
    fn float_tanh(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Tanh;

        retro_unary!(RetroTanh, B::float_tanh);

        impl<B: Backend> Backward<B, 1> for Tanh {
            type State = NodeID;

            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                let input = checkpointer.retrieve_node_output(ops.state);
                let state = B::float_tanh(input);
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    let value = B::float_add_scalar(
                        B::float_neg(B::float_powf_scalar(state, 2.0)),
                        1.elem(),
                    );
                    B::float_mul(grad, value)
                });
            }
        }

        match Tanh
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroTanh::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                let state = prep.checkpoint(&tensor);
                prep.finish(state, B::float_tanh(tensor.primitive))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_tanh(tensor.primitive)),
        }
    }

    // Rounding. The function is piecewise constant, so its derivative is
    // zero almost everywhere; backward returns a zero gradient.
    fn float_round(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Round;
        retro_unary!(RetroRound, B::float_round);

        impl<B: Backend> Backward<B, 1> for Round {
            type State = (Shape, B::Device);

            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
) {\n let (shape, device) = ops.state;\n unary::<B, _>(ops.parents, ops.node, grads, |_grad| {\n B::float_zeros(shape, &device)\n })\n }\n }\n\n match Round\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroRound::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (tensor.primitive.shape(), B::float_device(&tensor.primitive)),\n B::float_round(tensor.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_round(tensor.primitive)),\n }\n }\n\n fn float_floor(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Floor;\n retro_unary!(RetroFloor, B::float_floor);\n\n impl<B: Backend> Backward<B, 1> for Floor {\n type State = (Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape, device) = ops.state;\n unary::<B, _>(ops.parents, ops.node, grads, |_grad| {\n B::float_zeros(shape, &device)\n })\n }\n }\n\n match Floor\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroFloor::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (tensor.primitive.shape(), B::float_device(&tensor.primitive)),\n B::float_floor(tensor.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_floor(tensor.primitive)),\n }\n }\n\n fn float_ceil(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Ceil;\n retro_unary!(RetroCeil, B::float_ceil);\n\n impl<B: Backend> Backward<B, 1> for Ceil {\n type State = (Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape, device) = ops.state;\n unary::<B, _>(ops.parents, ops.node, grads, |_grad| {\n B::float_zeros(shape, &device)\n })\n }\n }\n\n match Ceil\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n 
.retro_forward(RetroCeil::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (tensor.primitive.shape(), B::float_device(&tensor.primitive)),\n B::float_floor(tensor.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_floor(tensor.primitive)),\n }\n }\n\n fn float_erf(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Erf;\n\n retro_unary!(RetroErf, B::float_erf);\n\n impl<B: Backend> Backward<B, 1> for Erf {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let ops = checkpointer.retrieve_node_output(ops.state);\n let exponent = B::float_neg(B::float_powf_scalar(ops, 2.0));\n let numerator = B::float_mul_scalar(B::float_exp(exponent), 2.0.elem());\n let denominator = core::f64::consts::PI.sqrt().elem();\n let value = B::float_div_scalar(numerator, denominator);\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Erf\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroErf::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_erf(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_erf(tensor.primitive)),\n }\n }\n\n fn float_cat(tensors: Vec<FloatTensor<Self>>, dim: usize) -> FloatTensor<Self> {\n #[derive(new, Debug)]\n struct CatStep<B: Backend> {\n nodes: Vec<Option<NodeRef>>,\n // The dimension of each tensor along the dim dimension.\n // This indicates the number of dimension concatenated for each tensor.\n dim_sizes: Vec<usize>,\n output: NodeRef,\n phantom: PhantomData<B>,\n dim: usize,\n }\n\n impl<B: Backend> Step for CatStep<B> {\n fn step(self: Box<Self>, grads: &mut Gradients, _checkpointer: &mut Checkpointer) {\n let grad = 
                    grads.consume::<B>(&self.output);
                // Full-extent ranges for every dimension; only `self.dim`
                // is narrowed per parent below.
                let ranges: Vec<_> = grad.shape().dims.iter().map(|v| 0..*v).collect();

                let mut current_index = 0;

                // Walk the parents in concatenation order, handing each one
                // the slice of the output gradient it contributed.
                self.nodes
                    .into_iter()
                    .zip(self.dim_sizes)
                    .filter_map(|(node, dim_size)| node.map(|node| (node, dim_size)))
                    .for_each(|(node, dim_size)| {
                        let mut ranges = ranges.clone();
                        ranges[self.dim] = current_index..dim_size + current_index;
                        current_index += dim_size;
                        grads.register::<B>(node.id, B::float_slice(grad.clone(), &ranges));
                    });
            }

            fn node(&self) -> NodeID {
                self.output.id
            }

            fn parents(&self) -> Vec<NodeID> {
                self.nodes
                    .iter()
                    .filter_map(|node| node.clone())
                    .map(|node| node.id)
                    .collect()
            }
            fn depth(&self) -> usize {
                self.output.order
            }
        }

        let mut nodes = Vec::with_capacity(tensors.len());
        let mut primitives = Vec::with_capacity(tensors.len());
        let mut dim_sizes = Vec::with_capacity(tensors.len());

        tensors.into_iter().for_each(|tensor| {
            dim_sizes.push(tensor.primitive.shape().dims[dim]);
            nodes.push(tensor.node);
            primitives.push(tensor.primitive);
        });

        let requirement = Requirement::from_nodes(&nodes);

        // For simplicity, this operation does not checkpoint anything
        let cat_computing_property = ComputingProperty::Ambiguous;
        let checkpointer_builder = CheckpointerBuilder::default();

        let output = B::float_cat(primitives, dim);
        // Fast path: no parent requires gradients, so no step is registered.
        if requirement.is_none() {
            return AutodiffTensor::from_parents(
                output,
                &nodes,
                requirement,
                cat_computing_property,
            );
        }

        let output =
            AutodiffTensor::from_parents(output, &nodes, requirement, cat_computing_property);
        let nodes = nodes
            .into_iter()
            .map(|node| node.clone_if_require_grad())
            .collect::<Vec<_>>();

        let ops = CatStep::<B>::new(nodes, dim_sizes, output.node.clone(), dim);
        output.register_step(ops, checkpointer_builder)
    }

    // Max along `dim`. Tracked path saves the argmax indices so the backward
    // pass (MaxMinDim) can route the gradient to the winning elements.
    fn float_max_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {
        match MaxMinDim
            .prepare::<C>([tensor.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => {
                let shape = tensor.primitive.shape();
                let (tensor, index) = B::float_max_dim_with_indices(tensor.primitive, dim);
                prep.finish((index, shape), tensor)
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_max_dim(tensor.primitive, dim)),
        }
    }
    // Same as `float_max_dim`, but also returns the (non-differentiable) indices.
    fn float_max_dim_with_indices(
        tensor: FloatTensor<Self>,
        dim: usize,
    ) -> (FloatTensor<Self>, IntTensor<B>) {
        match MaxMinDim
            .prepare::<C>([tensor.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => {
                let shape = tensor.primitive.shape();
                let (tensor, index) = B::float_max_dim_with_indices(tensor.primitive, dim);
                let tensor = prep.finish((index.clone(), shape), tensor);

                (tensor, index)
            }
            OpsKind::UnTracked(prep) => {
                let (tensor, index) = B::float_max_dim_with_indices(tensor.primitive, dim);
                let tensor = prep.finish(tensor);

                (tensor, index)
            }
        }
    }
    // Min along `dim`; mirrors `float_max_dim` with argmin indices.
    fn float_min_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {
        match MaxMinDim
            .prepare::<C>([tensor.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => {
                let shape = tensor.primitive.shape();
                let (tensor, index) = B::float_min_dim_with_indices(tensor.primitive, dim);
                prep.finish((index, shape), tensor)
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_min_dim(tensor.primitive, dim)),
        }
    }
    // Same as `float_min_dim`, but also returns the (non-differentiable) indices.
    fn float_min_dim_with_indices(
        tensor: FloatTensor<Self>,
        dim: usize,
    ) -> (FloatTensor<Self>, IntTensor<B>) {
        match MaxMinDim
            .prepare::<C>([tensor.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => {
                let shape = tensor.primitive.shape();
                let (tensor, index) = B::float_min_dim_with_indices(tensor.primitive, dim);
                let tensor = prep.finish((index.clone(), shape), tensor);

                (tensor, index)
            }
            OpsKind::UnTracked(prep) => {
                let (tensor, index) = B::float_min_dim_with_indices(tensor.primitive, dim);
                let tensor = prep.finish(tensor);

                (tensor, index)
            }
        }
    }

fn float_into_int(tensor: FloatTensor<Self>) -> <Autodiff<B> as Backend>::IntTensorPrimitive {\n B::float_into_int(tensor.primitive)\n }\n\n fn float_powf(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct PowF;\n\n retro_binary!(RetroPowf, B::float_powf);\n\n impl<B: Backend> Backward<B, 2> for PowF {\n type State = (NodeID, NodeID, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (lhs_id, rhs_id, broadcast) = ops.state;\n let lhs: B::FloatTensorPrimitive = checkpointer.retrieve_node_output(lhs_id);\n let rhs: B::FloatTensorPrimitive = checkpointer.retrieve_node_output(rhs_id);\n\n // Both lhs and rhs are needed for both lhs and rhs gradients, but we clone them\n // the number of times required by the parents specification.\n let [rhs_4lhs, rhs_4rhs] = duplicate(&ops.parents, Some(rhs));\n let [lhs_4lhs, lhs_4rhs] = duplicate(&ops.parents, Some(lhs));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n //rhs*(lhs.val**(rhs-1))*grad\n let rhs1 = rhs_4lhs.unwrap();\n let rhs2 = rhs1.clone();\n let lhs = lhs_4lhs.unwrap();\n\n let tmp = B::float_powf(\n lhs,\n B::float_sub_scalar(rhs1, B::FloatElem::from_elem(1.0)),\n );\n let value = B::float_mul(tmp, rhs2);\n let grad = B::float_mul(grad, value);\n\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n //lhs**rhs * ln(lhs) * grad\n let rhs = rhs_4rhs.unwrap();\n let lhs1 = lhs_4rhs.unwrap();\n let lhs2 = lhs1.clone();\n let tmp = B::float_powf(lhs1, rhs);\n let value = B::float_mul(tmp, B::float_log(lhs2));\n let grad = B::float_mul(grad, value);\n\n broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match PowF\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroPowf::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, 
&rhs])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = prep.checkpoint(&lhs);\n let rhs_state = prep.checkpoint(&rhs);\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n B::float_powf(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_powf(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_sign(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sign;\n\n retro_unary!(RetroSign, B::float_sign);\n\n impl<B: Backend> Backward<B, 1> for Sign {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad|\n // Always return 0 because the derivative of the sign function\n // does not contribute to gradient updates in a meaningful way.\n B::float_mul_scalar(grad, 0.elem()));\n }\n }\n\n Sign.prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSign::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateless(B::float_sign(tensor.primitive))\n }\n\n fn float_expand(tensor: FloatTensor<Self>, shape: Shape) -> FloatTensor<Self> {\n // D1: tensor, D2: shape\n #[derive(Debug)]\n struct ExpandDim;\n\n #[derive(new, Debug)]\n struct RetroExpand<B: Backend> {\n input_id: NodeID,\n shape: Shape,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroExpand<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_expand(input, self.shape.clone());\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for ExpandDim {\n type State = (Shape, Shape);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape_in, shape_out) = ops.state;\n let ndims_in = shape_in.num_dims();\n let ndims_out = shape_out.num_dims();\n\n let 
mut shape_expanded = vec![1; ndims_out];\n\n debug_assert!(ndims_out >= ndims_in);\n\n for i in 0..ndims_in {\n shape_expanded[i + (ndims_out - ndims_in)] = shape_in.dims[i];\n }\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let shape_grad = grad.shape();\n let mut grad = grad;\n\n #[allow(clippy::needless_range_loop)]\n for i in 0..ndims_out {\n if shape_expanded[i] == 1 && shape_grad.dims[i] != 1 {\n grad = B::float_sum_dim(grad, i);\n }\n }\n\n B::float_reshape(grad, shape_in)\n });\n }\n }\n\n match ExpandDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroExpand::<B>::new(tensor.node.id, shape.clone()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (tensor.primitive.shape(), shape.clone()),\n B::float_expand(tensor.primitive, shape),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_expand(tensor.primitive, shape)),\n }\n }\n\n fn float_sort(tensor: FloatTensor<Self>, dim: usize, descending: bool) -> FloatTensor<Self> {\n match super::sort::SortDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let shape = tensor.primitive.shape();\n let (tensor, indices) =\n B::float_sort_with_indices(tensor.primitive, dim, descending);\n prep.finish((indices, shape), tensor)\n }\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_sort(tensor.primitive, dim, descending))\n }\n }\n }\n\n fn float_sort_with_indices(\n tensor: FloatTensor<Self>,\n dim: usize,\n descending: bool,\n ) -> (FloatTensor<Self>, IntTensor<B>) {\n match super::sort::SortDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let shape = tensor.primitive.shape();\n let (tensor, indices) =\n B::float_sort_with_indices(tensor.primitive, dim, descending);\n let tensor = prep.finish((indices.clone(), shape), tensor);\n\n (tensor, indices)\n }\n OpsKind::UnTracked(prep) => {\n let (tensor, indices) =\n 
B::float_sort_with_indices(tensor.primitive, dim, descending);\n let tensor = prep.finish(tensor);\n\n (tensor, indices)\n }\n }\n }\n\n fn float_argsort(tensor: FloatTensor<Self>, dim: usize, descending: bool) -> IntTensor<B> {\n B::float_argsort(tensor.primitive, dim, descending)\n }\n\n fn float_repeat_dim(tensor: FloatTensor<Self>, dim: usize, times: usize) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Repeat;\n\n #[derive(new, Debug)]\n struct RetroRepeat<B: Backend> {\n tensor_id: NodeID,\n dim: usize,\n times: usize,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroRepeat<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);\n let out = B::float_repeat_dim(tensor, self.dim, self.times);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for Repeat {\n type State = (usize, usize);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, times) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let mut dims = grad.shape().dims;\n let orig_dim_size = dims[dim] / times;\n if orig_dim_size > 1 {\n dims[dim] = orig_dim_size;\n let orig_dims = dims.clone();\n dims.insert(dim + 1, times); // shape [..., orig_dim_size, times, ...]\n let grad = B::float_reshape(grad, Shape::from(dims));\n let grad = B::float_sum_dim(grad, dim + 1); // sum over repeat times\n B::float_reshape(grad, Shape::from(orig_dims))\n } else {\n B::float_sum_dim(grad, dim)\n }\n });\n }\n }\n\n match Repeat\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroRepeat::<B>::new(tensor.node.id, dim, times))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (dim, times),\n B::float_repeat_dim(tensor.primitive, dim, times),\n ),\n OpsKind::UnTracked(prep) => {\n 
prep.finish(B::float_repeat_dim(tensor.primitive, dim, times))\n }\n }\n }\n\n fn float_cast(tensor: FloatTensor<Self>, dtype: burn_tensor::FloatDType) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_cast(tensor.primitive, dtype))\n }\n\n // TODO: Implement float_prod and float_sum\n // https://github.com/tracel-ai/burn/issues/1458\n}"
],
"name": "lhs",
"type": "FloatTensor<Self>"
},
{
"definitions": [
"impl<B: Backend, C: CheckpointStrategy> FloatTensorOps<Self> for Autodiff<B, C> {\n fn float_from_data(data: TensorData, device: &Device<Self>) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_from_data(data, device))\n }\n\n fn float_random(\n shape: Shape,\n distribution: burn_tensor::Distribution,\n device: &Device<Self>,\n ) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_random(shape, distribution, device))\n }\n\n fn float_zeros(shape: Shape, device: &Device<Self>) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_zeros(shape, device))\n }\n\n fn float_ones(shape: Shape, device: &Device<Self>) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_ones(shape, device))\n }\n\n async fn float_into_data(tensor: FloatTensor<Self>) -> TensorData {\n B::float_into_data(tensor.primitive).await\n }\n\n fn float_device(tensor: &FloatTensor<Self>) -> Device<Self> {\n B::float_device(&tensor.primitive)\n }\n\n fn float_to_device(tensor: FloatTensor<Self>, device: &Device<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct ToDevice;\n\n impl<B: Backend> Backward<B, 1> for ToDevice {\n type State = B::Device;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_to_device(grad, &ops.state)\n });\n }\n }\n\n match ToDevice\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let device_old = B::float_device(&tensor.primitive);\n prep.finish(device_old, B::float_to_device(tensor.primitive, device))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_to_device(tensor.primitive, device)),\n }\n }\n\n fn float_empty(shape: Shape, device: &Device<Self>) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_empty(shape, device))\n }\n\n fn float_add(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Add;\n\n retro_binary!(RetroAdd, 
B::float_add);\n\n impl<B: Backend> Backward<B, 2> for Add {\n type State = (Shape, Shape);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape_lhs, shape_rhs) = ops.state;\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| broadcast_shape::<B>(grad, &shape_lhs),\n |grad| broadcast_shape::<B>(grad, &shape_rhs),\n );\n }\n }\n\n match Add\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroAdd::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (lhs.primitive.shape(), rhs.primitive.shape()),\n B::float_add(lhs.primitive, rhs.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_add(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_add_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct AddScalar;\n\n retro_unary_scalar!(RetroAddScalar, B::float_add_scalar);\n\n impl<B: Backend> Backward<B, 1> for AddScalar {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| grad);\n }\n }\n\n AddScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroAddScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateless(B::float_add_scalar(lhs.primitive, rhs))\n }\n\n fn float_sub(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sub;\n\n retro_binary!(RetroSub, B::float_sub);\n\n impl<B: Backend> Backward<B, 2> for Sub {\n type State = (Shape, Shape);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape_lhs, shape_rhs) = ops.state;\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| 
broadcast_shape::<B>(grad, &shape_lhs),\n |grad| broadcast_shape::<B>(B::float_neg(grad), &shape_rhs),\n );\n }\n }\n\n match Sub\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroSub::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (lhs.primitive.shape(), rhs.primitive.shape()),\n B::float_sub(lhs.primitive, rhs.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_sub(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_sub_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct SubScalar;\n\n retro_unary_scalar!(RetroSubScalar, B::float_sub_scalar);\n\n impl<B: Backend> Backward<B, 1> for SubScalar {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| grad);\n }\n }\n\n SubScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroSubScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateless(B::float_sub_scalar(lhs.primitive, rhs))\n }\n\n fn float_mul(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Mul;\n\n retro_binary!(RetroMul, B::float_mul);\n\n impl<B: Backend> Backward<B, 2> for Mul {\n type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (lhs, rhs, broadcast) = ops.state;\n let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));\n let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let grad = B::float_mul(grad, rhs.unwrap());\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n let grad = B::float_mul(grad, lhs.unwrap());\n 
broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let lhs_tracked = lhs.is_tracked();\n let rhs_tracked = rhs.is_tracked();\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match Mul\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroMul::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));\n let rhs_state = lhs_tracked.then(|| prep.checkpoint(&rhs));\n\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n B::float_mul(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_mul(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_mul_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct MulScalar;\n\n retro_unary_scalar!(RetroMulScalar, B::float_mul_scalar);\n\n impl<B: Backend> Backward<B, 1> for MulScalar {\n type State = FloatElem<B>;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_mul_scalar(grad, ops.state)\n });\n }\n }\n\n match MulScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroMulScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(rhs, B::float_mul_scalar(lhs.primitive, rhs)),\n OpsKind::UnTracked(prep) => prep.finish(B::float_mul_scalar(lhs.primitive, rhs)),\n }\n }\n\n fn float_div(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Div;\n\n retro_binary!(RetroDiv, B::float_div);\n\n impl<B: Backend> Backward<B, 2> for Div {\n type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut 
Checkpointer,\n ) {\n let (lhs, rhs, broadcast) = ops.state;\n let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));\n let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));\n let [rhs_4lhs, rhs_4rhs] = duplicate(&ops.parents, rhs);\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let rhs = rhs_4lhs.unwrap();\n let value = B::float_powf_scalar(rhs, -1.0);\n let grad = B::float_mul(grad, value);\n\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n let rhs = rhs_4rhs.unwrap();\n let lhs = lhs.unwrap();\n let value = B::float_div(B::float_neg(lhs), B::float_powf_scalar(rhs, 2.0));\n let grad = B::float_mul(grad, value);\n\n broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let lhs_tracked = lhs.is_tracked();\n let rhs_tracked = rhs.is_tracked();\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match Div\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroDiv::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));\n let rhs_state = (lhs_tracked || rhs_tracked).then(|| prep.checkpoint(&rhs));\n\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n B::float_div(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_div(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_div_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct DivScalar;\n\n retro_unary_scalar!(RetroDivScalar, B::float_div_scalar);\n\n impl<B: Backend> Backward<B, 1> for DivScalar {\n type State = FloatElem<B>;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let tmp = 1.0 / ops.state.elem::<f32>();\n B::float_mul_scalar(grad, tmp.elem())\n });\n }\n 
}\n\n match DivScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroDivScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(rhs, B::float_div_scalar(lhs.primitive, rhs)),\n OpsKind::UnTracked(prep) => prep.finish(B::float_div_scalar(lhs.primitive, rhs)),\n }\n }\n\n fn float_remainder(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Rem;\n\n retro_binary!(RetroRem, B::float_remainder);\n\n impl<B: Backend> Backward<B, 2> for Rem {\n type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (lhs, rhs, broadcast) = ops.state;\n let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));\n let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n // remainder(x, y) = x - floor(x / y) * y\n // partial(x - floor(x / y) * y, x) = 1\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n // partial(x - floor(x / y) * y, y) = - floor(x / y)\n let rhs = rhs.unwrap();\n let lhs = lhs.unwrap();\n let value = B::float_neg(B::float_floor(B::float_div(lhs, rhs)));\n let grad = B::float_mul(grad, value);\n broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let lhs_tracked = lhs.is_tracked();\n let rhs_tracked = rhs.is_tracked();\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match Rem\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroRem::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));\n let rhs_state = (lhs_tracked || rhs_tracked).then(|| prep.checkpoint(&rhs));\n\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n 
B::float_remainder(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_remainder(lhs.primitive, rhs.primitive))\n }\n }\n }\n\n fn float_remainder_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct RemainderScalar;\n\n retro_unary_scalar!(RetroRemainderScalar, B::float_remainder_scalar);\n\n impl<B: Backend> Backward<B, 1> for RemainderScalar {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| grad);\n }\n }\n\n RemainderScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroRemainderScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateless(B::float_remainder_scalar(lhs.primitive, rhs))\n }\n\n fn float_matmul(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Matmul;\n\n impl<B: Backend> Backward<B, 2> for Matmul {\n type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (lhs, rhs, broadcast) = ops.state;\n let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));\n let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let rhs = B::float_transpose(rhs.unwrap());\n let grad = B::float_matmul(grad, rhs);\n\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n let lhs = B::float_transpose(lhs.unwrap());\n let grad = B::float_matmul(lhs, grad);\n\n broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let lhs_tracked = lhs.is_tracked();\n let rhs_tracked = rhs.is_tracked();\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match Matmul\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n 
.compute_bound()\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));\n let rhs_state = lhs_tracked.then(|| prep.checkpoint(&rhs));\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n B::float_matmul(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_matmul(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_neg(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Neg;\n\n retro_unary!(RetroNeg, B::float_neg);\n\n impl<B: Backend> Backward<B, 1> for Neg {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| B::float_neg(grad));\n }\n }\n\n Neg.prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroNeg::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateless(B::float_neg(tensor.primitive))\n }\n\n fn float_recip(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Recip;\n\n retro_unary!(RetroRecip, B::float_recip);\n\n impl<B: Backend> Backward<B, 1> for Recip {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let tensor = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let tmp = B::float_powf_scalar(tensor, -2.0);\n let value = B::float_neg(tmp);\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Recip\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroRecip::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_recip(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_recip(tensor.primitive)),\n }\n }\n\n fn float_swap_dims(tensor: 
FloatTensor<Self>, dim1: usize, dim2: usize) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct SwapDim;\n\n #[derive(new, Debug)]\n struct RetroSwapDims<B: Backend> {\n input_id: NodeID,\n dim1: usize,\n dim2: usize,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroSwapDims<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_swap_dims(input, self.dim1, self.dim2);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for SwapDim {\n type State = (usize, usize);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim1, dim2) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_swap_dims(grad, dim2, dim1)\n });\n }\n }\n\n match SwapDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSwapDims::<B>::new(tensor.node.id, dim1, dim2))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (dim1, dim2),\n B::float_swap_dims(tensor.primitive, dim1, dim2),\n ),\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_swap_dims(tensor.primitive, dim1, dim2))\n }\n }\n }\n\n fn float_permute(tensor: FloatTensor<Self>, axes: &[usize]) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct PermuteDim;\n\n #[derive(new, Debug)]\n struct RetroPermuteDims<B: Backend> {\n input_id: NodeID,\n axes: Vec<usize>,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroPermuteDims<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_permute(input, &self.axes);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for PermuteDim {\n type State = Vec<usize>;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: 
&mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let axes = ops.state;\n\n let mut inverse = vec![0usize; axes.len()];\n axes.iter()\n .enumerate()\n .for_each(|(i, &axis)| inverse[axis] = i);\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_permute(grad, &inverse)\n });\n }\n }\n\n match PermuteDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroPermuteDims::<B>::new(tensor.node.id, axes.to_vec()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n prep.finish(axes.to_vec(), B::float_permute(tensor.primitive, axes))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_permute(tensor.primitive, axes)),\n }\n }\n\n fn float_flip(tensor: FloatTensor<Self>, axes: &[usize]) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct FlipDim;\n\n #[derive(new, Debug)]\n struct RetroFlipDims<B: Backend> {\n input_id: NodeID,\n axes: Vec<usize>,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroFlipDims<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_flip(input, &self.axes);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for FlipDim {\n type State = Vec<usize>;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let axes = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_flip(grad, &axes)\n });\n }\n }\n\n match FlipDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroFlipDims::<B>::new(tensor.node.id, axes.to_vec()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n prep.finish(axes.to_vec(), B::float_flip(tensor.primitive, axes))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_flip(tensor.primitive, axes)),\n }\n }\n\n fn float_reshape(tensor: FloatTensor<Self>, shape: 
Shape) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct ReshapeDim;\n\n #[derive(new, Debug)]\n struct RetroReshape<B: Backend> {\n input_id: NodeID,\n shape: Shape,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroReshape<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_reshape(input, self.shape.clone());\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for ReshapeDim {\n type State = (Shape, Shape);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape_original, shape) = ops.state;\n let ndims_out = shape.num_dims();\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let shape_grad = grad.shape();\n let mut grad = grad;\n\n for i in 0..ndims_out {\n if shape.dims[i] == 1 && shape_grad.dims[i] != 1 {\n grad = B::float_sum_dim(grad, i);\n }\n }\n\n B::float_reshape(grad, shape_original)\n });\n }\n }\n\n match ReshapeDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroReshape::<B>::new(tensor.node.id, shape.clone()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (tensor.primitive.shape(), shape.clone()),\n B::float_reshape(tensor.primitive, shape),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_reshape(tensor.primitive, shape)),\n }\n }\n\n fn float_gather(\n dim: usize,\n tensor: FloatTensor<Self>,\n indices: IntTensor<B>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Gather;\n\n impl<B: Backend> Backward<B, 1> for Gather {\n type State = (usize, IntTensor<B>, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, indices, shape, device) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let zeros = 
B::float_zeros(shape, &device);\n B::float_scatter(dim, zeros, indices, grad)\n });\n }\n }\n\n match Gather\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n dim,\n indices.clone(),\n tensor.primitive.shape(),\n B::float_device(&tensor.primitive),\n ),\n B::float_gather(dim, tensor.primitive, indices),\n ),\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_gather(dim, tensor.primitive, indices))\n }\n }\n }\n\n fn float_scatter(\n dim: usize,\n tensor: FloatTensor<Self>,\n indices: IntTensor<B>,\n value: FloatTensor<Self>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Scatter;\n\n impl<B: Backend> Backward<B, 2> for Scatter {\n type State = (usize, IntTensor<B>, Shape, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, indices, shape_lhs, shape_rhs, device) = ops.state;\n let [indices_4lhs, indices_4rhs] = duplicate(&ops.parents, Some(indices));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let zeros = B::float_zeros(shape_lhs, &device);\n B::float_scatter(dim, grad, indices_4lhs.unwrap(), zeros)\n },\n |grad| {\n let zeros = B::float_zeros(shape_rhs, &device);\n B::float_scatter(dim, zeros, indices_4rhs.unwrap(), grad)\n },\n );\n }\n }\n\n match Scatter\n .prepare::<C>([tensor.node, value.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n dim,\n indices.clone(),\n tensor.primitive.shape(),\n value.primitive.shape(),\n B::float_device(&value.primitive),\n ),\n B::float_scatter(dim, tensor.primitive, indices, value.primitive),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_scatter(\n dim,\n tensor.primitive,\n indices,\n value.primitive,\n )),\n }\n }\n\n fn float_select(\n tensor: FloatTensor<Self>,\n dim: usize,\n indices: IntTensor<B>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Select;\n\n 
#[derive(new, Debug)]\n struct RetroSelect<B: Backend> {\n input_id: NodeID,\n dim: usize,\n indices: IntTensor<B>,\n }\n\n impl<B: Backend> RetroForward for RetroSelect<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_select(input, self.dim, self.indices.clone());\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for Select {\n type State = (usize, IntTensor<B>, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, indices, shape, device) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let zeros = B::float_zeros(shape, &device);\n B::float_select_assign(zeros, dim, indices, grad)\n });\n }\n }\n\n match Select\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSelect::<B>::new(tensor.node.id, dim, indices.clone()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n dim,\n indices.clone(),\n tensor.primitive.shape(),\n B::float_device(&tensor.primitive),\n ),\n B::float_select(tensor.primitive, dim, indices),\n ),\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_select(tensor.primitive, dim, indices))\n }\n }\n }\n\n fn float_select_assign(\n tensor: FloatTensor<Self>,\n dim: usize,\n indices: IntTensor<B>,\n value: FloatTensor<Self>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct IndexSelectDimAssign;\n\n #[derive(new, Debug)]\n struct RetroSelectAssign<B: Backend> {\n tensor_id: NodeID,\n dim: usize,\n indices: IntTensor<B>,\n value_id: NodeID,\n }\n\n impl<B: Backend> RetroForward for RetroSelectAssign<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);\n let value = states.get_state::<B::FloatTensorPrimitive>(&self.value_id);\n 
let out = B::float_select_assign(tensor, self.dim, self.indices.clone(), value);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 2> for IndexSelectDimAssign {\n type State = (usize, IntTensor<B>);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, indices) = ops.state;\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| grad,\n |grad| B::float_select(grad, dim, indices),\n );\n }\n }\n\n match IndexSelectDimAssign\n .prepare::<C>([tensor.node.clone(), value.node.clone()])\n .memory_bound()\n .retro_forward(RetroSelectAssign::<B>::new(\n tensor.node.id,\n dim,\n indices.clone(),\n value.node.id,\n ))\n .parents([&tensor, &value])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (dim, indices.clone()),\n B::float_select_assign(tensor.primitive, dim, indices, value.primitive),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_select_assign(\n tensor.primitive,\n dim,\n indices,\n value.primitive,\n )),\n }\n }\n\n fn float_slice(\n tensor: FloatTensor<Self>,\n ranges: &[core::ops::Range<usize>],\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Index;\n\n #[derive(new, Debug)]\n struct RetroSlice<B: Backend> {\n tensor_id: NodeID,\n ranges: Vec<core::ops::Range<usize>>,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroSlice<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);\n let out = B::float_slice(tensor, &self.ranges);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for Index {\n type State = (Vec<core::ops::Range<usize>>, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (ranges, shape, device) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let zeros = 
B::float_zeros(shape, &device);\n B::float_slice_assign(zeros, &ranges, grad)\n });\n }\n }\n\n match Index\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSlice::<B>::new(tensor.node.id, ranges.to_vec()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n ranges.to_vec(),\n tensor.primitive.shape(),\n B::float_device(&tensor.primitive),\n ),\n B::float_slice(tensor.primitive, ranges),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_slice(tensor.primitive, ranges)),\n }\n }\n\n fn float_slice_assign(\n tensor: FloatTensor<Self>,\n ranges: &[core::ops::Range<usize>],\n value: FloatTensor<Self>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct SliceAssign;\n\n #[derive(new, Debug)]\n struct RetroSliceAssign<B: Backend> {\n tensor_id: NodeID,\n ranges: Vec<core::ops::Range<usize>>,\n value_id: NodeID,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroSliceAssign<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);\n let value = states.get_state::<B::FloatTensorPrimitive>(&self.value_id);\n let out = B::float_slice_assign(tensor, &self.ranges, value);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 2> for SliceAssign {\n type State = (Vec<core::ops::Range<usize>>, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (ranges, shape_rhs, device) = ops.state;\n let [ranges_4lhs, ranges_4rhs] = duplicate(&ops.parents, Some(ranges));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let zeros = B::float_zeros(shape_rhs, &device);\n B::float_slice_assign(grad, &ranges_4lhs.unwrap(), zeros)\n },\n |grad| B::float_slice(grad, &ranges_4rhs.unwrap()),\n );\n }\n }\n\n match SliceAssign\n .prepare::<C>([tensor.node.clone(), value.node.clone()])\n 
.memory_bound()\n .retro_forward(RetroSliceAssign::<B>::new(\n tensor.node.id,\n ranges.to_vec(),\n value.node.id,\n ))\n .parents([&tensor, &value])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n ranges.to_vec(),\n value.primitive.shape(),\n B::float_device(&value.primitive),\n ),\n B::float_slice_assign(tensor.primitive, ranges, value.primitive),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_slice_assign(\n tensor.primitive,\n ranges,\n value.primitive,\n )),\n }\n }\n\n fn float_mask_where(\n tensor: FloatTensor<Self>,\n mask: BoolTensor<Self>,\n source: FloatTensor<Self>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct MaskWhere;\n\n impl<B: Backend> Backward<B, 2> for MaskWhere {\n type State = (BoolTensor<B>, Shape, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (mask, shape_lhs, shape_rhs, device) = ops.state;\n let [mask_4lhs, mask_4rhs] = duplicate(&ops.parents, Some(mask));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let zeros = B::float_zeros(shape_lhs.clone(), &device);\n let grad = B::float_mask_where(grad, mask_4lhs.unwrap(), zeros);\n\n broadcast_shape::<B>(grad, &shape_lhs)\n },\n |grad| {\n let zeros = B::float_zeros(shape_rhs.clone(), &device);\n let grad = B::float_mask_where(zeros, mask_4rhs.unwrap(), grad);\n\n broadcast_shape::<B>(grad, &shape_rhs)\n },\n );\n }\n }\n\n match MaskWhere\n .prepare::<C>([tensor.node, source.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n mask.clone(),\n tensor.primitive.shape(),\n source.primitive.shape(),\n B::float_device(&source.primitive),\n ),\n B::float_mask_where(tensor.primitive, mask, source.primitive),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_mask_where(\n tensor.primitive,\n mask,\n source.primitive,\n )),\n }\n }\n\n fn float_mask_fill(\n tensor: FloatTensor<Self>,\n mask: 
BoolTensor<B>,\n value: FloatElem<B>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct MaskFill;\n\n impl<B: Backend> Backward<B, 1> for MaskFill {\n type State = BoolTensor<B>;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_mask_fill(grad, ops.state, 0.elem())\n });\n }\n }\n\n match MaskFill\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n mask.clone(),\n B::float_mask_fill(tensor.primitive, mask, value),\n ),\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_mask_fill(tensor.primitive, mask, value))\n }\n }\n }\n\n fn float_equal(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {\n B::float_equal(lhs.primitive, rhs.primitive)\n }\n\n fn float_equal_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {\n B::float_equal_elem(lhs.primitive, rhs)\n }\n\n fn float_greater(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {\n B::float_greater(lhs.primitive, rhs.primitive)\n }\n\n fn float_greater_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {\n B::float_greater_elem(lhs.primitive, rhs)\n }\n\n fn float_greater_equal(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {\n B::float_greater_equal(lhs.primitive, rhs.primitive)\n }\n\n fn float_greater_equal_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {\n B::float_greater_equal_elem(lhs.primitive, rhs)\n }\n\n fn float_lower(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {\n B::float_lower(lhs.primitive, rhs.primitive)\n }\n\n fn float_lower_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {\n B::float_lower_elem(lhs.primitive, rhs)\n }\n\n fn float_lower_equal(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {\n B::float_lower_equal(lhs.primitive, rhs.primitive)\n 
}\n\n fn float_lower_equal_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {\n B::float_lower_equal_elem(lhs.primitive, rhs)\n }\n\n fn float_detach(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n // When we detach a tensor, we remove it from the graph, but we still want to keep the\n // `require_grad` setting.\n let is_require_grad = Self::float_is_require_grad(&tensor);\n let tensor = AutodiffTensor::new(tensor.primitive);\n\n match is_require_grad {\n true => tensor.require_grad(),\n false => tensor,\n }\n }\n\n fn float_set_require_grad(tensor: FloatTensor<Self>, require_grad: bool) -> FloatTensor<Self> {\n if require_grad {\n return tensor.require_grad();\n }\n\n AutodiffTensor::new(tensor.primitive)\n }\n\n fn float_is_require_grad(tensor: &FloatTensor<Self>) -> bool {\n matches!(tensor.node.requirement, Requirement::Grad)\n }\n\n fn float_mean(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Mean;\n\n impl<B: Backend> Backward<B, 1> for Mean {\n type State = Shape;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let shape = ops.state;\n let val = 1_f64 / shape.num_elements() as f64;\n let ones = B::float_ones(shape, &B::float_device(&grad));\n let val = B::float_mul_scalar(ones, val.elem());\n\n let grad = unsqueeze_like::<B>(grad, val.shape());\n B::float_mul(val, grad)\n });\n }\n }\n\n match Mean.prepare::<C>([tensor.node]).compute_bound().stateful() {\n OpsKind::Tracked(prep) => {\n prep.finish(tensor.primitive.shape(), B::float_mean(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_mean(tensor.primitive)),\n }\n }\n\n fn float_sum(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sum;\n\n impl<B: Backend> Backward<B, 1> for Sum {\n type State = Shape;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut 
Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let val = B::float_ones(ops.state, &B::float_device(&grad));\n\n let grad = unsqueeze_like::<B>(grad, val.shape());\n B::float_mul(val, grad)\n });\n }\n }\n\n match Sum.prepare::<C>([tensor.node]).compute_bound().stateful() {\n OpsKind::Tracked(prep) => {\n prep.finish(tensor.primitive.shape(), B::float_sum(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_sum(tensor.primitive)),\n }\n }\n\n fn float_mean_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct MeanDim;\n\n impl<B: Backend> Backward<B, 1> for MeanDim {\n type State = (Shape, usize);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape, dim) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let val = 1_f64 / shape.dims[dim] as f64;\n let ones = B::float_ones(shape, &B::float_device(&grad));\n let val = B::float_mul_scalar(ones, B::FloatElem::from_elem(val));\n\n let grad = B::float_sum_dim(grad, dim);\n B::float_mul(val, grad)\n });\n }\n }\n\n match MeanDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (tensor.primitive.shape(), dim),\n B::float_mean_dim(tensor.primitive, dim),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_mean_dim(tensor.primitive, dim)),\n }\n }\n\n fn float_sum_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct SumDim;\n\n impl<B: Backend> Backward<B, 1> for SumDim {\n type State = (Shape, usize);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape, dim) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let ones = B::float_ones(shape, &B::float_device(&grad));\n let grad = 
B::float_sum_dim(grad, dim);\n\n B::float_mul(ones, grad)\n });\n }\n }\n\n match SumDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (tensor.primitive.shape(), dim),\n B::float_sum_dim(tensor.primitive, dim),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_sum_dim(tensor.primitive, dim)),\n }\n }\n\n fn float_argmax(tensor: FloatTensor<Self>, dim: usize) -> IntTensor<B> {\n B::float_argmax(tensor.primitive, dim)\n }\n\n fn float_argmin(tensor: FloatTensor<Self>, dim: usize) -> IntTensor<B> {\n B::float_argmin(tensor.primitive, dim)\n }\n\n fn float_exp(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Exp;\n\n retro_unary!(RetroExp, B::float_exp);\n\n impl<B: Backend> Backward<B, 1> for Exp {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n let output = B::float_exp(input);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_mul(grad, output)\n });\n }\n }\n\n match Exp\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroExp::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_exp(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_exp(tensor.primitive)),\n }\n }\n\n fn float_log(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Log;\n\n retro_unary!(RetroLog, B::float_log);\n\n impl<B: Backend> Backward<B, 1> for Log {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = 
B::float_powf_scalar(input, -1.0);\n B::float_mul(grad, value)\n });\n }\n }\n\n match Log\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroLog::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_log(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_log(tensor.primitive)),\n }\n }\n\n fn float_log1p(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Log1P;\n\n retro_unary!(RetroLog1P, B::float_log1p);\n\n impl<B: Backend> Backward<B, 1> for Log1P {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_add_scalar(input, 1.elem());\n let value = B::float_powf_scalar(value, -1.0);\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Log1P\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroLog1P::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_log1p(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_log1p(tensor.primitive)),\n }\n }\n\n fn float_powf_scalar(tensor: FloatTensor<Self>, value: f32) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct PowfScalar;\n\n #[derive(new, Debug)]\n struct RetroPowfScalar<B: Backend> {\n lhs_id: NodeID,\n rhs: f32,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroPowfScalar<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let lhs = states.get_state::<B::FloatTensorPrimitive>(&self.lhs_id);\n let out = B::float_powf_scalar(lhs, self.rhs);\n states.save(out_node, out)\n }\n }\n\n impl<B: 
Backend> Backward<B, 1> for PowfScalar {\n type State = (NodeID, f32);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (tensor_id, value) = ops.state;\n let tensor = checkpointer.retrieve_node_output(tensor_id);\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let tmp = B::float_powf_scalar(tensor, value - 1.0);\n let value = B::float_mul_scalar(tmp, value.elem());\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match PowfScalar\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroPowfScalar::<B>::new(tensor.node.id, value))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = (prep.checkpoint(&tensor), value);\n prep.finish(state, B::float_powf_scalar(tensor.primitive, value))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_powf_scalar(tensor.primitive, value)),\n }\n }\n\n fn float_sqrt(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sqrt;\n\n retro_unary!(RetroSqrt, B::float_sqrt);\n\n impl<B: Backend> Backward<B, 1> for Sqrt {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_div_scalar(B::float_powf_scalar(input, -0.5), 2.elem());\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Sqrt\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSqrt::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_sqrt(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_sqrt(tensor.primitive)),\n }\n }\n\n fn float_abs(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Abs;\n\n 
retro_unary!(RetroAbs, B::float_abs);\n\n impl<B: Backend> Backward<B, 1> for Abs {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let tensor: B::FloatTensorPrimitive = checkpointer.retrieve_node_output(ops.state);\n let state = B::float_sign(tensor);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_mul(grad, state)\n });\n }\n }\n\n match Abs\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroAbs::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_abs(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_abs(tensor.primitive)),\n }\n }\n\n fn float_cos(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Cos;\n\n retro_unary!(RetroCos, B::float_cos);\n\n impl<B: Backend> Backward<B, 1> for Cos {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_neg(B::float_sin(input));\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Cos\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroCos::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_cos(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_cos(tensor.primitive)),\n }\n }\n\n fn float_sin(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sin;\n\n retro_unary!(RetroSin, B::float_sin);\n\n impl<B: Backend> Backward<B, 1> for Sin {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 
1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let state = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_cos(state);\n B::float_mul(grad, value)\n });\n }\n }\n\n match Sin\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSin::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_sin(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_sin(tensor.primitive)),\n }\n }\n\n fn float_tanh(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Tanh;\n\n retro_unary!(RetroTanh, B::float_tanh);\n\n impl<B: Backend> Backward<B, 1> for Tanh {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n let state = B::float_tanh(input);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_add_scalar(\n B::float_neg(B::float_powf_scalar(state, 2.0)),\n 1.elem(),\n );\n B::float_mul(grad, value)\n });\n }\n }\n\n match Tanh\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroTanh::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_tanh(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_tanh(tensor.primitive)),\n }\n }\n\n fn float_round(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Round;\n retro_unary!(RetroRound, B::float_round);\n\n impl<B: Backend> Backward<B, 1> for Round {\n type State = (Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n 
) {\n let (shape, device) = ops.state;\n unary::<B, _>(ops.parents, ops.node, grads, |_grad| {\n B::float_zeros(shape, &device)\n })\n }\n }\n\n match Round\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroRound::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (tensor.primitive.shape(), B::float_device(&tensor.primitive)),\n B::float_round(tensor.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_round(tensor.primitive)),\n }\n }\n\n fn float_floor(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Floor;\n retro_unary!(RetroFloor, B::float_floor);\n\n impl<B: Backend> Backward<B, 1> for Floor {\n type State = (Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape, device) = ops.state;\n unary::<B, _>(ops.parents, ops.node, grads, |_grad| {\n B::float_zeros(shape, &device)\n })\n }\n }\n\n match Floor\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroFloor::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (tensor.primitive.shape(), B::float_device(&tensor.primitive)),\n B::float_floor(tensor.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_floor(tensor.primitive)),\n }\n }\n\n fn float_ceil(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Ceil;\n retro_unary!(RetroCeil, B::float_ceil);\n\n impl<B: Backend> Backward<B, 1> for Ceil {\n type State = (Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape, device) = ops.state;\n unary::<B, _>(ops.parents, ops.node, grads, |_grad| {\n B::float_zeros(shape, &device)\n })\n }\n }\n\n match Ceil\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n 
.retro_forward(RetroCeil::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (tensor.primitive.shape(), B::float_device(&tensor.primitive)),\n B::float_floor(tensor.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_floor(tensor.primitive)),\n }\n }\n\n fn float_erf(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Erf;\n\n retro_unary!(RetroErf, B::float_erf);\n\n impl<B: Backend> Backward<B, 1> for Erf {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let ops = checkpointer.retrieve_node_output(ops.state);\n let exponent = B::float_neg(B::float_powf_scalar(ops, 2.0));\n let numerator = B::float_mul_scalar(B::float_exp(exponent), 2.0.elem());\n let denominator = core::f64::consts::PI.sqrt().elem();\n let value = B::float_div_scalar(numerator, denominator);\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Erf\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroErf::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_erf(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_erf(tensor.primitive)),\n }\n }\n\n fn float_cat(tensors: Vec<FloatTensor<Self>>, dim: usize) -> FloatTensor<Self> {\n #[derive(new, Debug)]\n struct CatStep<B: Backend> {\n nodes: Vec<Option<NodeRef>>,\n // The dimension of each tensor along the dim dimension.\n // This indicates the number of dimension concatenated for each tensor.\n dim_sizes: Vec<usize>,\n output: NodeRef,\n phantom: PhantomData<B>,\n dim: usize,\n }\n\n impl<B: Backend> Step for CatStep<B> {\n fn step(self: Box<Self>, grads: &mut Gradients, _checkpointer: &mut Checkpointer) {\n let grad = 
grads.consume::<B>(&self.output);\n let ranges: Vec<_> = grad.shape().dims.iter().map(|v| 0..*v).collect();\n\n let mut current_index = 0;\n\n self.nodes\n .into_iter()\n .zip(self.dim_sizes)\n .filter_map(|(node, dim_size)| node.map(|node| (node, dim_size)))\n .for_each(|(node, dim_size)| {\n let mut ranges = ranges.clone();\n ranges[self.dim] = current_index..dim_size + current_index;\n current_index += dim_size;\n grads.register::<B>(node.id, B::float_slice(grad.clone(), &ranges));\n });\n }\n\n fn node(&self) -> NodeID {\n self.output.id\n }\n\n fn parents(&self) -> Vec<NodeID> {\n self.nodes\n .iter()\n .filter_map(|node| node.clone())\n .map(|node| node.id)\n .collect()\n }\n fn depth(&self) -> usize {\n self.output.order\n }\n }\n\n let mut nodes = Vec::with_capacity(tensors.len());\n let mut primitives = Vec::with_capacity(tensors.len());\n let mut dim_sizes = Vec::with_capacity(tensors.len());\n\n tensors.into_iter().for_each(|tensor| {\n dim_sizes.push(tensor.primitive.shape().dims[dim]);\n nodes.push(tensor.node);\n primitives.push(tensor.primitive);\n });\n\n let requirement = Requirement::from_nodes(&nodes);\n\n // For simplicity, this operation does not checkpoint anything\n let cat_computing_property = ComputingProperty::Ambiguous;\n let checkpointer_builder = CheckpointerBuilder::default();\n\n let output = B::float_cat(primitives, dim);\n if requirement.is_none() {\n return AutodiffTensor::from_parents(\n output,\n &nodes,\n requirement,\n cat_computing_property,\n );\n }\n\n let output =\n AutodiffTensor::from_parents(output, &nodes, requirement, cat_computing_property);\n let nodes = nodes\n .into_iter()\n .map(|node| node.clone_if_require_grad())\n .collect::<Vec<_>>();\n\n let ops = CatStep::<B>::new(nodes, dim_sizes, output.node.clone(), dim);\n output.register_step(ops, checkpointer_builder)\n }\n\n fn float_max_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {\n match MaxMinDim\n .prepare::<C>([tensor.node])\n 
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => {
                // The indices of the max elements are needed in backward to route the
                // gradient, so use the `_with_indices` variant and store them as state.
                let shape = tensor.primitive.shape();
                let (tensor, index) = B::float_max_dim_with_indices(tensor.primitive, dim);
                prep.finish((index, shape), tensor)
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_max_dim(tensor.primitive, dim)),
        }
    }
    /// Max along `dim`, also returning the indices of the max elements.
    /// Gradient flows only to the selected (argmax) positions via `MaxMinDim`.
    fn float_max_dim_with_indices(
        tensor: FloatTensor<Self>,
        dim: usize,
    ) -> (FloatTensor<Self>, IntTensor<B>) {
        match MaxMinDim
            .prepare::<C>([tensor.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => {
                let shape = tensor.primitive.shape();
                let (tensor, index) = B::float_max_dim_with_indices(tensor.primitive, dim);
                // Indices are both returned to the caller and cloned into the backward state.
                let tensor = prep.finish((index.clone(), shape), tensor);

                (tensor, index)
            }
            OpsKind::UnTracked(prep) => {
                let (tensor, index) = B::float_max_dim_with_indices(tensor.primitive, dim);
                let tensor = prep.finish(tensor);

                (tensor, index)
            }
        }
    }
    /// Min along `dim`. Shares the `MaxMinDim` backward op with the max variants:
    /// the gradient is scattered back to the recorded argmin indices.
    fn float_min_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {
        match MaxMinDim
            .prepare::<C>([tensor.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => {
                let shape = tensor.primitive.shape();
                let (tensor, index) = B::float_min_dim_with_indices(tensor.primitive, dim);
                prep.finish((index, shape), tensor)
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_min_dim(tensor.primitive, dim)),
        }
    }
    /// Min along `dim`, also returning the indices of the min elements.
    fn float_min_dim_with_indices(
        tensor: FloatTensor<Self>,
        dim: usize,
    ) -> (FloatTensor<Self>, IntTensor<B>) {
        match MaxMinDim
            .prepare::<C>([tensor.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => {
                let shape = tensor.primitive.shape();
                let (tensor, index) = B::float_min_dim_with_indices(tensor.primitive, dim);
                // Indices are both returned to the caller and cloned into the backward state.
                let tensor = prep.finish((index.clone(), shape), tensor);

                (tensor, index)
            }
            OpsKind::UnTracked(prep) => {
                let (tensor, index) = B::float_min_dim_with_indices(tensor.primitive, dim);
                let tensor = prep.finish(tensor);

                (tensor, index)
            }
        }
    }

fn float_into_int(tensor: FloatTensor<Self>) -> <Autodiff<B> as Backend>::IntTensorPrimitive {\n B::float_into_int(tensor.primitive)\n }\n\n fn float_powf(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct PowF;\n\n retro_binary!(RetroPowf, B::float_powf);\n\n impl<B: Backend> Backward<B, 2> for PowF {\n type State = (NodeID, NodeID, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (lhs_id, rhs_id, broadcast) = ops.state;\n let lhs: B::FloatTensorPrimitive = checkpointer.retrieve_node_output(lhs_id);\n let rhs: B::FloatTensorPrimitive = checkpointer.retrieve_node_output(rhs_id);\n\n // Both lhs and rhs are needed for both lhs and rhs gradients, but we clone them\n // the number of times required by the parents specification.\n let [rhs_4lhs, rhs_4rhs] = duplicate(&ops.parents, Some(rhs));\n let [lhs_4lhs, lhs_4rhs] = duplicate(&ops.parents, Some(lhs));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n //rhs*(lhs.val**(rhs-1))*grad\n let rhs1 = rhs_4lhs.unwrap();\n let rhs2 = rhs1.clone();\n let lhs = lhs_4lhs.unwrap();\n\n let tmp = B::float_powf(\n lhs,\n B::float_sub_scalar(rhs1, B::FloatElem::from_elem(1.0)),\n );\n let value = B::float_mul(tmp, rhs2);\n let grad = B::float_mul(grad, value);\n\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n //lhs**rhs * ln(lhs) * grad\n let rhs = rhs_4rhs.unwrap();\n let lhs1 = lhs_4rhs.unwrap();\n let lhs2 = lhs1.clone();\n let tmp = B::float_powf(lhs1, rhs);\n let value = B::float_mul(tmp, B::float_log(lhs2));\n let grad = B::float_mul(grad, value);\n\n broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match PowF\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroPowf::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, 
&rhs])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = prep.checkpoint(&lhs);\n let rhs_state = prep.checkpoint(&rhs);\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n B::float_powf(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_powf(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_sign(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sign;\n\n retro_unary!(RetroSign, B::float_sign);\n\n impl<B: Backend> Backward<B, 1> for Sign {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad|\n // Always return 0 because the derivative of the sign function\n // does not contribute to gradient updates in a meaningful way.\n B::float_mul_scalar(grad, 0.elem()));\n }\n }\n\n Sign.prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSign::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateless(B::float_sign(tensor.primitive))\n }\n\n fn float_expand(tensor: FloatTensor<Self>, shape: Shape) -> FloatTensor<Self> {\n // D1: tensor, D2: shape\n #[derive(Debug)]\n struct ExpandDim;\n\n #[derive(new, Debug)]\n struct RetroExpand<B: Backend> {\n input_id: NodeID,\n shape: Shape,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroExpand<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_expand(input, self.shape.clone());\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for ExpandDim {\n type State = (Shape, Shape);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape_in, shape_out) = ops.state;\n let ndims_in = shape_in.num_dims();\n let ndims_out = shape_out.num_dims();\n\n let 
mut shape_expanded = vec![1; ndims_out];\n\n debug_assert!(ndims_out >= ndims_in);\n\n for i in 0..ndims_in {\n shape_expanded[i + (ndims_out - ndims_in)] = shape_in.dims[i];\n }\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let shape_grad = grad.shape();\n let mut grad = grad;\n\n #[allow(clippy::needless_range_loop)]\n for i in 0..ndims_out {\n if shape_expanded[i] == 1 && shape_grad.dims[i] != 1 {\n grad = B::float_sum_dim(grad, i);\n }\n }\n\n B::float_reshape(grad, shape_in)\n });\n }\n }\n\n match ExpandDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroExpand::<B>::new(tensor.node.id, shape.clone()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (tensor.primitive.shape(), shape.clone()),\n B::float_expand(tensor.primitive, shape),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_expand(tensor.primitive, shape)),\n }\n }\n\n fn float_sort(tensor: FloatTensor<Self>, dim: usize, descending: bool) -> FloatTensor<Self> {\n match super::sort::SortDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let shape = tensor.primitive.shape();\n let (tensor, indices) =\n B::float_sort_with_indices(tensor.primitive, dim, descending);\n prep.finish((indices, shape), tensor)\n }\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_sort(tensor.primitive, dim, descending))\n }\n }\n }\n\n fn float_sort_with_indices(\n tensor: FloatTensor<Self>,\n dim: usize,\n descending: bool,\n ) -> (FloatTensor<Self>, IntTensor<B>) {\n match super::sort::SortDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let shape = tensor.primitive.shape();\n let (tensor, indices) =\n B::float_sort_with_indices(tensor.primitive, dim, descending);\n let tensor = prep.finish((indices.clone(), shape), tensor);\n\n (tensor, indices)\n }\n OpsKind::UnTracked(prep) => {\n let (tensor, indices) =\n 
B::float_sort_with_indices(tensor.primitive, dim, descending);\n let tensor = prep.finish(tensor);\n\n (tensor, indices)\n }\n }\n }\n\n fn float_argsort(tensor: FloatTensor<Self>, dim: usize, descending: bool) -> IntTensor<B> {\n B::float_argsort(tensor.primitive, dim, descending)\n }\n\n fn float_repeat_dim(tensor: FloatTensor<Self>, dim: usize, times: usize) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Repeat;\n\n #[derive(new, Debug)]\n struct RetroRepeat<B: Backend> {\n tensor_id: NodeID,\n dim: usize,\n times: usize,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroRepeat<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);\n let out = B::float_repeat_dim(tensor, self.dim, self.times);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for Repeat {\n type State = (usize, usize);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, times) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let mut dims = grad.shape().dims;\n let orig_dim_size = dims[dim] / times;\n if orig_dim_size > 1 {\n dims[dim] = orig_dim_size;\n let orig_dims = dims.clone();\n dims.insert(dim + 1, times); // shape [..., orig_dim_size, times, ...]\n let grad = B::float_reshape(grad, Shape::from(dims));\n let grad = B::float_sum_dim(grad, dim + 1); // sum over repeat times\n B::float_reshape(grad, Shape::from(orig_dims))\n } else {\n B::float_sum_dim(grad, dim)\n }\n });\n }\n }\n\n match Repeat\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroRepeat::<B>::new(tensor.node.id, dim, times))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (dim, times),\n B::float_repeat_dim(tensor.primitive, dim, times),\n ),\n OpsKind::UnTracked(prep) => {\n 
prep.finish(B::float_repeat_dim(tensor.primitive, dim, times))\n }\n }\n }\n\n fn float_cast(tensor: FloatTensor<Self>, dtype: burn_tensor::FloatDType) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_cast(tensor.primitive, dtype))\n }\n\n // TODO: Implement float_prod and float_sum\n // https://github.com/tracel-ai/burn/issues/1458\n}"
],
"name": "rhs",
"type": "FloatTensor<Self>"
}
],
"end_line": 417,
"name": "float_div",
"signature": "fn float_div(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self>",
"start_line": 352
} | {
"class_name": "impl<B: Backend, C: CheckpointStrategy> FloatTensorOps<Self> for Autodiff<B, C> {\n fn float_from_data(data: TensorData, device: &Device<Self>) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_from_data(data, device))\n }\n\n fn float_random(\n shape: Shape,\n distribution: burn_tensor::Distribution,\n device: &Device<Self>,\n ) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_random(shape, distribution, device))\n }\n\n fn float_zeros(shape: Shape, device: &Device<Self>) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_zeros(shape, device))\n }\n\n fn float_ones(shape: Shape, device: &Device<Self>) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_ones(shape, device))\n }\n\n async fn float_into_data(tensor: FloatTensor<Self>) -> TensorData {\n B::float_into_data(tensor.primitive).await\n }\n\n fn float_device(tensor: &FloatTensor<Self>) -> Device<Self> {\n B::float_device(&tensor.primitive)\n }\n\n fn float_to_device(tensor: FloatTensor<Self>, device: &Device<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct ToDevice;\n\n impl<B: Backend> Backward<B, 1> for ToDevice {\n type State = B::Device;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_to_device(grad, &ops.state)\n });\n }\n }\n\n match ToDevice\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let device_old = B::float_device(&tensor.primitive);\n prep.finish(device_old, B::float_to_device(tensor.primitive, device))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_to_device(tensor.primitive, device)),\n }\n }\n\n fn float_empty(shape: Shape, device: &Device<Self>) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_empty(shape, device))\n }\n\n fn float_add(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Add;\n\n 
retro_binary!(RetroAdd, B::float_add);\n\n impl<B: Backend> Backward<B, 2> for Add {\n type State = (Shape, Shape);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape_lhs, shape_rhs) = ops.state;\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| broadcast_shape::<B>(grad, &shape_lhs),\n |grad| broadcast_shape::<B>(grad, &shape_rhs),\n );\n }\n }\n\n match Add\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroAdd::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (lhs.primitive.shape(), rhs.primitive.shape()),\n B::float_add(lhs.primitive, rhs.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_add(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_add_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct AddScalar;\n\n retro_unary_scalar!(RetroAddScalar, B::float_add_scalar);\n\n impl<B: Backend> Backward<B, 1> for AddScalar {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| grad);\n }\n }\n\n AddScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroAddScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateless(B::float_add_scalar(lhs.primitive, rhs))\n }\n\n fn float_sub(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sub;\n\n retro_binary!(RetroSub, B::float_sub);\n\n impl<B: Backend> Backward<B, 2> for Sub {\n type State = (Shape, Shape);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape_lhs, shape_rhs) = ops.state;\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n 
grads,\n |grad| broadcast_shape::<B>(grad, &shape_lhs),\n |grad| broadcast_shape::<B>(B::float_neg(grad), &shape_rhs),\n );\n }\n }\n\n match Sub\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroSub::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (lhs.primitive.shape(), rhs.primitive.shape()),\n B::float_sub(lhs.primitive, rhs.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_sub(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_sub_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct SubScalar;\n\n retro_unary_scalar!(RetroSubScalar, B::float_sub_scalar);\n\n impl<B: Backend> Backward<B, 1> for SubScalar {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| grad);\n }\n }\n\n SubScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroSubScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateless(B::float_sub_scalar(lhs.primitive, rhs))\n }\n\n fn float_mul(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Mul;\n\n retro_binary!(RetroMul, B::float_mul);\n\n impl<B: Backend> Backward<B, 2> for Mul {\n type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (lhs, rhs, broadcast) = ops.state;\n let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));\n let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let grad = B::float_mul(grad, rhs.unwrap());\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n let grad = B::float_mul(grad, 
lhs.unwrap());\n broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let lhs_tracked = lhs.is_tracked();\n let rhs_tracked = rhs.is_tracked();\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match Mul\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroMul::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));\n let rhs_state = lhs_tracked.then(|| prep.checkpoint(&rhs));\n\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n B::float_mul(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_mul(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_mul_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct MulScalar;\n\n retro_unary_scalar!(RetroMulScalar, B::float_mul_scalar);\n\n impl<B: Backend> Backward<B, 1> for MulScalar {\n type State = FloatElem<B>;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_mul_scalar(grad, ops.state)\n });\n }\n }\n\n match MulScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroMulScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(rhs, B::float_mul_scalar(lhs.primitive, rhs)),\n OpsKind::UnTracked(prep) => prep.finish(B::float_mul_scalar(lhs.primitive, rhs)),\n }\n }\n\n fn float_div(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Div;\n\n retro_binary!(RetroDiv, B::float_div);\n\n impl<B: Backend> Backward<B, 2> for Div {\n type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n 
checkpointer: &mut Checkpointer,\n ) {\n let (lhs, rhs, broadcast) = ops.state;\n let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));\n let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));\n let [rhs_4lhs, rhs_4rhs] = duplicate(&ops.parents, rhs);\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let rhs = rhs_4lhs.unwrap();\n let value = B::float_powf_scalar(rhs, -1.0);\n let grad = B::float_mul(grad, value);\n\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n let rhs = rhs_4rhs.unwrap();\n let lhs = lhs.unwrap();\n let value = B::float_div(B::float_neg(lhs), B::float_powf_scalar(rhs, 2.0));\n let grad = B::float_mul(grad, value);\n\n broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let lhs_tracked = lhs.is_tracked();\n let rhs_tracked = rhs.is_tracked();\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match Div\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroDiv::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));\n let rhs_state = (lhs_tracked || rhs_tracked).then(|| prep.checkpoint(&rhs));\n\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n B::float_div(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_div(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_div_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct DivScalar;\n\n retro_unary_scalar!(RetroDivScalar, B::float_div_scalar);\n\n impl<B: Backend> Backward<B, 1> for DivScalar {\n type State = FloatElem<B>;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let tmp = 1.0 / ops.state.elem::<f32>();\n B::float_mul_scalar(grad, 
tmp.elem())\n });\n }\n }\n\n match DivScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroDivScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(rhs, B::float_div_scalar(lhs.primitive, rhs)),\n OpsKind::UnTracked(prep) => prep.finish(B::float_div_scalar(lhs.primitive, rhs)),\n }\n }\n\n fn float_remainder(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Rem;\n\n retro_binary!(RetroRem, B::float_remainder);\n\n impl<B: Backend> Backward<B, 2> for Rem {\n type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (lhs, rhs, broadcast) = ops.state;\n let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));\n let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n // remainder(x, y) = x - floor(x / y) * y\n // partial(x - floor(x / y) * y, x) = 1\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n // partial(x - floor(x / y) * y, y) = - floor(x / y)\n let rhs = rhs.unwrap();\n let lhs = lhs.unwrap();\n let value = B::float_neg(B::float_floor(B::float_div(lhs, rhs)));\n let grad = B::float_mul(grad, value);\n broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let lhs_tracked = lhs.is_tracked();\n let rhs_tracked = rhs.is_tracked();\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match Rem\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroRem::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));\n let rhs_state = (lhs_tracked || rhs_tracked).then(|| prep.checkpoint(&rhs));\n\n prep.finish(\n (lhs_state, 
rhs_state, broadcast),\n B::float_remainder(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_remainder(lhs.primitive, rhs.primitive))\n }\n }\n }\n\n fn float_remainder_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct RemainderScalar;\n\n retro_unary_scalar!(RetroRemainderScalar, B::float_remainder_scalar);\n\n impl<B: Backend> Backward<B, 1> for RemainderScalar {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| grad);\n }\n }\n\n RemainderScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroRemainderScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateless(B::float_remainder_scalar(lhs.primitive, rhs))\n }\n\n fn float_matmul(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Matmul;\n\n impl<B: Backend> Backward<B, 2> for Matmul {\n type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (lhs, rhs, broadcast) = ops.state;\n let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));\n let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let rhs = B::float_transpose(rhs.unwrap());\n let grad = B::float_matmul(grad, rhs);\n\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n let lhs = B::float_transpose(lhs.unwrap());\n let grad = B::float_matmul(lhs, grad);\n\n broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let lhs_tracked = lhs.is_tracked();\n let rhs_tracked = rhs.is_tracked();\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match Matmul\n .prepare::<C>([lhs.node.clone(), 
rhs.node.clone()])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));\n let rhs_state = lhs_tracked.then(|| prep.checkpoint(&rhs));\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n B::float_matmul(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_matmul(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_neg(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Neg;\n\n retro_unary!(RetroNeg, B::float_neg);\n\n impl<B: Backend> Backward<B, 1> for Neg {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| B::float_neg(grad));\n }\n }\n\n Neg.prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroNeg::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateless(B::float_neg(tensor.primitive))\n }\n\n fn float_recip(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Recip;\n\n retro_unary!(RetroRecip, B::float_recip);\n\n impl<B: Backend> Backward<B, 1> for Recip {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let tensor = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let tmp = B::float_powf_scalar(tensor, -2.0);\n let value = B::float_neg(tmp);\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Recip\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroRecip::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_recip(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_recip(tensor.primitive)),\n }\n }\n\n fn 
float_swap_dims(tensor: FloatTensor<Self>, dim1: usize, dim2: usize) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct SwapDim;\n\n #[derive(new, Debug)]\n struct RetroSwapDims<B: Backend> {\n input_id: NodeID,\n dim1: usize,\n dim2: usize,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroSwapDims<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_swap_dims(input, self.dim1, self.dim2);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for SwapDim {\n type State = (usize, usize);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim1, dim2) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_swap_dims(grad, dim2, dim1)\n });\n }\n }\n\n match SwapDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSwapDims::<B>::new(tensor.node.id, dim1, dim2))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (dim1, dim2),\n B::float_swap_dims(tensor.primitive, dim1, dim2),\n ),\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_swap_dims(tensor.primitive, dim1, dim2))\n }\n }\n }\n\n fn float_permute(tensor: FloatTensor<Self>, axes: &[usize]) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct PermuteDim;\n\n #[derive(new, Debug)]\n struct RetroPermuteDims<B: Backend> {\n input_id: NodeID,\n axes: Vec<usize>,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroPermuteDims<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_permute(input, &self.axes);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for PermuteDim {\n type State = Vec<usize>;\n\n fn backward(\n self,\n ops: 
Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let axes = ops.state;\n\n let mut inverse = vec![0usize; axes.len()];\n axes.iter()\n .enumerate()\n .for_each(|(i, &axis)| inverse[axis] = i);\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_permute(grad, &inverse)\n });\n }\n }\n\n match PermuteDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroPermuteDims::<B>::new(tensor.node.id, axes.to_vec()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n prep.finish(axes.to_vec(), B::float_permute(tensor.primitive, axes))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_permute(tensor.primitive, axes)),\n }\n }\n\n fn float_flip(tensor: FloatTensor<Self>, axes: &[usize]) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct FlipDim;\n\n #[derive(new, Debug)]\n struct RetroFlipDims<B: Backend> {\n input_id: NodeID,\n axes: Vec<usize>,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroFlipDims<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_flip(input, &self.axes);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for FlipDim {\n type State = Vec<usize>;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let axes = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_flip(grad, &axes)\n });\n }\n }\n\n match FlipDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroFlipDims::<B>::new(tensor.node.id, axes.to_vec()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n prep.finish(axes.to_vec(), B::float_flip(tensor.primitive, axes))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_flip(tensor.primitive, axes)),\n }\n }\n\n fn 
float_reshape(tensor: FloatTensor<Self>, shape: Shape) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct ReshapeDim;\n\n #[derive(new, Debug)]\n struct RetroReshape<B: Backend> {\n input_id: NodeID,\n shape: Shape,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroReshape<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_reshape(input, self.shape.clone());\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for ReshapeDim {\n type State = (Shape, Shape);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape_original, shape) = ops.state;\n let ndims_out = shape.num_dims();\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let shape_grad = grad.shape();\n let mut grad = grad;\n\n for i in 0..ndims_out {\n if shape.dims[i] == 1 && shape_grad.dims[i] != 1 {\n grad = B::float_sum_dim(grad, i);\n }\n }\n\n B::float_reshape(grad, shape_original)\n });\n }\n }\n\n match ReshapeDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroReshape::<B>::new(tensor.node.id, shape.clone()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (tensor.primitive.shape(), shape.clone()),\n B::float_reshape(tensor.primitive, shape),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_reshape(tensor.primitive, shape)),\n }\n }\n\n fn float_gather(\n dim: usize,\n tensor: FloatTensor<Self>,\n indices: IntTensor<B>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Gather;\n\n impl<B: Backend> Backward<B, 1> for Gather {\n type State = (usize, IntTensor<B>, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, indices, shape, device) = ops.state;\n\n unary::<B, _>(ops.parents, 
ops.node, grads, |grad| {\n let zeros = B::float_zeros(shape, &device);\n B::float_scatter(dim, zeros, indices, grad)\n });\n }\n }\n\n match Gather\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n dim,\n indices.clone(),\n tensor.primitive.shape(),\n B::float_device(&tensor.primitive),\n ),\n B::float_gather(dim, tensor.primitive, indices),\n ),\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_gather(dim, tensor.primitive, indices))\n }\n }\n }\n\n fn float_scatter(\n dim: usize,\n tensor: FloatTensor<Self>,\n indices: IntTensor<B>,\n value: FloatTensor<Self>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Scatter;\n\n impl<B: Backend> Backward<B, 2> for Scatter {\n type State = (usize, IntTensor<B>, Shape, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, indices, shape_lhs, shape_rhs, device) = ops.state;\n let [indices_4lhs, indices_4rhs] = duplicate(&ops.parents, Some(indices));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let zeros = B::float_zeros(shape_lhs, &device);\n B::float_scatter(dim, grad, indices_4lhs.unwrap(), zeros)\n },\n |grad| {\n let zeros = B::float_zeros(shape_rhs, &device);\n B::float_scatter(dim, zeros, indices_4rhs.unwrap(), grad)\n },\n );\n }\n }\n\n match Scatter\n .prepare::<C>([tensor.node, value.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n dim,\n indices.clone(),\n tensor.primitive.shape(),\n value.primitive.shape(),\n B::float_device(&value.primitive),\n ),\n B::float_scatter(dim, tensor.primitive, indices, value.primitive),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_scatter(\n dim,\n tensor.primitive,\n indices,\n value.primitive,\n )),\n }\n }\n\n fn float_select(\n tensor: FloatTensor<Self>,\n dim: usize,\n indices: IntTensor<B>,\n ) -> FloatTensor<Self> {\n 
#[derive(Debug)]\n struct Select;\n\n #[derive(new, Debug)]\n struct RetroSelect<B: Backend> {\n input_id: NodeID,\n dim: usize,\n indices: IntTensor<B>,\n }\n\n impl<B: Backend> RetroForward for RetroSelect<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_select(input, self.dim, self.indices.clone());\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for Select {\n type State = (usize, IntTensor<B>, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, indices, shape, device) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let zeros = B::float_zeros(shape, &device);\n B::float_select_assign(zeros, dim, indices, grad)\n });\n }\n }\n\n match Select\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSelect::<B>::new(tensor.node.id, dim, indices.clone()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n dim,\n indices.clone(),\n tensor.primitive.shape(),\n B::float_device(&tensor.primitive),\n ),\n B::float_select(tensor.primitive, dim, indices),\n ),\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_select(tensor.primitive, dim, indices))\n }\n }\n }\n\n fn float_select_assign(\n tensor: FloatTensor<Self>,\n dim: usize,\n indices: IntTensor<B>,\n value: FloatTensor<Self>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct IndexSelectDimAssign;\n\n #[derive(new, Debug)]\n struct RetroSelectAssign<B: Backend> {\n tensor_id: NodeID,\n dim: usize,\n indices: IntTensor<B>,\n value_id: NodeID,\n }\n\n impl<B: Backend> RetroForward for RetroSelectAssign<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);\n let value = 
states.get_state::<B::FloatTensorPrimitive>(&self.value_id);\n let out = B::float_select_assign(tensor, self.dim, self.indices.clone(), value);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 2> for IndexSelectDimAssign {\n type State = (usize, IntTensor<B>);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, indices) = ops.state;\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| grad,\n |grad| B::float_select(grad, dim, indices),\n );\n }\n }\n\n match IndexSelectDimAssign\n .prepare::<C>([tensor.node.clone(), value.node.clone()])\n .memory_bound()\n .retro_forward(RetroSelectAssign::<B>::new(\n tensor.node.id,\n dim,\n indices.clone(),\n value.node.id,\n ))\n .parents([&tensor, &value])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (dim, indices.clone()),\n B::float_select_assign(tensor.primitive, dim, indices, value.primitive),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_select_assign(\n tensor.primitive,\n dim,\n indices,\n value.primitive,\n )),\n }\n }\n\n fn float_slice(\n tensor: FloatTensor<Self>,\n ranges: &[core::ops::Range<usize>],\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Index;\n\n #[derive(new, Debug)]\n struct RetroSlice<B: Backend> {\n tensor_id: NodeID,\n ranges: Vec<core::ops::Range<usize>>,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroSlice<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);\n let out = B::float_slice(tensor, &self.ranges);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for Index {\n type State = (Vec<core::ops::Range<usize>>, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (ranges, shape, device) = ops.state;\n\n 
unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let zeros = B::float_zeros(shape, &device);\n B::float_slice_assign(zeros, &ranges, grad)\n });\n }\n }\n\n match Index\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSlice::<B>::new(tensor.node.id, ranges.to_vec()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n ranges.to_vec(),\n tensor.primitive.shape(),\n B::float_device(&tensor.primitive),\n ),\n B::float_slice(tensor.primitive, ranges),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_slice(tensor.primitive, ranges)),\n }\n }\n\n fn float_slice_assign(\n tensor: FloatTensor<Self>,\n ranges: &[core::ops::Range<usize>],\n value: FloatTensor<Self>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct SliceAssign;\n\n #[derive(new, Debug)]\n struct RetroSliceAssign<B: Backend> {\n tensor_id: NodeID,\n ranges: Vec<core::ops::Range<usize>>,\n value_id: NodeID,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroSliceAssign<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);\n let value = states.get_state::<B::FloatTensorPrimitive>(&self.value_id);\n let out = B::float_slice_assign(tensor, &self.ranges, value);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 2> for SliceAssign {\n type State = (Vec<core::ops::Range<usize>>, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (ranges, shape_rhs, device) = ops.state;\n let [ranges_4lhs, ranges_4rhs] = duplicate(&ops.parents, Some(ranges));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let zeros = B::float_zeros(shape_rhs, &device);\n B::float_slice_assign(grad, &ranges_4lhs.unwrap(), zeros)\n },\n |grad| B::float_slice(grad, &ranges_4rhs.unwrap()),\n );\n }\n }\n\n match 
SliceAssign\n .prepare::<C>([tensor.node.clone(), value.node.clone()])\n .memory_bound()\n .retro_forward(RetroSliceAssign::<B>::new(\n tensor.node.id,\n ranges.to_vec(),\n value.node.id,\n ))\n .parents([&tensor, &value])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n ranges.to_vec(),\n value.primitive.shape(),\n B::float_device(&value.primitive),\n ),\n B::float_slice_assign(tensor.primitive, ranges, value.primitive),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_slice_assign(\n tensor.primitive,\n ranges,\n value.primitive,\n )),\n }\n }\n\n fn float_mask_where(\n tensor: FloatTensor<Self>,\n mask: BoolTensor<Self>,\n source: FloatTensor<Self>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct MaskWhere;\n\n impl<B: Backend> Backward<B, 2> for MaskWhere {\n type State = (BoolTensor<B>, Shape, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (mask, shape_lhs, shape_rhs, device) = ops.state;\n let [mask_4lhs, mask_4rhs] = duplicate(&ops.parents, Some(mask));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let zeros = B::float_zeros(shape_lhs.clone(), &device);\n let grad = B::float_mask_where(grad, mask_4lhs.unwrap(), zeros);\n\n broadcast_shape::<B>(grad, &shape_lhs)\n },\n |grad| {\n let zeros = B::float_zeros(shape_rhs.clone(), &device);\n let grad = B::float_mask_where(zeros, mask_4rhs.unwrap(), grad);\n\n broadcast_shape::<B>(grad, &shape_rhs)\n },\n );\n }\n }\n\n match MaskWhere\n .prepare::<C>([tensor.node, source.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n mask.clone(),\n tensor.primitive.shape(),\n source.primitive.shape(),\n B::float_device(&source.primitive),\n ),\n B::float_mask_where(tensor.primitive, mask, source.primitive),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_mask_where(\n tensor.primitive,\n mask,\n source.primitive,\n )),\n }\n 
    }

    // Sets the elements of `tensor` selected by the boolean `mask` to the scalar
    // `value` in the forward pass. In the backward pass the gradient at masked
    // positions is zeroed, since those outputs no longer depend on the input.
    fn float_mask_fill(
        tensor: FloatTensor<Self>,
        mask: BoolTensor<B>,
        value: FloatElem<B>,
    ) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct MaskFill;

        impl<B: Backend> Backward<B, 1> for MaskFill {
            // Only the mask is needed to zero out the masked gradient entries.
            type State = BoolTensor<B>;

            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    // Masked positions received a constant, so their gradient is 0.
                    B::float_mask_fill(grad, ops.state, 0.elem())
                });
            }
        }

        match MaskFill
            .prepare::<C>([tensor.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => prep.finish(
                mask.clone(),
                B::float_mask_fill(tensor.primitive, mask, value),
            ),
            OpsKind::UnTracked(prep) => {
                prep.finish(B::float_mask_fill(tensor.primitive, mask, value))
            }
        }
    }

    // The comparison operators below produce boolean tensors, which are not
    // differentiable, so they forward directly to the inner backend without
    // registering any backward step in the autodiff graph.

    fn float_equal(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {
        B::float_equal(lhs.primitive, rhs.primitive)
    }

    fn float_equal_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {
        B::float_equal_elem(lhs.primitive, rhs)
    }

    fn float_greater(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {
        B::float_greater(lhs.primitive, rhs.primitive)
    }

    fn float_greater_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {
        B::float_greater_elem(lhs.primitive, rhs)
    }

    fn float_greater_equal(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {
        B::float_greater_equal(lhs.primitive, rhs.primitive)
    }

    fn float_greater_equal_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {
        B::float_greater_equal_elem(lhs.primitive, rhs)
    }

    fn float_lower(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {
        B::float_lower(lhs.primitive, rhs.primitive)
    }

    fn float_lower_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {
        B::float_lower_elem(lhs.primitive, rhs)
    }

    fn float_lower_equal(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {
        B::float_lower_equal(lhs.primitive, rhs.primitive)
    }

    fn float_lower_equal_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {
        B::float_lower_equal_elem(lhs.primitive, rhs)
    }

    fn float_detach(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        // When we detach a tensor, we remove it from the graph, but we still want to keep the
        // `require_grad` setting.
        let is_require_grad = Self::float_is_require_grad(&tensor);
        // Wrapping the bare primitive in a fresh AutodiffTensor drops all graph history.
        let tensor = AutodiffTensor::new(tensor.primitive);

        match is_require_grad {
            true => tensor.require_grad(),
            false => tensor,
        }
    }

    // Marks (or unmarks) the tensor as a gradient leaf. Disabling `require_grad`
    // also severs the tensor from its history by rebuilding it from the primitive.
    fn float_set_require_grad(tensor: FloatTensor<Self>, require_grad: bool) -> FloatTensor<Self> {
        if require_grad {
            return tensor.require_grad();
        }

        AutodiffTensor::new(tensor.primitive)
    }

    fn float_is_require_grad(tensor: &FloatTensor<Self>) -> bool {
        matches!(tensor.node.requirement, Requirement::Grad)
    }

    // Mean over all elements. Backward distributes the incoming scalar gradient
    // uniformly: d(mean)/dx_i = 1/N where N is the element count.
    fn float_mean(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Mean;

        impl<B: Backend> Backward<B, 1> for Mean {
            // Input shape, needed to broadcast the gradient back to the input.
            type State = Shape;

            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    let shape = ops.state;
                    // Each input element contributed 1/N to the mean.
                    let val = 1_f64 / shape.num_elements() as f64;
                    let ones = B::float_ones(shape, &B::float_device(&grad));
                    let val = B::float_mul_scalar(ones, val.elem());

                    let grad = unsqueeze_like::<B>(grad, val.shape());
                    B::float_mul(val, grad)
                });
            }
        }

        match Mean.prepare::<C>([tensor.node]).compute_bound().stateful() {
            OpsKind::Tracked(prep) => {
                prep.finish(tensor.primitive.shape(), B::float_mean(tensor.primitive))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_mean(tensor.primitive)),
        }
    }

    // Sum over all elements. Backward broadcasts the incoming gradient to every
    // input position (d(sum)/dx_i = 1).
    fn float_sum(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Sum;

        impl<B: Backend> Backward<B, 1> for Sum {
            // Input shape, needed to broadcast the scalar gradient back.
            type State = Shape;

fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let val = B::float_ones(ops.state, &B::float_device(&grad));\n\n let grad = unsqueeze_like::<B>(grad, val.shape());\n B::float_mul(val, grad)\n });\n }\n }\n\n match Sum.prepare::<C>([tensor.node]).compute_bound().stateful() {\n OpsKind::Tracked(prep) => {\n prep.finish(tensor.primitive.shape(), B::float_sum(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_sum(tensor.primitive)),\n }\n }\n\n fn float_mean_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct MeanDim;\n\n impl<B: Backend> Backward<B, 1> for MeanDim {\n type State = (Shape, usize);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape, dim) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let val = 1_f64 / shape.dims[dim] as f64;\n let ones = B::float_ones(shape, &B::float_device(&grad));\n let val = B::float_mul_scalar(ones, B::FloatElem::from_elem(val));\n\n let grad = B::float_sum_dim(grad, dim);\n B::float_mul(val, grad)\n });\n }\n }\n\n match MeanDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (tensor.primitive.shape(), dim),\n B::float_mean_dim(tensor.primitive, dim),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_mean_dim(tensor.primitive, dim)),\n }\n }\n\n fn float_sum_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct SumDim;\n\n impl<B: Backend> Backward<B, 1> for SumDim {\n type State = (Shape, usize);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape, dim) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let ones = 
B::float_ones(shape, &B::float_device(&grad));\n let grad = B::float_sum_dim(grad, dim);\n\n B::float_mul(ones, grad)\n });\n }\n }\n\n match SumDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (tensor.primitive.shape(), dim),\n B::float_sum_dim(tensor.primitive, dim),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_sum_dim(tensor.primitive, dim)),\n }\n }\n\n fn float_argmax(tensor: FloatTensor<Self>, dim: usize) -> IntTensor<B> {\n B::float_argmax(tensor.primitive, dim)\n }\n\n fn float_argmin(tensor: FloatTensor<Self>, dim: usize) -> IntTensor<B> {\n B::float_argmin(tensor.primitive, dim)\n }\n\n fn float_exp(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Exp;\n\n retro_unary!(RetroExp, B::float_exp);\n\n impl<B: Backend> Backward<B, 1> for Exp {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n let output = B::float_exp(input);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_mul(grad, output)\n });\n }\n }\n\n match Exp\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroExp::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_exp(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_exp(tensor.primitive)),\n }\n }\n\n fn float_log(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Log;\n\n retro_unary!(RetroLog, B::float_log);\n\n impl<B: Backend> Backward<B, 1> for Log {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, 
ops.node, grads, |grad| {\n let value = B::float_powf_scalar(input, -1.0);\n B::float_mul(grad, value)\n });\n }\n }\n\n match Log\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroLog::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_log(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_log(tensor.primitive)),\n }\n }\n\n fn float_log1p(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Log1P;\n\n retro_unary!(RetroLog1P, B::float_log1p);\n\n impl<B: Backend> Backward<B, 1> for Log1P {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_add_scalar(input, 1.elem());\n let value = B::float_powf_scalar(value, -1.0);\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Log1P\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroLog1P::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_log1p(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_log1p(tensor.primitive)),\n }\n }\n\n fn float_powf_scalar(tensor: FloatTensor<Self>, value: f32) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct PowfScalar;\n\n #[derive(new, Debug)]\n struct RetroPowfScalar<B: Backend> {\n lhs_id: NodeID,\n rhs: f32,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroPowfScalar<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let lhs = states.get_state::<B::FloatTensorPrimitive>(&self.lhs_id);\n let out = B::float_powf_scalar(lhs, self.rhs);\n 
states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for PowfScalar {\n type State = (NodeID, f32);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (tensor_id, value) = ops.state;\n let tensor = checkpointer.retrieve_node_output(tensor_id);\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let tmp = B::float_powf_scalar(tensor, value - 1.0);\n let value = B::float_mul_scalar(tmp, value.elem());\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match PowfScalar\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroPowfScalar::<B>::new(tensor.node.id, value))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = (prep.checkpoint(&tensor), value);\n prep.finish(state, B::float_powf_scalar(tensor.primitive, value))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_powf_scalar(tensor.primitive, value)),\n }\n }\n\n fn float_sqrt(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sqrt;\n\n retro_unary!(RetroSqrt, B::float_sqrt);\n\n impl<B: Backend> Backward<B, 1> for Sqrt {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_div_scalar(B::float_powf_scalar(input, -0.5), 2.elem());\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Sqrt\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSqrt::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_sqrt(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_sqrt(tensor.primitive)),\n }\n }\n\n fn float_abs(tensor: FloatTensor<Self>) -> 
FloatTensor<Self> {\n #[derive(Debug)]\n struct Abs;\n\n retro_unary!(RetroAbs, B::float_abs);\n\n impl<B: Backend> Backward<B, 1> for Abs {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let tensor: B::FloatTensorPrimitive = checkpointer.retrieve_node_output(ops.state);\n let state = B::float_sign(tensor);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_mul(grad, state)\n });\n }\n }\n\n match Abs\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroAbs::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_abs(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_abs(tensor.primitive)),\n }\n }\n\n fn float_cos(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Cos;\n\n retro_unary!(RetroCos, B::float_cos);\n\n impl<B: Backend> Backward<B, 1> for Cos {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_neg(B::float_sin(input));\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Cos\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroCos::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_cos(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_cos(tensor.primitive)),\n }\n }\n\n fn float_sin(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sin;\n\n retro_unary!(RetroSin, B::float_sin);\n\n impl<B: Backend> Backward<B, 1> for Sin {\n type State = 
NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let state = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_cos(state);\n B::float_mul(grad, value)\n });\n }\n }\n\n match Sin\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSin::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_sin(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_sin(tensor.primitive)),\n }\n }\n\n fn float_tanh(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Tanh;\n\n retro_unary!(RetroTanh, B::float_tanh);\n\n impl<B: Backend> Backward<B, 1> for Tanh {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n let state = B::float_tanh(input);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_add_scalar(\n B::float_neg(B::float_powf_scalar(state, 2.0)),\n 1.elem(),\n );\n B::float_mul(grad, value)\n });\n }\n }\n\n match Tanh\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroTanh::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_tanh(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_tanh(tensor.primitive)),\n }\n }\n\n fn float_round(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Round;\n retro_unary!(RetroRound, B::float_round);\n\n impl<B: Backend> Backward<B, 1> for Round {\n type State = (Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n 
grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape, device) = ops.state;\n unary::<B, _>(ops.parents, ops.node, grads, |_grad| {\n B::float_zeros(shape, &device)\n })\n }\n }\n\n match Round\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroRound::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (tensor.primitive.shape(), B::float_device(&tensor.primitive)),\n B::float_round(tensor.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_round(tensor.primitive)),\n }\n }\n\n fn float_floor(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Floor;\n retro_unary!(RetroFloor, B::float_floor);\n\n impl<B: Backend> Backward<B, 1> for Floor {\n type State = (Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape, device) = ops.state;\n unary::<B, _>(ops.parents, ops.node, grads, |_grad| {\n B::float_zeros(shape, &device)\n })\n }\n }\n\n match Floor\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroFloor::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (tensor.primitive.shape(), B::float_device(&tensor.primitive)),\n B::float_floor(tensor.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_floor(tensor.primitive)),\n }\n }\n\n fn float_ceil(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Ceil;\n retro_unary!(RetroCeil, B::float_ceil);\n\n impl<B: Backend> Backward<B, 1> for Ceil {\n type State = (Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape, device) = ops.state;\n unary::<B, _>(ops.parents, ops.node, grads, |_grad| {\n B::float_zeros(shape, &device)\n })\n }\n }\n\n match Ceil\n 
.prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroCeil::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (tensor.primitive.shape(), B::float_device(&tensor.primitive)),\n B::float_floor(tensor.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_floor(tensor.primitive)),\n }\n }\n\n fn float_erf(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Erf;\n\n retro_unary!(RetroErf, B::float_erf);\n\n impl<B: Backend> Backward<B, 1> for Erf {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let ops = checkpointer.retrieve_node_output(ops.state);\n let exponent = B::float_neg(B::float_powf_scalar(ops, 2.0));\n let numerator = B::float_mul_scalar(B::float_exp(exponent), 2.0.elem());\n let denominator = core::f64::consts::PI.sqrt().elem();\n let value = B::float_div_scalar(numerator, denominator);\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Erf\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroErf::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_erf(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_erf(tensor.primitive)),\n }\n }\n\n fn float_cat(tensors: Vec<FloatTensor<Self>>, dim: usize) -> FloatTensor<Self> {\n #[derive(new, Debug)]\n struct CatStep<B: Backend> {\n nodes: Vec<Option<NodeRef>>,\n // The dimension of each tensor along the dim dimension.\n // This indicates the number of dimension concatenated for each tensor.\n dim_sizes: Vec<usize>,\n output: NodeRef,\n phantom: PhantomData<B>,\n dim: usize,\n }\n\n impl<B: Backend> Step for CatStep<B> {\n fn step(self: Box<Self>, grads: &mut Gradients, 
_checkpointer: &mut Checkpointer) {\n let grad = grads.consume::<B>(&self.output);\n let ranges: Vec<_> = grad.shape().dims.iter().map(|v| 0..*v).collect();\n\n let mut current_index = 0;\n\n self.nodes\n .into_iter()\n .zip(self.dim_sizes)\n .filter_map(|(node, dim_size)| node.map(|node| (node, dim_size)))\n .for_each(|(node, dim_size)| {\n let mut ranges = ranges.clone();\n ranges[self.dim] = current_index..dim_size + current_index;\n current_index += dim_size;\n grads.register::<B>(node.id, B::float_slice(grad.clone(), &ranges));\n });\n }\n\n fn node(&self) -> NodeID {\n self.output.id\n }\n\n fn parents(&self) -> Vec<NodeID> {\n self.nodes\n .iter()\n .filter_map(|node| node.clone())\n .map(|node| node.id)\n .collect()\n }\n fn depth(&self) -> usize {\n self.output.order\n }\n }\n\n let mut nodes = Vec::with_capacity(tensors.len());\n let mut primitives = Vec::with_capacity(tensors.len());\n let mut dim_sizes = Vec::with_capacity(tensors.len());\n\n tensors.into_iter().for_each(|tensor| {\n dim_sizes.push(tensor.primitive.shape().dims[dim]);\n nodes.push(tensor.node);\n primitives.push(tensor.primitive);\n });\n\n let requirement = Requirement::from_nodes(&nodes);\n\n // For simplicity, this operation does not checkpoint anything\n let cat_computing_property = ComputingProperty::Ambiguous;\n let checkpointer_builder = CheckpointerBuilder::default();\n\n let output = B::float_cat(primitives, dim);\n if requirement.is_none() {\n return AutodiffTensor::from_parents(\n output,\n &nodes,\n requirement,\n cat_computing_property,\n );\n }\n\n let output =\n AutodiffTensor::from_parents(output, &nodes, requirement, cat_computing_property);\n let nodes = nodes\n .into_iter()\n .map(|node| node.clone_if_require_grad())\n .collect::<Vec<_>>();\n\n let ops = CatStep::<B>::new(nodes, dim_sizes, output.node.clone(), dim);\n output.register_step(ops, checkpointer_builder)\n }\n\n fn float_max_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {\n match 
MaxMinDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let shape = tensor.primitive.shape();\n let (tensor, index) = B::float_max_dim_with_indices(tensor.primitive, dim);\n prep.finish((index, shape), tensor)\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_max_dim(tensor.primitive, dim)),\n }\n }\n fn float_max_dim_with_indices(\n tensor: FloatTensor<Self>,\n dim: usize,\n ) -> (FloatTensor<Self>, IntTensor<B>) {\n match MaxMinDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let shape = tensor.primitive.shape();\n let (tensor, index) = B::float_max_dim_with_indices(tensor.primitive, dim);\n let tensor = prep.finish((index.clone(), shape), tensor);\n\n (tensor, index)\n }\n OpsKind::UnTracked(prep) => {\n let (tensor, index) = B::float_max_dim_with_indices(tensor.primitive, dim);\n let tensor = prep.finish(tensor);\n\n (tensor, index)\n }\n }\n }\n fn float_min_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {\n match MaxMinDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let shape = tensor.primitive.shape();\n let (tensor, index) = B::float_min_dim_with_indices(tensor.primitive, dim);\n prep.finish((index, shape), tensor)\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_min_dim(tensor.primitive, dim)),\n }\n }\n fn float_min_dim_with_indices(\n tensor: FloatTensor<Self>,\n dim: usize,\n ) -> (FloatTensor<Self>, IntTensor<B>) {\n match MaxMinDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let shape = tensor.primitive.shape();\n let (tensor, index) = B::float_min_dim_with_indices(tensor.primitive, dim);\n let tensor = prep.finish((index.clone(), shape), tensor);\n\n (tensor, index)\n }\n OpsKind::UnTracked(prep) => {\n let (tensor, index) = B::float_min_dim_with_indices(tensor.primitive, dim);\n let tensor = 
prep.finish(tensor);\n\n (tensor, index)\n }\n }\n }\n\n fn float_into_int(tensor: FloatTensor<Self>) -> <Autodiff<B> as Backend>::IntTensorPrimitive {\n B::float_into_int(tensor.primitive)\n }\n\n fn float_powf(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct PowF;\n\n retro_binary!(RetroPowf, B::float_powf);\n\n impl<B: Backend> Backward<B, 2> for PowF {\n type State = (NodeID, NodeID, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (lhs_id, rhs_id, broadcast) = ops.state;\n let lhs: B::FloatTensorPrimitive = checkpointer.retrieve_node_output(lhs_id);\n let rhs: B::FloatTensorPrimitive = checkpointer.retrieve_node_output(rhs_id);\n\n // Both lhs and rhs are needed for both lhs and rhs gradients, but we clone them\n // the number of times required by the parents specification.\n let [rhs_4lhs, rhs_4rhs] = duplicate(&ops.parents, Some(rhs));\n let [lhs_4lhs, lhs_4rhs] = duplicate(&ops.parents, Some(lhs));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n //rhs*(lhs.val**(rhs-1))*grad\n let rhs1 = rhs_4lhs.unwrap();\n let rhs2 = rhs1.clone();\n let lhs = lhs_4lhs.unwrap();\n\n let tmp = B::float_powf(\n lhs,\n B::float_sub_scalar(rhs1, B::FloatElem::from_elem(1.0)),\n );\n let value = B::float_mul(tmp, rhs2);\n let grad = B::float_mul(grad, value);\n\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n //lhs**rhs * ln(lhs) * grad\n let rhs = rhs_4rhs.unwrap();\n let lhs1 = lhs_4rhs.unwrap();\n let lhs2 = lhs1.clone();\n let tmp = B::float_powf(lhs1, rhs);\n let value = B::float_mul(tmp, B::float_log(lhs2));\n let grad = B::float_mul(grad, value);\n\n broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match PowF\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n 
.retro_forward(RetroPowf::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = prep.checkpoint(&lhs);\n let rhs_state = prep.checkpoint(&rhs);\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n B::float_powf(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_powf(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_sign(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sign;\n\n retro_unary!(RetroSign, B::float_sign);\n\n impl<B: Backend> Backward<B, 1> for Sign {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad|\n // Always return 0 because the derivative of the sign function\n // does not contribute to gradient updates in a meaningful way.\n B::float_mul_scalar(grad, 0.elem()));\n }\n }\n\n Sign.prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSign::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateless(B::float_sign(tensor.primitive))\n }\n\n fn float_expand(tensor: FloatTensor<Self>, shape: Shape) -> FloatTensor<Self> {\n // D1: tensor, D2: shape\n #[derive(Debug)]\n struct ExpandDim;\n\n #[derive(new, Debug)]\n struct RetroExpand<B: Backend> {\n input_id: NodeID,\n shape: Shape,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroExpand<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_expand(input, self.shape.clone());\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for ExpandDim {\n type State = (Shape, Shape);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape_in, shape_out) = ops.state;\n let 
ndims_in = shape_in.num_dims();\n let ndims_out = shape_out.num_dims();\n\n let mut shape_expanded = vec![1; ndims_out];\n\n debug_assert!(ndims_out >= ndims_in);\n\n for i in 0..ndims_in {\n shape_expanded[i + (ndims_out - ndims_in)] = shape_in.dims[i];\n }\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let shape_grad = grad.shape();\n let mut grad = grad;\n\n #[allow(clippy::needless_range_loop)]\n for i in 0..ndims_out {\n if shape_expanded[i] == 1 && shape_grad.dims[i] != 1 {\n grad = B::float_sum_dim(grad, i);\n }\n }\n\n B::float_reshape(grad, shape_in)\n });\n }\n }\n\n match ExpandDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroExpand::<B>::new(tensor.node.id, shape.clone()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (tensor.primitive.shape(), shape.clone()),\n B::float_expand(tensor.primitive, shape),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_expand(tensor.primitive, shape)),\n }\n }\n\n fn float_sort(tensor: FloatTensor<Self>, dim: usize, descending: bool) -> FloatTensor<Self> {\n match super::sort::SortDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let shape = tensor.primitive.shape();\n let (tensor, indices) =\n B::float_sort_with_indices(tensor.primitive, dim, descending);\n prep.finish((indices, shape), tensor)\n }\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_sort(tensor.primitive, dim, descending))\n }\n }\n }\n\n fn float_sort_with_indices(\n tensor: FloatTensor<Self>,\n dim: usize,\n descending: bool,\n ) -> (FloatTensor<Self>, IntTensor<B>) {\n match super::sort::SortDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let shape = tensor.primitive.shape();\n let (tensor, indices) =\n B::float_sort_with_indices(tensor.primitive, dim, descending);\n let tensor = prep.finish((indices.clone(), shape), tensor);\n\n (tensor, indices)\n 
}\n OpsKind::UnTracked(prep) => {\n let (tensor, indices) =\n B::float_sort_with_indices(tensor.primitive, dim, descending);\n let tensor = prep.finish(tensor);\n\n (tensor, indices)\n }\n }\n }\n\n fn float_argsort(tensor: FloatTensor<Self>, dim: usize, descending: bool) -> IntTensor<B> {\n B::float_argsort(tensor.primitive, dim, descending)\n }\n\n fn float_repeat_dim(tensor: FloatTensor<Self>, dim: usize, times: usize) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Repeat;\n\n #[derive(new, Debug)]\n struct RetroRepeat<B: Backend> {\n tensor_id: NodeID,\n dim: usize,\n times: usize,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroRepeat<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);\n let out = B::float_repeat_dim(tensor, self.dim, self.times);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for Repeat {\n type State = (usize, usize);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, times) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let mut dims = grad.shape().dims;\n let orig_dim_size = dims[dim] / times;\n if orig_dim_size > 1 {\n dims[dim] = orig_dim_size;\n let orig_dims = dims.clone();\n dims.insert(dim + 1, times); // shape [..., orig_dim_size, times, ...]\n let grad = B::float_reshape(grad, Shape::from(dims));\n let grad = B::float_sum_dim(grad, dim + 1); // sum over repeat times\n B::float_reshape(grad, Shape::from(orig_dims))\n } else {\n B::float_sum_dim(grad, dim)\n }\n });\n }\n }\n\n match Repeat\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroRepeat::<B>::new(tensor.node.id, dim, times))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (dim, times),\n B::float_repeat_dim(tensor.primitive, dim, times),\n ),\n 
OpsKind::UnTracked(prep) => {\n prep.finish(B::float_repeat_dim(tensor.primitive, dim, times))\n }\n }\n }\n\n fn float_cast(tensor: FloatTensor<Self>, dtype: burn_tensor::FloatDType) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_cast(tensor.primitive, dtype))\n }\n\n // TODO: Implement float_prod and float_sum\n // https://github.com/tracel-ai/burn/issues/1458\n}",
"class_signature": "impl<B: Backend, C: CheckpointStrategy> FloatTensorOps<Self> for Autodiff<B, C>"
} |
float_div_scalar | burn-main/crates/burn-autodiff/src/ops/tensor.rs | fn float_div_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {
#[derive(Debug)]
struct DivScalar;
retro_unary_scalar!(RetroDivScalar, B::float_div_scalar);
impl<B: Backend> Backward<B, 1> for DivScalar {
type State = FloatElem<B>;
fn backward(
self,
ops: Ops<Self::State, 1>,
grads: &mut Gradients,
_checkpointer: &mut Checkpointer,
) {
unary::<B, _>(ops.parents, ops.node, grads, |grad| {
let tmp = 1.0 / ops.state.elem::<f32>();
B::float_mul_scalar(grad, tmp.elem())
});
}
}
match DivScalar
.prepare::<C>([lhs.node.clone()])
.memory_bound()
.retro_forward(RetroDivScalar::<B>::new(lhs.node.id, rhs))
.parents([&lhs])
.stateful()
{
OpsKind::Tracked(prep) => prep.finish(rhs, B::float_div_scalar(lhs.primitive, rhs)),
OpsKind::UnTracked(prep) => prep.finish(B::float_div_scalar(lhs.primitive, rhs)),
}
} | use alloc::{boxed::Box, vec, vec::Vec};
use core::marker::PhantomData;
#[cfg(not(feature = "std"))]
#[allow(unused_imports, reason = "required on aarch64, unused on x86_64")]
use num_traits::float::Float;
use crate::{
Autodiff,
checkpoint::{
base::Checkpointer, builder::CheckpointerBuilder, retro_forward::RetroForward,
state::BackwardStates, strategy::CheckpointStrategy,
},
grads::Gradients,
graph::{ComputingProperty, NodeID, NodeRef, Requirement, Step},
ops::{Backward, Ops, OpsKind, binary, broadcast_shape, unary},
retro_binary, retro_unary, retro_unary_scalar,
tensor::AutodiffTensor,
utils::duplicate,
};
use burn_tensor::{
Device, ElementConversion, Shape, TensorData, TensorMetadata,
backend::Backend,
ops::{BoolTensor, FloatElem, FloatTensor, FloatTensorOps, IntTensor},
};
use super::maxmin::MaxMinDim;
// Unsqueeze op on primitive.
/// Reshapes `tensor` to the rank of `shape` by prepending size-1 dimensions,
/// i.e. an "unsqueeze" on the leading axes.
///
/// The tensor's own sizes are kept in the trailing positions; only the first
/// `shape.num_dims() - tensor_rank` axes become 1. Requires the target rank
/// to be at least the input rank (the subtraction underflows otherwise).
fn unsqueeze_like<B: Backend>(
    tensor: B::FloatTensorPrimitive,
    shape: Shape,
) -> B::FloatTensorPrimitive {
    let ndims_out = shape.num_dims();
    let shape = tensor.shape();
    let ndims_in = shape.num_dims();
    // Start from an all-ones shape, then copy the input sizes into the
    // trailing slots.
    let mut dims = vec![1; ndims_out];
    let num_ones = ndims_out - ndims_in;
    dims[num_ones..(ndims_in + num_ones)].copy_from_slice(&shape.dims[..ndims_in]);
    B::float_reshape(tensor, Shape::from(dims))
}
impl<B: Backend, C: CheckpointStrategy> FloatTensorOps<Self> for Autodiff<B, C> {
/// Builds a float tensor from raw `data` on `device`, delegating to the
/// wrapped backend and wrapping the result as a fresh autodiff tensor.
fn float_from_data(data: TensorData, device: &Device<Self>) -> FloatTensor<Self> {
    let inner = B::float_from_data(data, device);
    AutodiffTensor::new(inner)
}
/// Samples a tensor of the given `shape` from `distribution` on `device`,
/// wrapping the backend result as a fresh autodiff tensor.
fn float_random(
    shape: Shape,
    distribution: burn_tensor::Distribution,
    device: &Device<Self>,
) -> FloatTensor<Self> {
    let inner = B::float_random(shape, distribution, device);
    AutodiffTensor::new(inner)
}
/// Creates a zero-filled tensor of `shape` on `device`.
fn float_zeros(shape: Shape, device: &Device<Self>) -> FloatTensor<Self> {
    let inner = B::float_zeros(shape, device);
    AutodiffTensor::new(inner)
}
/// Creates a one-filled tensor of `shape` on `device`.
fn float_ones(shape: Shape, device: &Device<Self>) -> FloatTensor<Self> {
    let inner = B::float_ones(shape, device);
    AutodiffTensor::new(inner)
}
/// Extracts the raw tensor data by delegating to the inner backend.
async fn float_into_data(tensor: FloatTensor<Self>) -> TensorData {
    let inner = tensor.primitive;
    B::float_into_data(inner).await
}
/// Returns the device holding the tensor's underlying primitive.
fn float_device(tensor: &FloatTensor<Self>) -> Device<Self> {
    let inner = &tensor.primitive;
    B::float_device(inner)
}
/// Moves a tensor to another device, remembering the original device so the
/// gradient can be moved back during the backward pass.
fn float_to_device(tensor: FloatTensor<Self>, device: &Device<Self>) -> FloatTensor<Self> {
    #[derive(Debug)]
    struct ToDevice;
    impl<B: Backend> Backward<B, 1> for ToDevice {
        // The device the input lived on before the move.
        type State = B::Device;
        fn backward(
            self,
            ops: Ops<Self::State, 1>,
            grads: &mut Gradients,
            _checkpointer: &mut Checkpointer,
        ) {
            // The gradient must flow back on the input's original device.
            unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                B::float_to_device(grad, &ops.state)
            });
        }
    }
    match ToDevice
        .prepare::<C>([tensor.node])
        .compute_bound()
        .stateful()
    {
        OpsKind::Tracked(prep) => {
            // Capture the source device before the primitive is consumed.
            let device_old = B::float_device(&tensor.primitive);
            prep.finish(device_old, B::float_to_device(tensor.primitive, device))
        }
        OpsKind::UnTracked(prep) => prep.finish(B::float_to_device(tensor.primitive, device)),
    }
}
/// Creates an empty tensor of `shape` on `device` (contents are
/// backend-defined).
fn float_empty(shape: Shape, device: &Device<Self>) -> FloatTensor<Self> {
    let inner = B::float_empty(shape, device);
    AutodiffTensor::new(inner)
}
/// Element-wise addition with autodiff support.
///
/// d(a + b)/da = d(a + b)/db = 1, so each operand receives the incoming
/// gradient, reduced back to its own shape where broadcasting occurred.
fn float_add(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {
    #[derive(Debug)]
    struct Add;
    retro_binary!(RetroAdd, B::float_add);
    impl<B: Backend> Backward<B, 2> for Add {
        // Shapes of both operands, needed to undo broadcasting in backward.
        type State = (Shape, Shape);
        fn backward(
            self,
            ops: Ops<Self::State, 2>,
            grads: &mut Gradients,
            _checkpointer: &mut Checkpointer,
        ) {
            let (shape_lhs, shape_rhs) = ops.state;
            binary::<B, _, _>(
                ops.parents,
                ops.node,
                grads,
                |grad| broadcast_shape::<B>(grad, &shape_lhs),
                |grad| broadcast_shape::<B>(grad, &shape_rhs),
            );
        }
    }
    match Add
        .prepare::<C>([lhs.node.clone(), rhs.node.clone()])
        .memory_bound()
        .retro_forward(RetroAdd::<B>::new(lhs.node.id, rhs.node.id))
        .parents([&lhs, &rhs])
        .stateful()
    {
        OpsKind::Tracked(preps) => preps.finish(
            (lhs.primitive.shape(), rhs.primitive.shape()),
            B::float_add(lhs.primitive, rhs.primitive),
        ),
        OpsKind::UnTracked(preps) => preps.finish(B::float_add(lhs.primitive, rhs.primitive)),
    }
}
/// Adds a scalar to every element.
///
/// d(x + c)/dx = 1, so the backward pass forwards the gradient untouched;
/// no state is required, hence the `stateless` finish.
fn float_add_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {
    #[derive(Debug)]
    struct AddScalar;
    retro_unary_scalar!(RetroAddScalar, B::float_add_scalar);
    impl<B: Backend> Backward<B, 1> for AddScalar {
        type State = ();
        fn backward(
            self,
            ops: Ops<Self::State, 1>,
            grads: &mut Gradients,
            _checkpointer: &mut Checkpointer,
        ) {
            // Identity gradient.
            unary::<B, _>(ops.parents, ops.node, grads, |grad| grad);
        }
    }
    AddScalar
        .prepare::<C>([lhs.node.clone()])
        .memory_bound()
        .retro_forward(RetroAddScalar::<B>::new(lhs.node.id, rhs))
        .parents([&lhs])
        .stateless(B::float_add_scalar(lhs.primitive, rhs))
}
/// Element-wise subtraction with autodiff support.
///
/// d(a - b)/da = 1 and d(a - b)/db = -1, so the lhs receives the gradient
/// as-is and the rhs receives its negation, each reduced back to its own
/// shape where broadcasting occurred.
fn float_sub(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {
    #[derive(Debug)]
    struct Sub;
    retro_binary!(RetroSub, B::float_sub);
    impl<B: Backend> Backward<B, 2> for Sub {
        // Shapes of both operands, needed to undo broadcasting in backward.
        type State = (Shape, Shape);
        fn backward(
            self,
            ops: Ops<Self::State, 2>,
            grads: &mut Gradients,
            _checkpointer: &mut Checkpointer,
        ) {
            let (shape_lhs, shape_rhs) = ops.state;
            binary::<B, _, _>(
                ops.parents,
                ops.node,
                grads,
                |grad| broadcast_shape::<B>(grad, &shape_lhs),
                |grad| broadcast_shape::<B>(B::float_neg(grad), &shape_rhs),
            );
        }
    }
    match Sub
        .prepare::<C>([lhs.node.clone(), rhs.node.clone()])
        .memory_bound()
        .retro_forward(RetroSub::<B>::new(lhs.node.id, rhs.node.id))
        .parents([&lhs, &rhs])
        .stateful()
    {
        OpsKind::Tracked(preps) => preps.finish(
            (lhs.primitive.shape(), rhs.primitive.shape()),
            B::float_sub(lhs.primitive, rhs.primitive),
        ),
        OpsKind::UnTracked(preps) => preps.finish(B::float_sub(lhs.primitive, rhs.primitive)),
    }
}
/// Subtracts a scalar from every element.
///
/// d(x - c)/dx = 1, so the backward pass forwards the gradient untouched.
fn float_sub_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {
    #[derive(Debug)]
    struct SubScalar;
    retro_unary_scalar!(RetroSubScalar, B::float_sub_scalar);
    impl<B: Backend> Backward<B, 1> for SubScalar {
        type State = ();
        fn backward(
            self,
            ops: Ops<Self::State, 1>,
            grads: &mut Gradients,
            _checkpointer: &mut Checkpointer,
        ) {
            // Identity gradient.
            unary::<B, _>(ops.parents, ops.node, grads, |grad| grad);
        }
    }
    SubScalar
        .prepare::<C>([lhs.node.clone()])
        .memory_bound()
        .retro_forward(RetroSubScalar::<B>::new(lhs.node.id, rhs))
        .parents([&lhs])
        .stateless(B::float_sub_scalar(lhs.primitive, rhs))
}
/// Element-wise multiplication with autodiff support.
///
/// d(a*b)/da = b and d(a*b)/db = a: each side's gradient needs the *other*
/// operand's value, so each operand is checkpointed only when the other
/// side is tracked.
fn float_mul(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {
    #[derive(Debug)]
    struct Mul;
    retro_binary!(RetroMul, B::float_mul);
    impl<B: Backend> Backward<B, 2> for Mul {
        // Optional checkpoint ids for (lhs, rhs) plus broadcast bookkeeping.
        type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);
        fn backward(
            self,
            ops: Ops<Self::State, 2>,
            grads: &mut Gradients,
            checkpointer: &mut Checkpointer,
        ) {
            let (lhs, rhs, broadcast) = ops.state;
            let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));
            let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));
            binary::<B, _, _>(
                ops.parents,
                ops.node,
                grads,
                |grad| {
                    // dL/dlhs = grad * rhs
                    let grad = B::float_mul(grad, rhs.unwrap());
                    broadcast.backward_lhs::<B>(grad)
                },
                |grad| {
                    // dL/drhs = grad * lhs
                    let grad = B::float_mul(grad, lhs.unwrap());
                    broadcast.backward_rhs::<B>(grad)
                },
            );
        }
    }
    let lhs_tracked = lhs.is_tracked();
    let rhs_tracked = rhs.is_tracked();
    let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);
    match Mul
        .prepare::<C>([lhs.node.clone(), rhs.node.clone()])
        .memory_bound()
        .retro_forward(RetroMul::<B>::new(lhs.node.id, rhs.node.id))
        .parents([&lhs, &rhs])
        .stateful()
    {
        OpsKind::Tracked(mut prep) => {
            // Note the crossed gating: lhs's value is only needed for rhs's
            // gradient (and vice versa).
            let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));
            let rhs_state = lhs_tracked.then(|| prep.checkpoint(&rhs));
            prep.finish(
                (lhs_state, rhs_state, broadcast),
                B::float_mul(lhs.primitive, rhs.primitive),
            )
        }
        OpsKind::UnTracked(prep) => prep.finish(B::float_mul(lhs.primitive, rhs.primitive)),
    }
}
/// Multiplies every element by a scalar.
///
/// d(x*c)/dx = c, so the backward pass scales the gradient by the stored
/// scalar.
fn float_mul_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {
    #[derive(Debug)]
    struct MulScalar;
    retro_unary_scalar!(RetroMulScalar, B::float_mul_scalar);
    impl<B: Backend> Backward<B, 1> for MulScalar {
        // The scalar multiplier, reused to scale the gradient.
        type State = FloatElem<B>;
        fn backward(
            self,
            ops: Ops<Self::State, 1>,
            grads: &mut Gradients,
            _checkpointer: &mut Checkpointer,
        ) {
            unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                B::float_mul_scalar(grad, ops.state)
            });
        }
    }
    match MulScalar
        .prepare::<C>([lhs.node.clone()])
        .memory_bound()
        .retro_forward(RetroMulScalar::<B>::new(lhs.node.id, rhs))
        .parents([&lhs])
        .stateful()
    {
        OpsKind::Tracked(prep) => prep.finish(rhs, B::float_mul_scalar(lhs.primitive, rhs)),
        OpsKind::UnTracked(prep) => prep.finish(B::float_mul_scalar(lhs.primitive, rhs)),
    }
}
/// Element-wise division with autodiff support.
///
/// d(a/b)/da = 1/b and d(a/b)/db = -a/b². The rhs value is needed for both
/// gradients, so it is checkpointed when either side is tracked; the lhs
/// value is only needed for the rhs gradient.
fn float_div(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {
    #[derive(Debug)]
    struct Div;
    retro_binary!(RetroDiv, B::float_div);
    impl<B: Backend> Backward<B, 2> for Div {
        // Optional checkpoint ids for (lhs, rhs) plus broadcast bookkeeping.
        type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);
        fn backward(
            self,
            ops: Ops<Self::State, 2>,
            grads: &mut Gradients,
            checkpointer: &mut Checkpointer,
        ) {
            let (lhs, rhs, broadcast) = ops.state;
            let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));
            let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));
            // rhs is consumed by both closures; clone only as many times as
            // the tracked parents require.
            let [rhs_4lhs, rhs_4rhs] = duplicate(&ops.parents, rhs);
            binary::<B, _, _>(
                ops.parents,
                ops.node,
                grads,
                |grad| {
                    // dL/dlhs = grad * rhs⁻¹
                    let rhs = rhs_4lhs.unwrap();
                    let value = B::float_powf_scalar(rhs, -1.0);
                    let grad = B::float_mul(grad, value);
                    broadcast.backward_lhs::<B>(grad)
                },
                |grad| {
                    // dL/drhs = grad * (-lhs / rhs²)
                    let rhs = rhs_4rhs.unwrap();
                    let lhs = lhs.unwrap();
                    let value = B::float_div(B::float_neg(lhs), B::float_powf_scalar(rhs, 2.0));
                    let grad = B::float_mul(grad, value);
                    broadcast.backward_rhs::<B>(grad)
                },
            );
        }
    }
    let lhs_tracked = lhs.is_tracked();
    let rhs_tracked = rhs.is_tracked();
    let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);
    match Div
        .prepare::<C>([lhs.node.clone(), rhs.node.clone()])
        .memory_bound()
        .retro_forward(RetroDiv::<B>::new(lhs.node.id, rhs.node.id))
        .parents([&lhs, &rhs])
        .stateful()
    {
        OpsKind::Tracked(mut prep) => {
            let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));
            let rhs_state = (lhs_tracked || rhs_tracked).then(|| prep.checkpoint(&rhs));
            prep.finish(
                (lhs_state, rhs_state, broadcast),
                B::float_div(lhs.primitive, rhs.primitive),
            )
        }
        OpsKind::UnTracked(prep) => prep.finish(B::float_div(lhs.primitive, rhs.primitive)),
    }
}
/// Divides every element by a scalar.
///
/// d(x/c)/dx = 1/c, so the backward pass multiplies the gradient by the
/// reciprocal of the stored scalar.
fn float_div_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {
    #[derive(Debug)]
    struct DivScalar;
    retro_unary_scalar!(RetroDivScalar, B::float_div_scalar);
    impl<B: Backend> Backward<B, 1> for DivScalar {
        // The scalar divisor, needed to scale the gradient.
        type State = FloatElem<B>;
        fn backward(
            self,
            ops: Ops<Self::State, 1>,
            grads: &mut Gradients,
            _checkpointer: &mut Checkpointer,
        ) {
            unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                // The reciprocal is computed in f32 and converted back to the
                // backend element type.
                let tmp = 1.0 / ops.state.elem::<f32>();
                B::float_mul_scalar(grad, tmp.elem())
            });
        }
    }
    match DivScalar
        .prepare::<C>([lhs.node.clone()])
        .memory_bound()
        .retro_forward(RetroDivScalar::<B>::new(lhs.node.id, rhs))
        .parents([&lhs])
        .stateful()
    {
        OpsKind::Tracked(prep) => prep.finish(rhs, B::float_div_scalar(lhs.primitive, rhs)),
        OpsKind::UnTracked(prep) => prep.finish(B::float_div_scalar(lhs.primitive, rhs)),
    }
}
/// Element-wise remainder with autodiff support.
///
/// Using remainder(x, y) = x - floor(x / y) * y: the gradient w.r.t. x is 1
/// (identity) and w.r.t. y is -floor(x / y). Both operand values are needed
/// for the rhs gradient, so checkpointing mirrors `float_div`.
fn float_remainder(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {
    #[derive(Debug)]
    struct Rem;
    retro_binary!(RetroRem, B::float_remainder);
    impl<B: Backend> Backward<B, 2> for Rem {
        // Optional checkpoint ids for (lhs, rhs) plus broadcast bookkeeping.
        type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);
        fn backward(
            self,
            ops: Ops<Self::State, 2>,
            grads: &mut Gradients,
            checkpointer: &mut Checkpointer,
        ) {
            let (lhs, rhs, broadcast) = ops.state;
            let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));
            let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));
            binary::<B, _, _>(
                ops.parents,
                ops.node,
                grads,
                |grad| {
                    // remainder(x, y) = x - floor(x / y) * y
                    // partial(x - floor(x / y) * y, x) = 1
                    broadcast.backward_lhs::<B>(grad)
                },
                |grad| {
                    // partial(x - floor(x / y) * y, y) = - floor(x / y)
                    let rhs = rhs.unwrap();
                    let lhs = lhs.unwrap();
                    let value = B::float_neg(B::float_floor(B::float_div(lhs, rhs)));
                    let grad = B::float_mul(grad, value);
                    broadcast.backward_rhs::<B>(grad)
                },
            );
        }
    }
    let lhs_tracked = lhs.is_tracked();
    let rhs_tracked = rhs.is_tracked();
    let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);
    match Rem
        .prepare::<C>([lhs.node.clone(), rhs.node.clone()])
        .memory_bound()
        .retro_forward(RetroRem::<B>::new(lhs.node.id, rhs.node.id))
        .parents([&lhs, &rhs])
        .stateful()
    {
        OpsKind::Tracked(mut prep) => {
            let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));
            let rhs_state = (lhs_tracked || rhs_tracked).then(|| prep.checkpoint(&rhs));
            prep.finish(
                (lhs_state, rhs_state, broadcast),
                B::float_remainder(lhs.primitive, rhs.primitive),
            )
        }
        OpsKind::UnTracked(prep) => {
            prep.finish(B::float_remainder(lhs.primitive, rhs.primitive))
        }
    }
}
/// Remainder of every element by a scalar.
///
/// With a constant divisor, d(remainder(x, c))/dx = 1 (almost everywhere),
/// so the backward pass forwards the gradient untouched.
fn float_remainder_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {
    #[derive(Debug)]
    struct RemainderScalar;
    retro_unary_scalar!(RetroRemainderScalar, B::float_remainder_scalar);
    impl<B: Backend> Backward<B, 1> for RemainderScalar {
        type State = ();
        fn backward(
            self,
            ops: Ops<Self::State, 1>,
            grads: &mut Gradients,
            _checkpointer: &mut Checkpointer,
        ) {
            // Identity gradient.
            unary::<B, _>(ops.parents, ops.node, grads, |grad| grad);
        }
    }
    RemainderScalar
        .prepare::<C>([lhs.node.clone()])
        .memory_bound()
        .retro_forward(RetroRemainderScalar::<B>::new(lhs.node.id, rhs))
        .parents([&lhs])
        .stateless(B::float_remainder_scalar(lhs.primitive, rhs))
}
/// Matrix multiplication with autodiff support.
///
/// dL/dlhs = grad · rhsᵀ and dL/drhs = lhsᵀ · grad, so each side's gradient
/// needs the *other* operand's value; checkpointing is gated accordingly.
/// Marked compute-bound: the output is stored rather than recomputed.
fn float_matmul(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {
    #[derive(Debug)]
    struct Matmul;
    impl<B: Backend> Backward<B, 2> for Matmul {
        // Optional checkpoint ids for (lhs, rhs) plus broadcast bookkeeping.
        type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);
        fn backward(
            self,
            ops: Ops<Self::State, 2>,
            grads: &mut Gradients,
            checkpointer: &mut Checkpointer,
        ) {
            let (lhs, rhs, broadcast) = ops.state;
            let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));
            let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));
            binary::<B, _, _>(
                ops.parents,
                ops.node,
                grads,
                |grad| {
                    // dL/dlhs = grad · rhsᵀ
                    let rhs = B::float_transpose(rhs.unwrap());
                    let grad = B::float_matmul(grad, rhs);
                    broadcast.backward_lhs::<B>(grad)
                },
                |grad| {
                    // dL/drhs = lhsᵀ · grad
                    let lhs = B::float_transpose(lhs.unwrap());
                    let grad = B::float_matmul(lhs, grad);
                    broadcast.backward_rhs::<B>(grad)
                },
            );
        }
    }
    let lhs_tracked = lhs.is_tracked();
    let rhs_tracked = rhs.is_tracked();
    let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);
    match Matmul
        .prepare::<C>([lhs.node.clone(), rhs.node.clone()])
        .compute_bound()
        .stateful()
    {
        OpsKind::Tracked(mut prep) => {
            // Crossed gating: each operand is saved only if the other side
            // needs a gradient.
            let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));
            let rhs_state = lhs_tracked.then(|| prep.checkpoint(&rhs));
            prep.finish(
                (lhs_state, rhs_state, broadcast),
                B::float_matmul(lhs.primitive, rhs.primitive),
            )
        }
        OpsKind::UnTracked(prep) => prep.finish(B::float_matmul(lhs.primitive, rhs.primitive)),
    }
}
fn float_neg(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
#[derive(Debug)]
struct Neg;
retro_unary!(RetroNeg, B::float_neg);
impl<B: Backend> Backward<B, 1> for Neg {
type State = ();
fn backward(
self,
ops: Ops<Self::State, 1>,
grads: &mut Gradients,
_checkpointer: &mut Checkpointer,
) {
unary::<B, _>(ops.parents, ops.node, grads, |grad| B::float_neg(grad));
}
}
Neg.prepare::<C>([tensor.node.clone()])
.memory_bound()
.retro_forward(RetroNeg::<B>::new(tensor.node.id))
.parents([&tensor])
.stateless(B::float_neg(tensor.primitive))
}
/// Element-wise reciprocal (1/x) with autodiff support.
///
/// d(1/x)/dx = -x⁻², so the input value is checkpointed and used to scale
/// the gradient in the backward pass.
fn float_recip(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
    #[derive(Debug)]
    struct Recip;
    retro_unary!(RetroRecip, B::float_recip);
    impl<B: Backend> Backward<B, 1> for Recip {
        // Checkpoint id of the input tensor.
        type State = NodeID;
        fn backward(
            self,
            ops: Ops<Self::State, 1>,
            grads: &mut Gradients,
            checkpointer: &mut Checkpointer,
        ) {
            let tensor = checkpointer.retrieve_node_output(ops.state);
            unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                // dL/dx = grad * (-x⁻²)
                let tmp = B::float_powf_scalar(tensor, -2.0);
                let value = B::float_neg(tmp);
                B::float_mul(grad, value)
            });
        }
    }
    match Recip
        .prepare::<C>([tensor.node.clone()])
        .memory_bound()
        .retro_forward(RetroRecip::<B>::new(tensor.node.id))
        .parents([&tensor])
        .stateful()
    {
        OpsKind::Tracked(mut prep) => {
            let state = prep.checkpoint(&tensor);
            prep.finish(state, B::float_recip(tensor.primitive))
        }
        OpsKind::UnTracked(prep) => prep.finish(B::float_recip(tensor.primitive)),
    }
}
/// Swaps two dimensions of a tensor with autodiff support.
///
/// Swapping is its own inverse, so the backward pass swaps the same two
/// dimensions of the gradient to restore the input layout.
fn float_swap_dims(tensor: FloatTensor<Self>, dim1: usize, dim2: usize) -> FloatTensor<Self> {
    #[derive(Debug)]
    struct SwapDim;
    // Recomputes the forward output from the checkpointed input when the
    // operation is memory-bound.
    #[derive(new, Debug)]
    struct RetroSwapDims<B: Backend> {
        input_id: NodeID,
        dim1: usize,
        dim2: usize,
        _backend: PhantomData<B>,
    }
    impl<B: Backend> RetroForward for RetroSwapDims<B> {
        fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
            let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);
            let out = B::float_swap_dims(input, self.dim1, self.dim2);
            states.save(out_node, out)
        }
    }
    impl<B: Backend> Backward<B, 1> for SwapDim {
        // The swapped dimension pair.
        type State = (usize, usize);
        fn backward(
            self,
            ops: Ops<Self::State, 1>,
            grads: &mut Gradients,
            _checkpointer: &mut Checkpointer,
        ) {
            let (dim1, dim2) = ops.state;
            unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                B::float_swap_dims(grad, dim2, dim1)
            });
        }
    }
    match SwapDim
        .prepare::<C>([tensor.node.clone()])
        .memory_bound()
        .retro_forward(RetroSwapDims::<B>::new(tensor.node.id, dim1, dim2))
        .parents([&tensor])
        .stateful()
    {
        OpsKind::Tracked(prep) => prep.finish(
            (dim1, dim2),
            B::float_swap_dims(tensor.primitive, dim1, dim2),
        ),
        OpsKind::UnTracked(prep) => {
            prep.finish(B::float_swap_dims(tensor.primitive, dim1, dim2))
        }
    }
}
/// Permutes the tensor's dimensions with autodiff support.
///
/// The backward pass applies the inverse permutation to the gradient so it
/// matches the input layout.
fn float_permute(tensor: FloatTensor<Self>, axes: &[usize]) -> FloatTensor<Self> {
    #[derive(Debug)]
    struct PermuteDim;
    // Recomputes the forward output from the checkpointed input when the
    // operation is memory-bound.
    #[derive(new, Debug)]
    struct RetroPermuteDims<B: Backend> {
        input_id: NodeID,
        axes: Vec<usize>,
        _backend: PhantomData<B>,
    }
    impl<B: Backend> RetroForward for RetroPermuteDims<B> {
        fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
            let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);
            let out = B::float_permute(input, &self.axes);
            states.save(out_node, out)
        }
    }
    impl<B: Backend> Backward<B, 1> for PermuteDim {
        // The forward permutation; inverted below.
        type State = Vec<usize>;
        fn backward(
            self,
            ops: Ops<Self::State, 1>,
            grads: &mut Gradients,
            _checkpointer: &mut Checkpointer,
        ) {
            let axes = ops.state;
            // Build the inverse permutation: if axis `a` moved to position
            // `i`, the inverse sends position `a` back to `i`.
            let mut inverse = vec![0usize; axes.len()];
            axes.iter()
                .enumerate()
                .for_each(|(i, &axis)| inverse[axis] = i);
            unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                B::float_permute(grad, &inverse)
            });
        }
    }
    match PermuteDim
        .prepare::<C>([tensor.node.clone()])
        .memory_bound()
        .retro_forward(RetroPermuteDims::<B>::new(tensor.node.id, axes.to_vec()))
        .parents([&tensor])
        .stateful()
    {
        OpsKind::Tracked(prep) => {
            prep.finish(axes.to_vec(), B::float_permute(tensor.primitive, axes))
        }
        OpsKind::UnTracked(prep) => prep.finish(B::float_permute(tensor.primitive, axes)),
    }
}
/// Flips the tensor along the given axes with autodiff support.
///
/// Flipping the same axes twice is the identity, so the backward pass flips
/// the gradient along the same axes.
fn float_flip(tensor: FloatTensor<Self>, axes: &[usize]) -> FloatTensor<Self> {
    #[derive(Debug)]
    struct FlipDim;
    // Recomputes the forward output from the checkpointed input when the
    // operation is memory-bound.
    #[derive(new, Debug)]
    struct RetroFlipDims<B: Backend> {
        input_id: NodeID,
        axes: Vec<usize>,
        _backend: PhantomData<B>,
    }
    impl<B: Backend> RetroForward for RetroFlipDims<B> {
        fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
            let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);
            let out = B::float_flip(input, &self.axes);
            states.save(out_node, out)
        }
    }
    impl<B: Backend> Backward<B, 1> for FlipDim {
        // The flipped axes, reused in backward.
        type State = Vec<usize>;
        fn backward(
            self,
            ops: Ops<Self::State, 1>,
            grads: &mut Gradients,
            _checkpointer: &mut Checkpointer,
        ) {
            let axes = ops.state;
            unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                B::float_flip(grad, &axes)
            });
        }
    }
    match FlipDim
        .prepare::<C>([tensor.node.clone()])
        .memory_bound()
        .retro_forward(RetroFlipDims::<B>::new(tensor.node.id, axes.to_vec()))
        .parents([&tensor])
        .stateful()
    {
        OpsKind::Tracked(prep) => {
            prep.finish(axes.to_vec(), B::float_flip(tensor.primitive, axes))
        }
        OpsKind::UnTracked(prep) => prep.finish(B::float_flip(tensor.primitive, axes)),
    }
}
/// Reshapes the tensor with autodiff support.
///
/// The backward pass normally just reshapes the gradient back to the input
/// shape; if the gradient was broadcast along a dimension the output had as
/// size 1, that dimension is summed down first.
fn float_reshape(tensor: FloatTensor<Self>, shape: Shape) -> FloatTensor<Self> {
    #[derive(Debug)]
    struct ReshapeDim;
    // Recomputes the forward output from the checkpointed input when the
    // operation is memory-bound.
    #[derive(new, Debug)]
    struct RetroReshape<B: Backend> {
        input_id: NodeID,
        shape: Shape,
        _backend: PhantomData<B>,
    }
    impl<B: Backend> RetroForward for RetroReshape<B> {
        fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
            let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);
            let out = B::float_reshape(input, self.shape.clone());
            states.save(out_node, out)
        }
    }
    impl<B: Backend> Backward<B, 1> for ReshapeDim {
        // (input shape, requested output shape)
        type State = (Shape, Shape);
        fn backward(
            self,
            ops: Ops<Self::State, 1>,
            grads: &mut Gradients,
            _checkpointer: &mut Checkpointer,
        ) {
            let (shape_original, shape) = ops.state;
            let ndims_out = shape.num_dims();
            unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                let shape_grad = grad.shape();
                let mut grad = grad;
                // Collapse dimensions where the output was size 1 but the
                // incoming gradient is larger (broadcast during backward).
                for i in 0..ndims_out {
                    if shape.dims[i] == 1 && shape_grad.dims[i] != 1 {
                        grad = B::float_sum_dim(grad, i);
                    }
                }
                B::float_reshape(grad, shape_original)
            });
        }
    }
    match ReshapeDim
        .prepare::<C>([tensor.node.clone()])
        .memory_bound()
        .retro_forward(RetroReshape::<B>::new(tensor.node.id, shape.clone()))
        .parents([&tensor])
        .stateful()
    {
        OpsKind::Tracked(prep) => prep.finish(
            (tensor.primitive.shape(), shape.clone()),
            B::float_reshape(tensor.primitive, shape),
        ),
        OpsKind::UnTracked(prep) => prep.finish(B::float_reshape(tensor.primitive, shape)),
    }
}
/// Gathers elements along `dim` at the given `indices`, with autodiff
/// support.
///
/// The backward pass scatters the gradient into a zero tensor of the input
/// shape at the same indices, routing each gradient element back to the
/// position it was gathered from.
fn float_gather(
    dim: usize,
    tensor: FloatTensor<Self>,
    indices: IntTensor<B>,
) -> FloatTensor<Self> {
    #[derive(Debug)]
    struct Gather;
    impl<B: Backend> Backward<B, 1> for Gather {
        // (dim, gather indices, input shape, input device)
        type State = (usize, IntTensor<B>, Shape, B::Device);
        fn backward(
            self,
            ops: Ops<Self::State, 1>,
            grads: &mut Gradients,
            _checkpointer: &mut Checkpointer,
        ) {
            let (dim, indices, shape, device) = ops.state;
            unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                let zeros = B::float_zeros(shape, &device);
                B::float_scatter(dim, zeros, indices, grad)
            });
        }
    }
    match Gather
        .prepare::<C>([tensor.node])
        .compute_bound()
        .stateful()
    {
        OpsKind::Tracked(prep) => prep.finish(
            (
                dim,
                indices.clone(),
                tensor.primitive.shape(),
                B::float_device(&tensor.primitive),
            ),
            B::float_gather(dim, tensor.primitive, indices),
        ),
        OpsKind::UnTracked(prep) => {
            prep.finish(B::float_gather(dim, tensor.primitive, indices))
        }
    }
}
/// Scatters `value` into `tensor` along `dim` at `indices`, with autodiff
/// support.
///
/// Backward (as written below): the gradient w.r.t. `tensor` is the incoming
/// gradient with zeros scattered at the written indices, and the gradient
/// w.r.t. `value` is the incoming gradient scattered into a zero tensor of
/// `value`'s shape. NOTE(review): both closures rely on the backend's
/// `float_scatter` semantics — confirm against the backend contract.
fn float_scatter(
    dim: usize,
    tensor: FloatTensor<Self>,
    indices: IntTensor<B>,
    value: FloatTensor<Self>,
) -> FloatTensor<Self> {
    #[derive(Debug)]
    struct Scatter;
    impl<B: Backend> Backward<B, 2> for Scatter {
        // (dim, indices, tensor shape, value shape, device)
        type State = (usize, IntTensor<B>, Shape, Shape, B::Device);
        fn backward(
            self,
            ops: Ops<Self::State, 2>,
            grads: &mut Gradients,
            _checkpointer: &mut Checkpointer,
        ) {
            let (dim, indices, shape_lhs, shape_rhs, device) = ops.state;
            // Clone the indices only as many times as the tracked parents
            // require.
            let [indices_4lhs, indices_4rhs] = duplicate(&ops.parents, Some(indices));
            binary::<B, _, _>(
                ops.parents,
                ops.node,
                grads,
                |grad| {
                    let zeros = B::float_zeros(shape_lhs, &device);
                    B::float_scatter(dim, grad, indices_4lhs.unwrap(), zeros)
                },
                |grad| {
                    let zeros = B::float_zeros(shape_rhs, &device);
                    B::float_scatter(dim, zeros, indices_4rhs.unwrap(), grad)
                },
            );
        }
    }
    match Scatter
        .prepare::<C>([tensor.node, value.node])
        .compute_bound()
        .stateful()
    {
        OpsKind::Tracked(prep) => prep.finish(
            (
                dim,
                indices.clone(),
                tensor.primitive.shape(),
                value.primitive.shape(),
                B::float_device(&value.primitive),
            ),
            B::float_scatter(dim, tensor.primitive, indices, value.primitive),
        ),
        OpsKind::UnTracked(prep) => prep.finish(B::float_scatter(
            dim,
            tensor.primitive,
            indices,
            value.primitive,
        )),
    }
}
/// Selects slices along `dim` at the given `indices`, with autodiff support.
///
/// The backward pass assigns the gradient back into a zero tensor of the
/// input shape at the same indices, routing each selected slice's gradient
/// to its source position.
fn float_select(
    tensor: FloatTensor<Self>,
    dim: usize,
    indices: IntTensor<B>,
) -> FloatTensor<Self> {
    #[derive(Debug)]
    struct Select;
    // Recomputes the forward output from the checkpointed input when the
    // operation is memory-bound.
    #[derive(new, Debug)]
    struct RetroSelect<B: Backend> {
        input_id: NodeID,
        dim: usize,
        indices: IntTensor<B>,
    }
    impl<B: Backend> RetroForward for RetroSelect<B> {
        fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
            let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);
            let out = B::float_select(input, self.dim, self.indices.clone());
            states.save(out_node, out)
        }
    }
    impl<B: Backend> Backward<B, 1> for Select {
        // (dim, indices, input shape, input device)
        type State = (usize, IntTensor<B>, Shape, B::Device);
        fn backward(
            self,
            ops: Ops<Self::State, 1>,
            grads: &mut Gradients,
            _checkpointer: &mut Checkpointer,
        ) {
            let (dim, indices, shape, device) = ops.state;
            unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                let zeros = B::float_zeros(shape, &device);
                B::float_select_assign(zeros, dim, indices, grad)
            });
        }
    }
    match Select
        .prepare::<C>([tensor.node.clone()])
        .memory_bound()
        .retro_forward(RetroSelect::<B>::new(tensor.node.id, dim, indices.clone()))
        .parents([&tensor])
        .stateful()
    {
        OpsKind::Tracked(prep) => prep.finish(
            (
                dim,
                indices.clone(),
                tensor.primitive.shape(),
                B::float_device(&tensor.primitive),
            ),
            B::float_select(tensor.primitive, dim, indices),
        ),
        OpsKind::UnTracked(prep) => {
            prep.finish(B::float_select(tensor.primitive, dim, indices))
        }
    }
}
/// Assign `value` into `tensor` along `dim` at `indices`.
///
/// Backward: the gradient w.r.t. `tensor` is the output gradient unchanged
/// (suggesting the assign accumulates rather than overwrites — confirm against
/// the inner backend's `select_assign` semantics), and the gradient w.r.t.
/// `value` is the output gradient gathered at the same indices.
fn float_select_assign(
    tensor: FloatTensor<Self>,
    dim: usize,
    indices: IntTensor<B>,
    value: FloatTensor<Self>,
) -> FloatTensor<Self> {
    #[derive(Debug)]
    struct IndexSelectDimAssign;
    // Replays the forward assign from the two checkpointed inputs.
    #[derive(new, Debug)]
    struct RetroSelectAssign<B: Backend> {
        tensor_id: NodeID,
        dim: usize,
        indices: IntTensor<B>,
        value_id: NodeID,
    }
    impl<B: Backend> RetroForward for RetroSelectAssign<B> {
        fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
            let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);
            let value = states.get_state::<B::FloatTensorPrimitive>(&self.value_id);
            let out = B::float_select_assign(tensor, self.dim, self.indices.clone(), value);
            states.save(out_node, out)
        }
    }
    impl<B: Backend> Backward<B, 2> for IndexSelectDimAssign {
        type State = (usize, IntTensor<B>);
        fn backward(
            self,
            ops: Ops<Self::State, 2>,
            grads: &mut Gradients,
            _checkpointer: &mut Checkpointer,
        ) {
            let (dim, indices) = ops.state;
            binary::<B, _, _>(
                ops.parents,
                ops.node,
                grads,
                // Gradient flows through `tensor` untouched.
                |grad| grad,
                // Gradient for `value`: gather the output gradient at the
                // assigned positions.
                |grad| B::float_select(grad, dim, indices),
            );
        }
    }
    match IndexSelectDimAssign
        .prepare::<C>([tensor.node.clone(), value.node.clone()])
        .memory_bound()
        .retro_forward(RetroSelectAssign::<B>::new(
            tensor.node.id,
            dim,
            indices.clone(),
            value.node.id,
        ))
        .parents([&tensor, &value])
        .stateful()
    {
        OpsKind::Tracked(prep) => prep.finish(
            (dim, indices.clone()),
            B::float_select_assign(tensor.primitive, dim, indices, value.primitive),
        ),
        OpsKind::UnTracked(prep) => prep.finish(B::float_select_assign(
            tensor.primitive,
            dim,
            indices,
            value.primitive,
        )),
    }
}
/// Slice `tensor` with one range per dimension.
///
/// Backward: the output gradient is written into the sliced region of a zero
/// tensor of the original input shape (`slice_assign`), leaving zeros
/// everywhere outside the slice.
fn float_slice(
    tensor: FloatTensor<Self>,
    ranges: &[core::ops::Range<usize>],
) -> FloatTensor<Self> {
    #[derive(Debug)]
    struct Index;
    // Replays the forward slice from a checkpointed input; ranges are stored
    // owned since the retro step outlives the borrowed argument.
    #[derive(new, Debug)]
    struct RetroSlice<B: Backend> {
        tensor_id: NodeID,
        ranges: Vec<core::ops::Range<usize>>,
        _backend: PhantomData<B>,
    }
    impl<B: Backend> RetroForward for RetroSlice<B> {
        fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
            let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);
            let out = B::float_slice(tensor, &self.ranges);
            states.save(out_node, out)
        }
    }
    impl<B: Backend> Backward<B, 1> for Index {
        // (ranges, input shape, device) captured at forward time.
        type State = (Vec<core::ops::Range<usize>>, Shape, B::Device);
        fn backward(
            self,
            ops: Ops<Self::State, 1>,
            grads: &mut Gradients,
            _checkpointer: &mut Checkpointer,
        ) {
            let (ranges, shape, device) = ops.state;
            unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                let zeros = B::float_zeros(shape, &device);
                B::float_slice_assign(zeros, &ranges, grad)
            });
        }
    }
    match Index
        .prepare::<C>([tensor.node.clone()])
        .memory_bound()
        .retro_forward(RetroSlice::<B>::new(tensor.node.id, ranges.to_vec()))
        .parents([&tensor])
        .stateful()
    {
        OpsKind::Tracked(prep) => prep.finish(
            (
                ranges.to_vec(),
                tensor.primitive.shape(),
                B::float_device(&tensor.primitive),
            ),
            B::float_slice(tensor.primitive, ranges),
        ),
        OpsKind::UnTracked(prep) => prep.finish(B::float_slice(tensor.primitive, ranges)),
    }
}
/// Overwrite the sliced region of `tensor` with `value`.
///
/// Backward: positions inside the slice were replaced by `value`, so the
/// `tensor` gradient is the output gradient with zeros written into the slice,
/// while the `value` gradient is the slice of the output gradient.
fn float_slice_assign(
    tensor: FloatTensor<Self>,
    ranges: &[core::ops::Range<usize>],
    value: FloatTensor<Self>,
) -> FloatTensor<Self> {
    #[derive(Debug)]
    struct SliceAssign;
    // Replays the forward assign from the two checkpointed inputs.
    #[derive(new, Debug)]
    struct RetroSliceAssign<B: Backend> {
        tensor_id: NodeID,
        ranges: Vec<core::ops::Range<usize>>,
        value_id: NodeID,
        _backend: PhantomData<B>,
    }
    impl<B: Backend> RetroForward for RetroSliceAssign<B> {
        fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
            let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);
            let value = states.get_state::<B::FloatTensorPrimitive>(&self.value_id);
            let out = B::float_slice_assign(tensor, &self.ranges, value);
            states.save(out_node, out)
        }
    }
    impl<B: Backend> Backward<B, 2> for SliceAssign {
        // (ranges, value shape, device) captured at forward time.
        type State = (Vec<core::ops::Range<usize>>, Shape, B::Device);
        fn backward(
            self,
            ops: Ops<Self::State, 2>,
            grads: &mut Gradients,
            _checkpointer: &mut Checkpointer,
        ) {
            let (ranges, shape_rhs, device) = ops.state;
            // `ranges` is needed by both closures; duplicate clones only for
            // the parents that are actually tracked.
            let [ranges_4lhs, ranges_4rhs] = duplicate(&ops.parents, Some(ranges));
            binary::<B, _, _>(
                ops.parents,
                ops.node,
                grads,
                |grad| {
                    // Zero out the overwritten region in the lhs gradient.
                    let zeros = B::float_zeros(shape_rhs, &device);
                    B::float_slice_assign(grad, &ranges_4lhs.unwrap(), zeros)
                },
                |grad| B::float_slice(grad, &ranges_4rhs.unwrap()),
            );
        }
    }
    match SliceAssign
        .prepare::<C>([tensor.node.clone(), value.node.clone()])
        .memory_bound()
        .retro_forward(RetroSliceAssign::<B>::new(
            tensor.node.id,
            ranges.to_vec(),
            value.node.id,
        ))
        .parents([&tensor, &value])
        .stateful()
    {
        OpsKind::Tracked(prep) => prep.finish(
            (
                ranges.to_vec(),
                value.primitive.shape(),
                B::float_device(&value.primitive),
            ),
            B::float_slice_assign(tensor.primitive, ranges, value.primitive),
        ),
        OpsKind::UnTracked(prep) => prep.finish(B::float_slice_assign(
            tensor.primitive,
            ranges,
            value.primitive,
        )),
    }
}
/// Element-wise select: where `mask` is true take `source`, else `tensor`.
///
/// Backward: each input receives the output gradient only at the positions it
/// supplied (the other positions are masked to zero), then reduced back to its
/// own shape in case the forward broadcast.
fn float_mask_where(
    tensor: FloatTensor<Self>,
    mask: BoolTensor<Self>,
    source: FloatTensor<Self>,
) -> FloatTensor<Self> {
    #[derive(Debug)]
    struct MaskWhere;
    impl<B: Backend> Backward<B, 2> for MaskWhere {
        // (mask, lhs shape, rhs shape, device) captured at forward time.
        type State = (BoolTensor<B>, Shape, Shape, B::Device);
        fn backward(
            self,
            ops: Ops<Self::State, 2>,
            grads: &mut Gradients,
            _checkpointer: &mut Checkpointer,
        ) {
            let (mask, shape_lhs, shape_rhs, device) = ops.state;
            // Mask is needed by both closures; clone only for tracked parents.
            let [mask_4lhs, mask_4rhs] = duplicate(&ops.parents, Some(mask));
            binary::<B, _, _>(
                ops.parents,
                ops.node,
                grads,
                |grad| {
                    // lhs contributed where mask is false: zero the masked spots.
                    let zeros = B::float_zeros(shape_lhs.clone(), &device);
                    let grad = B::float_mask_where(grad, mask_4lhs.unwrap(), zeros);
                    broadcast_shape::<B>(grad, &shape_lhs)
                },
                |grad| {
                    // rhs contributed where mask is true: keep only those spots.
                    let zeros = B::float_zeros(shape_rhs.clone(), &device);
                    let grad = B::float_mask_where(zeros, mask_4rhs.unwrap(), grad);
                    broadcast_shape::<B>(grad, &shape_rhs)
                },
            );
        }
    }
    match MaskWhere
        .prepare::<C>([tensor.node, source.node])
        .compute_bound()
        .stateful()
    {
        OpsKind::Tracked(prep) => prep.finish(
            (
                mask.clone(),
                tensor.primitive.shape(),
                source.primitive.shape(),
                B::float_device(&source.primitive),
            ),
            B::float_mask_where(tensor.primitive, mask, source.primitive),
        ),
        OpsKind::UnTracked(prep) => prep.finish(B::float_mask_where(
            tensor.primitive,
            mask,
            source.primitive,
        )),
    }
}
/// Fill masked positions of `tensor` with the constant `value`.
///
/// Backward: the constant contributes no gradient, so the input gradient is
/// the output gradient with the masked positions zeroed out.
fn float_mask_fill(
    tensor: FloatTensor<Self>,
    mask: BoolTensor<B>,
    value: FloatElem<B>,
) -> FloatTensor<Self> {
    #[derive(Debug)]
    struct MaskFill;
    impl<B: Backend> Backward<B, 1> for MaskFill {
        type State = BoolTensor<B>;
        fn backward(
            self,
            ops: Ops<Self::State, 1>,
            grads: &mut Gradients,
            _checkpointer: &mut Checkpointer,
        ) {
            unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                // Re-use mask_fill with 0 to blank the filled positions.
                B::float_mask_fill(grad, ops.state, 0.elem())
            });
        }
    }
    match MaskFill
        .prepare::<C>([tensor.node])
        .compute_bound()
        .stateful()
    {
        OpsKind::Tracked(prep) => prep.finish(
            mask.clone(),
            B::float_mask_fill(tensor.primitive, mask, value),
        ),
        OpsKind::UnTracked(prep) => {
            prep.finish(B::float_mask_fill(tensor.primitive, mask, value))
        }
    }
}
// Comparison operations produce bool tensors, which carry no gradient.
// They are therefore plain delegations to the inner backend and do not
// register anything on the autodiff graph.
fn float_equal(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {
    B::float_equal(lhs.primitive, rhs.primitive)
}
fn float_equal_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {
    B::float_equal_elem(lhs.primitive, rhs)
}
fn float_greater(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {
    B::float_greater(lhs.primitive, rhs.primitive)
}
fn float_greater_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {
    B::float_greater_elem(lhs.primitive, rhs)
}
fn float_greater_equal(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {
    B::float_greater_equal(lhs.primitive, rhs.primitive)
}
fn float_greater_equal_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {
    B::float_greater_equal_elem(lhs.primitive, rhs)
}
fn float_lower(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {
    B::float_lower(lhs.primitive, rhs.primitive)
}
fn float_lower_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {
    B::float_lower_elem(lhs.primitive, rhs)
}
fn float_lower_equal(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {
    B::float_lower_equal(lhs.primitive, rhs.primitive)
}
fn float_lower_equal_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {
    B::float_lower_equal_elem(lhs.primitive, rhs)
}
fn float_detach(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
    // Detaching removes the tensor from the autodiff graph (fresh,
    // parent-less node) while preserving its `require_grad` setting.
    let requires_grad = Self::float_is_require_grad(&tensor);
    let detached = AutodiffTensor::new(tensor.primitive);
    if requires_grad {
        detached.require_grad()
    } else {
        detached
    }
}
fn float_set_require_grad(tensor: FloatTensor<Self>, require_grad: bool) -> FloatTensor<Self> {
    // Enabling grad marks the existing node; disabling rebuilds a tensor
    // that is disconnected from the graph.
    if require_grad {
        tensor.require_grad()
    } else {
        AutodiffTensor::new(tensor.primitive)
    }
}
fn float_is_require_grad(tensor: &FloatTensor<Self>) -> bool {
    // A tensor requires grad exactly when its node carries the `Grad`
    // requirement.
    match tensor.node.requirement {
        Requirement::Grad => true,
        _ => false,
    }
}
/// Mean over all elements, reducing to a scalar-shaped tensor.
///
/// Backward: every element receives `grad / N` where `N` is the number of
/// input elements; `unsqueeze_like` aligns the scalar gradient's rank with
/// the input shape before the broadcast multiply.
fn float_mean(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
    #[derive(Debug)]
    struct Mean;
    impl<B: Backend> Backward<B, 1> for Mean {
        // Input shape, saved to rebuild the full-size gradient.
        type State = Shape;
        fn backward(
            self,
            ops: Ops<Self::State, 1>,
            grads: &mut Gradients,
            _checkpointer: &mut Checkpointer,
        ) {
            unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                let shape = ops.state;
                let val = 1_f64 / shape.num_elements() as f64;
                let ones = B::float_ones(shape, &B::float_device(&grad));
                let val = B::float_mul_scalar(ones, val.elem());
                let grad = unsqueeze_like::<B>(grad, val.shape());
                B::float_mul(val, grad)
            });
        }
    }
    match Mean.prepare::<C>([tensor.node]).compute_bound().stateful() {
        OpsKind::Tracked(prep) => {
            prep.finish(tensor.primitive.shape(), B::float_mean(tensor.primitive))
        }
        OpsKind::UnTracked(prep) => prep.finish(B::float_mean(tensor.primitive)),
    }
}
/// Sum over all elements, reducing to a scalar-shaped tensor.
///
/// Backward: the scalar gradient is broadcast unchanged to every input
/// element (multiplied by a ones tensor of the input shape).
fn float_sum(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
    #[derive(Debug)]
    struct Sum;
    impl<B: Backend> Backward<B, 1> for Sum {
        // Input shape, saved to rebuild the full-size gradient.
        type State = Shape;
        fn backward(
            self,
            ops: Ops<Self::State, 1>,
            grads: &mut Gradients,
            _checkpointer: &mut Checkpointer,
        ) {
            unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                let val = B::float_ones(ops.state, &B::float_device(&grad));
                let grad = unsqueeze_like::<B>(grad, val.shape());
                B::float_mul(val, grad)
            });
        }
    }
    match Sum.prepare::<C>([tensor.node]).compute_bound().stateful() {
        OpsKind::Tracked(prep) => {
            prep.finish(tensor.primitive.shape(), B::float_sum(tensor.primitive))
        }
        OpsKind::UnTracked(prep) => prep.finish(B::float_sum(tensor.primitive)),
    }
}
/// Mean along dimension `dim` (dim kept with size 1).
///
/// Backward: each input element along `dim` receives `grad / dims[dim]`,
/// spread by multiplying a scaled ones tensor with the (broadcastable)
/// gradient summed along `dim`.
fn float_mean_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {
    #[derive(Debug)]
    struct MeanDim;
    impl<B: Backend> Backward<B, 1> for MeanDim {
        // (input shape, reduced dim).
        type State = (Shape, usize);
        fn backward(
            self,
            ops: Ops<Self::State, 1>,
            grads: &mut Gradients,
            _checkpointer: &mut Checkpointer,
        ) {
            let (shape, dim) = ops.state;
            unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                let val = 1_f64 / shape.dims[dim] as f64;
                let ones = B::float_ones(shape, &B::float_device(&grad));
                let val = B::float_mul_scalar(ones, B::FloatElem::from_elem(val));
                let grad = B::float_sum_dim(grad, dim);
                B::float_mul(val, grad)
            });
        }
    }
    match MeanDim
        .prepare::<C>([tensor.node])
        .compute_bound()
        .stateful()
    {
        OpsKind::Tracked(prep) => prep.finish(
            (tensor.primitive.shape(), dim),
            B::float_mean_dim(tensor.primitive, dim),
        ),
        OpsKind::UnTracked(prep) => prep.finish(B::float_mean_dim(tensor.primitive, dim)),
    }
}
/// Sum along dimension `dim` (dim kept with size 1).
///
/// Backward: the gradient is broadcast back along `dim` unchanged (ones of
/// the input shape times the gradient summed along `dim`).
fn float_sum_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {
    #[derive(Debug)]
    struct SumDim;
    impl<B: Backend> Backward<B, 1> for SumDim {
        // (input shape, reduced dim).
        type State = (Shape, usize);
        fn backward(
            self,
            ops: Ops<Self::State, 1>,
            grads: &mut Gradients,
            _checkpointer: &mut Checkpointer,
        ) {
            let (shape, dim) = ops.state;
            unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                let ones = B::float_ones(shape, &B::float_device(&grad));
                let grad = B::float_sum_dim(grad, dim);
                B::float_mul(ones, grad)
            });
        }
    }
    match SumDim
        .prepare::<C>([tensor.node])
        .compute_bound()
        .stateful()
    {
        OpsKind::Tracked(prep) => prep.finish(
            (tensor.primitive.shape(), dim),
            B::float_sum_dim(tensor.primitive, dim),
        ),
        OpsKind::UnTracked(prep) => prep.finish(B::float_sum_dim(tensor.primitive, dim)),
    }
}
// argmax/argmin return integer index tensors: non-differentiable, so they
// bypass the graph and delegate straight to the inner backend.
fn float_argmax(tensor: FloatTensor<Self>, dim: usize) -> IntTensor<B> {
    B::float_argmax(tensor.primitive, dim)
}
fn float_argmin(tensor: FloatTensor<Self>, dim: usize) -> IntTensor<B> {
    B::float_argmin(tensor.primitive, dim)
}
/// Element-wise exponential.
///
/// Backward: d/dx exp(x) = exp(x); the input is checkpointed and the output
/// recomputed from it during the backward pass.
fn float_exp(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
    #[derive(Debug)]
    struct Exp;
    retro_unary!(RetroExp, B::float_exp);
    impl<B: Backend> Backward<B, 1> for Exp {
        // NodeID of the checkpointed input.
        type State = NodeID;
        fn backward(
            self,
            ops: Ops<Self::State, 1>,
            grads: &mut Gradients,
            checkpointer: &mut Checkpointer,
        ) {
            let input = checkpointer.retrieve_node_output(ops.state);
            let output = B::float_exp(input);
            unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                B::float_mul(grad, output)
            });
        }
    }
    match Exp
        .prepare::<C>([tensor.node.clone()])
        .memory_bound()
        .retro_forward(RetroExp::<B>::new(tensor.node.id))
        .parents([&tensor])
        .stateful()
    {
        OpsKind::Tracked(mut prep) => {
            let state = prep.checkpoint(&tensor);
            prep.finish(state, B::float_exp(tensor.primitive))
        }
        OpsKind::UnTracked(prep) => prep.finish(B::float_exp(tensor.primitive)),
    }
}
/// Element-wise natural logarithm.
///
/// Backward: d/dx ln(x) = 1/x, computed as x^(-1) from the checkpointed input.
fn float_log(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
    #[derive(Debug)]
    struct Log;
    retro_unary!(RetroLog, B::float_log);
    impl<B: Backend> Backward<B, 1> for Log {
        // NodeID of the checkpointed input.
        type State = NodeID;
        fn backward(
            self,
            ops: Ops<Self::State, 1>,
            grads: &mut Gradients,
            checkpointer: &mut Checkpointer,
        ) {
            let input = checkpointer.retrieve_node_output(ops.state);
            unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                let value = B::float_powf_scalar(input, -1.0);
                B::float_mul(grad, value)
            });
        }
    }
    match Log
        .prepare::<C>([tensor.node.clone()])
        .memory_bound()
        .retro_forward(RetroLog::<B>::new(tensor.node.id))
        .parents([&tensor])
        .stateful()
    {
        OpsKind::Tracked(mut prep) => {
            let state = prep.checkpoint(&tensor);
            prep.finish(state, B::float_log(tensor.primitive))
        }
        OpsKind::UnTracked(prep) => prep.finish(B::float_log(tensor.primitive)),
    }
}
/// Element-wise ln(1 + x).
///
/// Backward: d/dx ln(1 + x) = 1/(1 + x), computed as (x + 1)^(-1) from the
/// checkpointed input.
fn float_log1p(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
    #[derive(Debug)]
    struct Log1P;
    retro_unary!(RetroLog1P, B::float_log1p);
    impl<B: Backend> Backward<B, 1> for Log1P {
        // NodeID of the checkpointed input.
        type State = NodeID;
        fn backward(
            self,
            ops: Ops<Self::State, 1>,
            grads: &mut Gradients,
            checkpointer: &mut Checkpointer,
        ) {
            let input = checkpointer.retrieve_node_output(ops.state);
            unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                let value = B::float_add_scalar(input, 1.elem());
                let value = B::float_powf_scalar(value, -1.0);
                B::float_mul(grad, value)
            });
        }
    }
    match Log1P
        .prepare::<C>([tensor.node.clone()])
        .memory_bound()
        .retro_forward(RetroLog1P::<B>::new(tensor.node.id))
        .parents([&tensor])
        .stateful()
    {
        OpsKind::Tracked(mut prep) => {
            let state = prep.checkpoint(&tensor);
            prep.finish(state, B::float_log1p(tensor.primitive))
        }
        OpsKind::UnTracked(prep) => prep.finish(B::float_log1p(tensor.primitive)),
    }
}
/// Element-wise power with a scalar exponent: x^value.
///
/// Backward: d/dx x^v = v * x^(v-1), recomputed from the checkpointed input.
fn float_powf_scalar(tensor: FloatTensor<Self>, value: f32) -> FloatTensor<Self> {
    #[derive(Debug)]
    struct PowfScalar;
    // Replays the forward pow from a checkpointed base; the scalar exponent
    // is stored inline.
    #[derive(new, Debug)]
    struct RetroPowfScalar<B: Backend> {
        lhs_id: NodeID,
        rhs: f32,
        _backend: PhantomData<B>,
    }
    impl<B: Backend> RetroForward for RetroPowfScalar<B> {
        fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
            let lhs = states.get_state::<B::FloatTensorPrimitive>(&self.lhs_id);
            let out = B::float_powf_scalar(lhs, self.rhs);
            states.save(out_node, out)
        }
    }
    impl<B: Backend> Backward<B, 1> for PowfScalar {
        // (checkpointed input id, exponent).
        type State = (NodeID, f32);
        fn backward(
            self,
            ops: Ops<Self::State, 1>,
            grads: &mut Gradients,
            checkpointer: &mut Checkpointer,
        ) {
            let (tensor_id, value) = ops.state;
            let tensor = checkpointer.retrieve_node_output(tensor_id);
            unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                let tmp = B::float_powf_scalar(tensor, value - 1.0);
                let value = B::float_mul_scalar(tmp, value.elem());
                B::float_mul(grad, value)
            });
        }
    }
    match PowfScalar
        .prepare::<C>([tensor.node.clone()])
        .memory_bound()
        .retro_forward(RetroPowfScalar::<B>::new(tensor.node.id, value))
        .parents([&tensor])
        .stateful()
    {
        OpsKind::Tracked(mut prep) => {
            let state = (prep.checkpoint(&tensor), value);
            prep.finish(state, B::float_powf_scalar(tensor.primitive, value))
        }
        OpsKind::UnTracked(prep) => prep.finish(B::float_powf_scalar(tensor.primitive, value)),
    }
}
/// Element-wise square root.
///
/// Backward: d/dx sqrt(x) = 1/(2*sqrt(x)), computed as x^(-0.5) / 2 from the
/// checkpointed input.
fn float_sqrt(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
    #[derive(Debug)]
    struct Sqrt;
    retro_unary!(RetroSqrt, B::float_sqrt);
    impl<B: Backend> Backward<B, 1> for Sqrt {
        // NodeID of the checkpointed input.
        type State = NodeID;
        fn backward(
            self,
            ops: Ops<Self::State, 1>,
            grads: &mut Gradients,
            checkpointer: &mut Checkpointer,
        ) {
            let input = checkpointer.retrieve_node_output(ops.state);
            unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                let value = B::float_div_scalar(B::float_powf_scalar(input, -0.5), 2.elem());
                B::float_mul(grad, value)
            });
        }
    }
    match Sqrt
        .prepare::<C>([tensor.node.clone()])
        .memory_bound()
        .retro_forward(RetroSqrt::<B>::new(tensor.node.id))
        .parents([&tensor])
        .stateful()
    {
        OpsKind::Tracked(mut prep) => {
            let state = prep.checkpoint(&tensor);
            prep.finish(state, B::float_sqrt(tensor.primitive))
        }
        OpsKind::UnTracked(prep) => prep.finish(B::float_sqrt(tensor.primitive)),
    }
}
/// Element-wise absolute value.
///
/// Backward: grad * sign(x) (the subgradient at 0 follows the backend's
/// `float_sign(0)` convention).
fn float_abs(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
    #[derive(Debug)]
    struct Abs;
    retro_unary!(RetroAbs, B::float_abs);
    impl<B: Backend> Backward<B, 1> for Abs {
        // NodeID of the checkpointed input.
        type State = NodeID;
        fn backward(
            self,
            ops: Ops<Self::State, 1>,
            grads: &mut Gradients,
            checkpointer: &mut Checkpointer,
        ) {
            let tensor: B::FloatTensorPrimitive = checkpointer.retrieve_node_output(ops.state);
            let state = B::float_sign(tensor);
            unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                B::float_mul(grad, state)
            });
        }
    }
    match Abs
        .prepare::<C>([tensor.node.clone()])
        .memory_bound()
        .retro_forward(RetroAbs::<B>::new(tensor.node.id))
        .parents([&tensor])
        .stateful()
    {
        OpsKind::Tracked(mut prep) => {
            let state = prep.checkpoint(&tensor);
            prep.finish(state, B::float_abs(tensor.primitive))
        }
        OpsKind::UnTracked(prep) => prep.finish(B::float_abs(tensor.primitive)),
    }
}
/// Element-wise cosine.
///
/// Backward: d/dx cos(x) = -sin(x), recomputed from the checkpointed input.
fn float_cos(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
    #[derive(Debug)]
    struct Cos;
    retro_unary!(RetroCos, B::float_cos);
    impl<B: Backend> Backward<B, 1> for Cos {
        // NodeID of the checkpointed input.
        type State = NodeID;
        fn backward(
            self,
            ops: Ops<Self::State, 1>,
            grads: &mut Gradients,
            checkpointer: &mut Checkpointer,
        ) {
            let input = checkpointer.retrieve_node_output(ops.state);
            unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                let value = B::float_neg(B::float_sin(input));
                B::float_mul(grad, value)
            });
        }
    }
    match Cos
        .prepare::<C>([tensor.node.clone()])
        .memory_bound()
        .retro_forward(RetroCos::<B>::new(tensor.node.id))
        .parents([&tensor])
        .stateful()
    {
        OpsKind::Tracked(mut prep) => {
            let state = prep.checkpoint(&tensor);
            prep.finish(state, B::float_cos(tensor.primitive))
        }
        OpsKind::UnTracked(prep) => prep.finish(B::float_cos(tensor.primitive)),
    }
}
/// Element-wise sine.
///
/// Backward: d/dx sin(x) = cos(x), recomputed from the checkpointed input.
fn float_sin(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
    #[derive(Debug)]
    struct Sin;
    retro_unary!(RetroSin, B::float_sin);
    impl<B: Backend> Backward<B, 1> for Sin {
        // NodeID of the checkpointed input.
        type State = NodeID;
        fn backward(
            self,
            ops: Ops<Self::State, 1>,
            grads: &mut Gradients,
            checkpointer: &mut Checkpointer,
        ) {
            let state = checkpointer.retrieve_node_output(ops.state);
            unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                let value = B::float_cos(state);
                B::float_mul(grad, value)
            });
        }
    }
    match Sin
        .prepare::<C>([tensor.node.clone()])
        .memory_bound()
        .retro_forward(RetroSin::<B>::new(tensor.node.id))
        .parents([&tensor])
        .stateful()
    {
        OpsKind::Tracked(mut prep) => {
            let state = prep.checkpoint(&tensor);
            prep.finish(state, B::float_sin(tensor.primitive))
        }
        OpsKind::UnTracked(prep) => prep.finish(B::float_sin(tensor.primitive)),
    }
}
/// Element-wise hyperbolic tangent.
///
/// Backward: d/dx tanh(x) = 1 - tanh(x)^2; tanh is recomputed from the
/// checkpointed input.
fn float_tanh(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
    #[derive(Debug)]
    struct Tanh;
    retro_unary!(RetroTanh, B::float_tanh);
    impl<B: Backend> Backward<B, 1> for Tanh {
        // NodeID of the checkpointed input.
        type State = NodeID;
        fn backward(
            self,
            ops: Ops<Self::State, 1>,
            grads: &mut Gradients,
            checkpointer: &mut Checkpointer,
        ) {
            let input = checkpointer.retrieve_node_output(ops.state);
            let state = B::float_tanh(input);
            unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                // 1 - tanh(x)^2, written as (-tanh(x)^2) + 1.
                let value = B::float_add_scalar(
                    B::float_neg(B::float_powf_scalar(state, 2.0)),
                    1.elem(),
                );
                B::float_mul(grad, value)
            });
        }
    }
    match Tanh
        .prepare::<C>([tensor.node.clone()])
        .memory_bound()
        .retro_forward(RetroTanh::<B>::new(tensor.node.id))
        .parents([&tensor])
        .stateful()
    {
        OpsKind::Tracked(mut prep) => {
            let state = prep.checkpoint(&tensor);
            prep.finish(state, B::float_tanh(tensor.primitive))
        }
        OpsKind::UnTracked(prep) => prep.finish(B::float_tanh(tensor.primitive)),
    }
}
/// Element-wise rounding to the nearest integer.
///
/// Backward: rounding is piecewise constant, so the gradient is zero
/// everywhere.
fn float_round(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
    #[derive(Debug)]
    struct Round;
    retro_unary!(RetroRound, B::float_round);
    impl<B: Backend> Backward<B, 1> for Round {
        // (input shape, device) to build the zero gradient.
        type State = (Shape, B::Device);
        fn backward(
            self,
            ops: Ops<Self::State, 1>,
            grads: &mut Gradients,
            _checkpointer: &mut Checkpointer,
        ) {
            let (shape, device) = ops.state;
            unary::<B, _>(ops.parents, ops.node, grads, |_grad| {
                B::float_zeros(shape, &device)
            })
        }
    }
    match Round
        .prepare::<C>([tensor.node.clone()])
        .memory_bound()
        .retro_forward(RetroRound::<B>::new(tensor.node.id))
        .parents([&tensor])
        .stateful()
    {
        OpsKind::Tracked(preps) => preps.finish(
            (tensor.primitive.shape(), B::float_device(&tensor.primitive)),
            B::float_round(tensor.primitive),
        ),
        OpsKind::UnTracked(preps) => preps.finish(B::float_round(tensor.primitive)),
    }
}
/// Element-wise floor.
///
/// Backward: floor is piecewise constant, so the gradient is zero everywhere.
fn float_floor(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
    #[derive(Debug)]
    struct Floor;
    retro_unary!(RetroFloor, B::float_floor);
    impl<B: Backend> Backward<B, 1> for Floor {
        // (input shape, device) to build the zero gradient.
        type State = (Shape, B::Device);
        fn backward(
            self,
            ops: Ops<Self::State, 1>,
            grads: &mut Gradients,
            _checkpointer: &mut Checkpointer,
        ) {
            let (shape, device) = ops.state;
            unary::<B, _>(ops.parents, ops.node, grads, |_grad| {
                B::float_zeros(shape, &device)
            })
        }
    }
    match Floor
        .prepare::<C>([tensor.node.clone()])
        .memory_bound()
        .retro_forward(RetroFloor::<B>::new(tensor.node.id))
        .parents([&tensor])
        .stateful()
    {
        OpsKind::Tracked(preps) => preps.finish(
            (tensor.primitive.shape(), B::float_device(&tensor.primitive)),
            B::float_floor(tensor.primitive),
        ),
        OpsKind::UnTracked(preps) => preps.finish(B::float_floor(tensor.primitive)),
    }
}
/// Element-wise ceiling.
///
/// Backward: ceil is piecewise constant, so the gradient is zero everywhere.
///
/// Bug fix: both forward branches previously called `B::float_floor`, making
/// the autodiff backend's `float_ceil` compute floor. The retro-forward
/// (`retro_unary!(RetroCeil, B::float_ceil)`) already used ceil, so eager and
/// recomputed forwards disagreed; the forward calls now use `B::float_ceil`.
fn float_ceil(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
    #[derive(Debug)]
    struct Ceil;
    retro_unary!(RetroCeil, B::float_ceil);
    impl<B: Backend> Backward<B, 1> for Ceil {
        // (input shape, device) to build the zero gradient.
        type State = (Shape, B::Device);
        fn backward(
            self,
            ops: Ops<Self::State, 1>,
            grads: &mut Gradients,
            _checkpointer: &mut Checkpointer,
        ) {
            let (shape, device) = ops.state;
            unary::<B, _>(ops.parents, ops.node, grads, |_grad| {
                B::float_zeros(shape, &device)
            })
        }
    }
    match Ceil
        .prepare::<C>([tensor.node.clone()])
        .memory_bound()
        .retro_forward(RetroCeil::<B>::new(tensor.node.id))
        .parents([&tensor])
        .stateful()
    {
        OpsKind::Tracked(preps) => preps.finish(
            (tensor.primitive.shape(), B::float_device(&tensor.primitive)),
            B::float_ceil(tensor.primitive),
        ),
        OpsKind::UnTracked(preps) => preps.finish(B::float_ceil(tensor.primitive)),
    }
}
/// Element-wise error function.
///
/// Backward: d/dx erf(x) = (2/sqrt(pi)) * exp(-x^2), recomputed from the
/// checkpointed input.
fn float_erf(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
    #[derive(Debug)]
    struct Erf;
    retro_unary!(RetroErf, B::float_erf);
    impl<B: Backend> Backward<B, 1> for Erf {
        // NodeID of the checkpointed input.
        type State = NodeID;
        fn backward(
            self,
            ops: Ops<Self::State, 1>,
            grads: &mut Gradients,
            checkpointer: &mut Checkpointer,
        ) {
            unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                // `ops` here shadows the outer `ops` with the retrieved input.
                let ops = checkpointer.retrieve_node_output(ops.state);
                let exponent = B::float_neg(B::float_powf_scalar(ops, 2.0));
                let numerator = B::float_mul_scalar(B::float_exp(exponent), 2.0.elem());
                let denominator = core::f64::consts::PI.sqrt().elem();
                let value = B::float_div_scalar(numerator, denominator);
                B::float_mul(grad, value)
            });
        }
    }
    match Erf
        .prepare::<C>([tensor.node.clone()])
        .memory_bound()
        .retro_forward(RetroErf::<B>::new(tensor.node.id))
        .parents([&tensor])
        .stateful()
    {
        OpsKind::Tracked(mut prep) => {
            let state = prep.checkpoint(&tensor);
            prep.finish(state, B::float_erf(tensor.primitive))
        }
        OpsKind::UnTracked(prep) => prep.finish(B::float_erf(tensor.primitive)),
    }
}
/// Concatenate `tensors` along `dim`.
///
/// Uses a hand-written `Step` instead of the usual prepare/finish machinery
/// because the number of parents is dynamic. Backward: the output gradient is
/// sliced along `dim` into one contiguous piece per (tracked) input.
fn float_cat(tensors: Vec<FloatTensor<Self>>, dim: usize) -> FloatTensor<Self> {
    #[derive(new, Debug)]
    struct CatStep<B: Backend> {
        // One entry per input; `None` for inputs that don't require grad.
        nodes: Vec<Option<NodeRef>>,
        // The dimension of each tensor along the dim dimension.
        // This indicates the number of dimension concatenated for each tensor.
        dim_sizes: Vec<usize>,
        output: NodeRef,
        phantom: PhantomData<B>,
        dim: usize,
    }
    impl<B: Backend> Step for CatStep<B> {
        fn step(self: Box<Self>, grads: &mut Gradients, _checkpointer: &mut Checkpointer) {
            let grad = grads.consume::<B>(&self.output);
            // Full ranges for every dimension; only `self.dim` is narrowed
            // per input below.
            let ranges: Vec<_> = grad.shape().dims.iter().map(|v| 0..*v).collect();
            let mut current_index = 0;
            self.nodes
                .into_iter()
                .zip(self.dim_sizes)
                .filter_map(|(node, dim_size)| node.map(|node| (node, dim_size)))
                .for_each(|(node, dim_size)| {
                    let mut ranges = ranges.clone();
                    ranges[self.dim] = current_index..dim_size + current_index;
                    current_index += dim_size;
                    grads.register::<B>(node.id, B::float_slice(grad.clone(), &ranges));
                });
        }
        fn node(&self) -> NodeID {
            self.output.id
        }
        fn parents(&self) -> Vec<NodeID> {
            self.nodes
                .iter()
                .filter_map(|node| node.clone())
                .map(|node| node.id)
                .collect()
        }
        fn depth(&self) -> usize {
            self.output.order
        }
    }
    let mut nodes = Vec::with_capacity(tensors.len());
    let mut primitives = Vec::with_capacity(tensors.len());
    let mut dim_sizes = Vec::with_capacity(tensors.len());
    tensors.into_iter().for_each(|tensor| {
        dim_sizes.push(tensor.primitive.shape().dims[dim]);
        nodes.push(tensor.node);
        primitives.push(tensor.primitive);
    });
    let requirement = Requirement::from_nodes(&nodes);
    // For simplicity, this operation does not checkpoint anything
    let cat_computing_property = ComputingProperty::Ambiguous;
    let checkpointer_builder = CheckpointerBuilder::default();
    let output = B::float_cat(primitives, dim);
    // No parent requires grad: return an untracked result without a step.
    if requirement.is_none() {
        return AutodiffTensor::from_parents(
            output,
            &nodes,
            requirement,
            cat_computing_property,
        );
    }
    let output =
        AutodiffTensor::from_parents(output, &nodes, requirement, cat_computing_property);
    let nodes = nodes
        .into_iter()
        .map(|node| node.clone_if_require_grad())
        .collect::<Vec<_>>();
    let ops = CatStep::<B>::new(nodes, dim_sizes, output.node.clone(), dim);
    output.register_step(ops, checkpointer_builder)
}
/// Max along `dim`.
///
/// Backward is handled by the shared `MaxMinDim` op (defined elsewhere),
/// which routes the gradient to the argmax positions via the saved indices.
fn float_max_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {
    match MaxMinDim
        .prepare::<C>([tensor.node])
        .compute_bound()
        .stateful()
    {
        OpsKind::Tracked(prep) => {
            // Tracked path needs the indices for backward, so use the
            // `_with_indices` variant even though only values are returned.
            let shape = tensor.primitive.shape();
            let (tensor, index) = B::float_max_dim_with_indices(tensor.primitive, dim);
            prep.finish((index, shape), tensor)
        }
        OpsKind::UnTracked(prep) => prep.finish(B::float_max_dim(tensor.primitive, dim)),
    }
}
/// Max along `dim`, also returning the argmax indices (indices are not
/// differentiable and bypass the graph).
fn float_max_dim_with_indices(
    tensor: FloatTensor<Self>,
    dim: usize,
) -> (FloatTensor<Self>, IntTensor<B>) {
    match MaxMinDim
        .prepare::<C>([tensor.node])
        .compute_bound()
        .stateful()
    {
        OpsKind::Tracked(prep) => {
            let shape = tensor.primitive.shape();
            let (tensor, index) = B::float_max_dim_with_indices(tensor.primitive, dim);
            let tensor = prep.finish((index.clone(), shape), tensor);
            (tensor, index)
        }
        OpsKind::UnTracked(prep) => {
            let (tensor, index) = B::float_max_dim_with_indices(tensor.primitive, dim);
            let tensor = prep.finish(tensor);
            (tensor, index)
        }
    }
}
/// Min along `dim`. Mirrors `float_max_dim`; backward via the shared
/// `MaxMinDim` op using the saved argmin indices.
fn float_min_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {
    match MaxMinDim
        .prepare::<C>([tensor.node])
        .compute_bound()
        .stateful()
    {
        OpsKind::Tracked(prep) => {
            let shape = tensor.primitive.shape();
            let (tensor, index) = B::float_min_dim_with_indices(tensor.primitive, dim);
            prep.finish((index, shape), tensor)
        }
        OpsKind::UnTracked(prep) => prep.finish(B::float_min_dim(tensor.primitive, dim)),
    }
}
/// Min along `dim`, also returning the argmin indices (indices are not
/// differentiable and bypass the graph).
fn float_min_dim_with_indices(
    tensor: FloatTensor<Self>,
    dim: usize,
) -> (FloatTensor<Self>, IntTensor<B>) {
    match MaxMinDim
        .prepare::<C>([tensor.node])
        .compute_bound()
        .stateful()
    {
        OpsKind::Tracked(prep) => {
            let shape = tensor.primitive.shape();
            let (tensor, index) = B::float_min_dim_with_indices(tensor.primitive, dim);
            let tensor = prep.finish((index.clone(), shape), tensor);
            (tensor, index)
        }
        OpsKind::UnTracked(prep) => {
            let (tensor, index) = B::float_min_dim_with_indices(tensor.primitive, dim);
            let tensor = prep.finish(tensor);
            (tensor, index)
        }
    }
}
/// Cast to an int tensor. Non-differentiable: the result leaves the graph.
fn float_into_int(tensor: FloatTensor<Self>) -> <Autodiff<B> as Backend>::IntTensorPrimitive {
    B::float_into_int(tensor.primitive)
}
/// Element-wise power with a tensor exponent: lhs^rhs.
///
/// Backward (both inputs checkpointed):
///   d/d(lhs) = rhs * lhs^(rhs-1) * grad
///   d/d(rhs) = lhs^rhs * ln(lhs) * grad
/// Each partial is then reduced through the recorded broadcast.
fn float_powf(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {
    #[derive(Debug)]
    struct PowF;
    retro_binary!(RetroPowf, B::float_powf);
    impl<B: Backend> Backward<B, 2> for PowF {
        // (lhs checkpoint id, rhs checkpoint id, broadcast info).
        type State = (NodeID, NodeID, BinaryOpsBroadcast);
        fn backward(
            self,
            ops: Ops<Self::State, 2>,
            grads: &mut Gradients,
            checkpointer: &mut Checkpointer,
        ) {
            let (lhs_id, rhs_id, broadcast) = ops.state;
            let lhs: B::FloatTensorPrimitive = checkpointer.retrieve_node_output(lhs_id);
            let rhs: B::FloatTensorPrimitive = checkpointer.retrieve_node_output(rhs_id);
            // Both lhs and rhs are needed for both lhs and rhs gradients, but we clone them
            // the number of times required by the parents specification.
            let [rhs_4lhs, rhs_4rhs] = duplicate(&ops.parents, Some(rhs));
            let [lhs_4lhs, lhs_4rhs] = duplicate(&ops.parents, Some(lhs));
            binary::<B, _, _>(
                ops.parents,
                ops.node,
                grads,
                |grad| {
                    //rhs*(lhs.val**(rhs-1))*grad
                    let rhs1 = rhs_4lhs.unwrap();
                    let rhs2 = rhs1.clone();
                    let lhs = lhs_4lhs.unwrap();
                    let tmp = B::float_powf(
                        lhs,
                        B::float_sub_scalar(rhs1, B::FloatElem::from_elem(1.0)),
                    );
                    let value = B::float_mul(tmp, rhs2);
                    let grad = B::float_mul(grad, value);
                    broadcast.backward_lhs::<B>(grad)
                },
                |grad| {
                    //lhs**rhs * ln(lhs) * grad
                    let rhs = rhs_4rhs.unwrap();
                    let lhs1 = lhs_4rhs.unwrap();
                    let lhs2 = lhs1.clone();
                    let tmp = B::float_powf(lhs1, rhs);
                    let value = B::float_mul(tmp, B::float_log(lhs2));
                    let grad = B::float_mul(grad, value);
                    broadcast.backward_rhs::<B>(grad)
                },
            );
        }
    }
    // Record broadcast info before the primitives are consumed.
    let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);
    match PowF
        .prepare::<C>([lhs.node.clone(), rhs.node.clone()])
        .memory_bound()
        .retro_forward(RetroPowf::<B>::new(lhs.node.id, rhs.node.id))
        .parents([&lhs, &rhs])
        .stateful()
    {
        OpsKind::Tracked(mut prep) => {
            let lhs_state = prep.checkpoint(&lhs);
            let rhs_state = prep.checkpoint(&rhs);
            prep.finish(
                (lhs_state, rhs_state, broadcast),
                B::float_powf(lhs.primitive, rhs.primitive),
            )
        }
        OpsKind::UnTracked(prep) => prep.finish(B::float_powf(lhs.primitive, rhs.primitive)),
    }
}
/// Element-wise sign.
///
/// Backward: sign is piecewise constant, so the gradient is zero everywhere
/// (implemented as grad * 0 to keep shape and device). Registered stateless
/// since backward needs nothing from the forward pass.
fn float_sign(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
    #[derive(Debug)]
    struct Sign;
    retro_unary!(RetroSign, B::float_sign);
    impl<B: Backend> Backward<B, 1> for Sign {
        type State = ();
        fn backward(
            self,
            ops: Ops<Self::State, 1>,
            grads: &mut Gradients,
            _checkpointer: &mut Checkpointer,
        ) {
            unary::<B, _>(ops.parents, ops.node, grads, |grad|
                // Always return 0 because the derivative of the sign function
                // does not contribute to gradient updates in a meaningful way.
                B::float_mul_scalar(grad, 0.elem()));
        }
    }
    Sign.prepare::<C>([tensor.node.clone()])
        .memory_bound()
        .retro_forward(RetroSign::<B>::new(tensor.node.id))
        .parents([&tensor])
        .stateless(B::float_sign(tensor.primitive))
}
/// Broadcast `tensor` to `shape` (NumPy-style expand; may add leading dims).
///
/// Backward: the gradient is summed over every axis that was broadcast
/// (size-1 in the right-aligned input shape but larger in the output), then
/// reshaped back to the input shape.
fn float_expand(tensor: FloatTensor<Self>, shape: Shape) -> FloatTensor<Self> {
    // D1: tensor, D2: shape
    #[derive(Debug)]
    struct ExpandDim;
    // Replays the forward expand from a checkpointed input.
    #[derive(new, Debug)]
    struct RetroExpand<B: Backend> {
        input_id: NodeID,
        shape: Shape,
        _backend: PhantomData<B>,
    }
    impl<B: Backend> RetroForward for RetroExpand<B> {
        fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
            let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);
            let out = B::float_expand(input, self.shape.clone());
            states.save(out_node, out)
        }
    }
    impl<B: Backend> Backward<B, 1> for ExpandDim {
        // (input shape, output shape).
        type State = (Shape, Shape);
        fn backward(
            self,
            ops: Ops<Self::State, 1>,
            grads: &mut Gradients,
            _checkpointer: &mut Checkpointer,
        ) {
            let (shape_in, shape_out) = ops.state;
            let ndims_in = shape_in.num_dims();
            let ndims_out = shape_out.num_dims();
            // Right-align the input shape against the output rank, padding
            // leading dims with 1 (standard broadcast alignment).
            let mut shape_expanded = vec![1; ndims_out];
            debug_assert!(ndims_out >= ndims_in);
            for i in 0..ndims_in {
                shape_expanded[i + (ndims_out - ndims_in)] = shape_in.dims[i];
            }
            unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                let shape_grad = grad.shape();
                let mut grad = grad;
                #[allow(clippy::needless_range_loop)]
                for i in 0..ndims_out {
                    // Sum over each axis the forward pass broadcast.
                    if shape_expanded[i] == 1 && shape_grad.dims[i] != 1 {
                        grad = B::float_sum_dim(grad, i);
                    }
                }
                B::float_reshape(grad, shape_in)
            });
        }
    }
    match ExpandDim
        .prepare::<C>([tensor.node.clone()])
        .memory_bound()
        .retro_forward(RetroExpand::<B>::new(tensor.node.id, shape.clone()))
        .parents([&tensor])
        .stateful()
    {
        OpsKind::Tracked(prep) => prep.finish(
            (tensor.primitive.shape(), shape.clone()),
            B::float_expand(tensor.primitive, shape),
        ),
        OpsKind::UnTracked(prep) => prep.finish(B::float_expand(tensor.primitive, shape)),
    }
}
fn float_sort(tensor: FloatTensor<Self>, dim: usize, descending: bool) -> FloatTensor<Self> {
match super::sort::SortDim
.prepare::<C>([tensor.node])
.compute_bound()
.stateful()
{
OpsKind::Tracked(prep) => {
let shape = tensor.primitive.shape();
let (tensor, indices) =
B::float_sort_with_indices(tensor.primitive, dim, descending);
prep.finish((indices, shape), tensor)
}
OpsKind::UnTracked(prep) => {
prep.finish(B::float_sort(tensor.primitive, dim, descending))
}
}
}
fn float_sort_with_indices(
tensor: FloatTensor<Self>,
dim: usize,
descending: bool,
) -> (FloatTensor<Self>, IntTensor<B>) {
match super::sort::SortDim
.prepare::<C>([tensor.node])
.compute_bound()
.stateful()
{
OpsKind::Tracked(prep) => {
let shape = tensor.primitive.shape();
let (tensor, indices) =
B::float_sort_with_indices(tensor.primitive, dim, descending);
let tensor = prep.finish((indices.clone(), shape), tensor);
(tensor, indices)
}
OpsKind::UnTracked(prep) => {
let (tensor, indices) =
B::float_sort_with_indices(tensor.primitive, dim, descending);
let tensor = prep.finish(tensor);
(tensor, indices)
}
}
}
    fn float_argsort(tensor: FloatTensor<Self>, dim: usize, descending: bool) -> IntTensor<B> {
        // Argsort produces integer indices, which are not differentiable, so
        // the inner backend result is returned without any graph tracking.
        B::float_argsort(tensor.primitive, dim, descending)
    }
    fn float_repeat_dim(tensor: FloatTensor<Self>, dim: usize, times: usize) -> FloatTensor<Self> {
        // Backward op tag for repeat_dim; state is (dim, times).
        #[derive(Debug)]
        struct Repeat;
        // Re-runs the forward repeat from a checkpointed input during the
        // retro-forward (recompute) phase.
        #[derive(new, Debug)]
        struct RetroRepeat<B: Backend> {
            tensor_id: NodeID,
            dim: usize,
            times: usize,
            _backend: PhantomData<B>,
        }
        impl<B: Backend> RetroForward for RetroRepeat<B> {
            fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
                let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);
                let out = B::float_repeat_dim(tensor, self.dim, self.times);
                states.save(out_node, out)
            }
        }
        impl<B: Backend> Backward<B, 1> for Repeat {
            type State = (usize, usize);
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (dim, times) = ops.state;
                // The gradient of repeat is the sum of the gradients of all
                // `times` copies of each original element along `dim`.
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    let mut dims = grad.shape().dims;
                    let orig_dim_size = dims[dim] / times;
                    if orig_dim_size > 1 {
                        dims[dim] = orig_dim_size;
                        let orig_dims = dims.clone();
                        // NOTE(review): this reshape assumes the copies are interleaved
                        // along `dim` ([x0, x0, x1, x1] for times = 2). If the backend's
                        // repeat_dim tiles whole blocks instead ([x0, x1, x0, x1]), the
                        // factors must be swapped to (times, orig_dim_size) with the sum
                        // taken over `dim` — verify against B::float_repeat_dim's layout.
                        dims.insert(dim + 1, times); // shape [..., orig_dim_size, times, ...]
                        let grad = B::float_reshape(grad, Shape::from(dims));
                        let grad = B::float_sum_dim(grad, dim + 1); // sum over repeat times
                        B::float_reshape(grad, Shape::from(orig_dims))
                    } else {
                        // Original size along `dim` was 1: every output element there is
                        // a copy, so summing over `dim` (kept as size 1 by sum_dim) is
                        // layout-independent and exact.
                        B::float_sum_dim(grad, dim)
                    }
                });
            }
        }
        match Repeat
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroRepeat::<B>::new(tensor.node.id, dim, times))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(prep) => prep.finish(
                (dim, times),
                B::float_repeat_dim(tensor.primitive, dim, times),
            ),
            OpsKind::UnTracked(prep) => {
                prep.finish(B::float_repeat_dim(tensor.primitive, dim, times))
            }
        }
    }
    fn float_cast(tensor: FloatTensor<Self>, dtype: burn_tensor::FloatDType) -> FloatTensor<Self> {
        // NOTE(review): unlike the sibling ops here, the result is wrapped in a
        // fresh AutodiffTensor instead of being registered through prepare(),
        // so the cast output is detached from the input's autodiff graph and
        // no gradient flows back through a cast — confirm this is intentional.
        AutodiffTensor::new(B::float_cast(tensor.primitive, dtype))
    }
// TODO: Implement float_prod and float_sum
// https://github.com/tracel-ai/burn/issues/1458
}
/// Records whether a binary op broadcast its operands, so the backward pass
/// can reduce each gradient back to its operand's original shape.
#[derive(Debug, Clone)]
enum BinaryOpsBroadcast {
    /// Shapes differed: holds (lhs shape, rhs shape) captured at forward time.
    Broadcasted(Shape, Shape),
    /// Shapes matched exactly; gradients pass through unchanged.
    None,
}
impl BinaryOpsBroadcast {
    /// Compares the two operand shapes dimension by dimension and records both
    /// shapes when any dimension differs (i.e. broadcasting took place).
    fn new<B: Backend>(lhs: &B::FloatTensorPrimitive, rhs: &B::FloatTensorPrimitive) -> Self {
        let shape_lhs = lhs.shape();
        let shape_rhs = rhs.shape();

        let broadcasted =
            (0..shape_lhs.num_dims()).any(|d| shape_lhs.dims[d] != shape_rhs.dims[d]);

        if broadcasted {
            Self::Broadcasted(shape_lhs, shape_rhs)
        } else {
            Self::None
        }
    }

    /// Reduces `grad` back to the lhs operand's original shape when the
    /// forward pass broadcast; otherwise passes it through untouched.
    fn backward_lhs<B: Backend>(&self, grad: B::FloatTensorPrimitive) -> B::FloatTensorPrimitive {
        if let Self::Broadcasted(shape_lhs, _) = self {
            broadcast_shape::<B>(grad, shape_lhs)
        } else {
            grad
        }
    }

    /// Reduces `grad` back to the rhs operand's original shape when the
    /// forward pass broadcast; otherwise passes it through untouched.
    fn backward_rhs<B: Backend>(&self, grad: B::FloatTensorPrimitive) -> B::FloatTensorPrimitive {
        if let Self::Broadcasted(_, shape_rhs) = self {
            broadcast_shape::<B>(grad, shape_rhs)
        } else {
            grad
        }
    }
}
| rust | {
"argument_definitions": [
{
"definitions": [
"impl<B: Backend, C: CheckpointStrategy> FloatTensorOps<Self> for Autodiff<B, C> {\n fn float_from_data(data: TensorData, device: &Device<Self>) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_from_data(data, device))\n }\n\n fn float_random(\n shape: Shape,\n distribution: burn_tensor::Distribution,\n device: &Device<Self>,\n ) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_random(shape, distribution, device))\n }\n\n fn float_zeros(shape: Shape, device: &Device<Self>) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_zeros(shape, device))\n }\n\n fn float_ones(shape: Shape, device: &Device<Self>) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_ones(shape, device))\n }\n\n async fn float_into_data(tensor: FloatTensor<Self>) -> TensorData {\n B::float_into_data(tensor.primitive).await\n }\n\n fn float_device(tensor: &FloatTensor<Self>) -> Device<Self> {\n B::float_device(&tensor.primitive)\n }\n\n fn float_to_device(tensor: FloatTensor<Self>, device: &Device<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct ToDevice;\n\n impl<B: Backend> Backward<B, 1> for ToDevice {\n type State = B::Device;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_to_device(grad, &ops.state)\n });\n }\n }\n\n match ToDevice\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let device_old = B::float_device(&tensor.primitive);\n prep.finish(device_old, B::float_to_device(tensor.primitive, device))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_to_device(tensor.primitive, device)),\n }\n }\n\n fn float_empty(shape: Shape, device: &Device<Self>) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_empty(shape, device))\n }\n\n fn float_add(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Add;\n\n retro_binary!(RetroAdd, 
B::float_add);\n\n impl<B: Backend> Backward<B, 2> for Add {\n type State = (Shape, Shape);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape_lhs, shape_rhs) = ops.state;\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| broadcast_shape::<B>(grad, &shape_lhs),\n |grad| broadcast_shape::<B>(grad, &shape_rhs),\n );\n }\n }\n\n match Add\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroAdd::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (lhs.primitive.shape(), rhs.primitive.shape()),\n B::float_add(lhs.primitive, rhs.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_add(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_add_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct AddScalar;\n\n retro_unary_scalar!(RetroAddScalar, B::float_add_scalar);\n\n impl<B: Backend> Backward<B, 1> for AddScalar {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| grad);\n }\n }\n\n AddScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroAddScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateless(B::float_add_scalar(lhs.primitive, rhs))\n }\n\n fn float_sub(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sub;\n\n retro_binary!(RetroSub, B::float_sub);\n\n impl<B: Backend> Backward<B, 2> for Sub {\n type State = (Shape, Shape);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape_lhs, shape_rhs) = ops.state;\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| 
broadcast_shape::<B>(grad, &shape_lhs),\n |grad| broadcast_shape::<B>(B::float_neg(grad), &shape_rhs),\n );\n }\n }\n\n match Sub\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroSub::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (lhs.primitive.shape(), rhs.primitive.shape()),\n B::float_sub(lhs.primitive, rhs.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_sub(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_sub_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct SubScalar;\n\n retro_unary_scalar!(RetroSubScalar, B::float_sub_scalar);\n\n impl<B: Backend> Backward<B, 1> for SubScalar {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| grad);\n }\n }\n\n SubScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroSubScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateless(B::float_sub_scalar(lhs.primitive, rhs))\n }\n\n fn float_mul(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Mul;\n\n retro_binary!(RetroMul, B::float_mul);\n\n impl<B: Backend> Backward<B, 2> for Mul {\n type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (lhs, rhs, broadcast) = ops.state;\n let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));\n let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let grad = B::float_mul(grad, rhs.unwrap());\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n let grad = B::float_mul(grad, lhs.unwrap());\n 
broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let lhs_tracked = lhs.is_tracked();\n let rhs_tracked = rhs.is_tracked();\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match Mul\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroMul::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));\n let rhs_state = lhs_tracked.then(|| prep.checkpoint(&rhs));\n\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n B::float_mul(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_mul(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_mul_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct MulScalar;\n\n retro_unary_scalar!(RetroMulScalar, B::float_mul_scalar);\n\n impl<B: Backend> Backward<B, 1> for MulScalar {\n type State = FloatElem<B>;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_mul_scalar(grad, ops.state)\n });\n }\n }\n\n match MulScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroMulScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(rhs, B::float_mul_scalar(lhs.primitive, rhs)),\n OpsKind::UnTracked(prep) => prep.finish(B::float_mul_scalar(lhs.primitive, rhs)),\n }\n }\n\n fn float_div(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Div;\n\n retro_binary!(RetroDiv, B::float_div);\n\n impl<B: Backend> Backward<B, 2> for Div {\n type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut 
Checkpointer,\n ) {\n let (lhs, rhs, broadcast) = ops.state;\n let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));\n let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));\n let [rhs_4lhs, rhs_4rhs] = duplicate(&ops.parents, rhs);\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let rhs = rhs_4lhs.unwrap();\n let value = B::float_powf_scalar(rhs, -1.0);\n let grad = B::float_mul(grad, value);\n\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n let rhs = rhs_4rhs.unwrap();\n let lhs = lhs.unwrap();\n let value = B::float_div(B::float_neg(lhs), B::float_powf_scalar(rhs, 2.0));\n let grad = B::float_mul(grad, value);\n\n broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let lhs_tracked = lhs.is_tracked();\n let rhs_tracked = rhs.is_tracked();\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match Div\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroDiv::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));\n let rhs_state = (lhs_tracked || rhs_tracked).then(|| prep.checkpoint(&rhs));\n\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n B::float_div(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_div(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_div_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct DivScalar;\n\n retro_unary_scalar!(RetroDivScalar, B::float_div_scalar);\n\n impl<B: Backend> Backward<B, 1> for DivScalar {\n type State = FloatElem<B>;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let tmp = 1.0 / ops.state.elem::<f32>();\n B::float_mul_scalar(grad, tmp.elem())\n });\n }\n 
}\n\n match DivScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroDivScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(rhs, B::float_div_scalar(lhs.primitive, rhs)),\n OpsKind::UnTracked(prep) => prep.finish(B::float_div_scalar(lhs.primitive, rhs)),\n }\n }\n\n fn float_remainder(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Rem;\n\n retro_binary!(RetroRem, B::float_remainder);\n\n impl<B: Backend> Backward<B, 2> for Rem {\n type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (lhs, rhs, broadcast) = ops.state;\n let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));\n let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n // remainder(x, y) = x - floor(x / y) * y\n // partial(x - floor(x / y) * y, x) = 1\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n // partial(x - floor(x / y) * y, y) = - floor(x / y)\n let rhs = rhs.unwrap();\n let lhs = lhs.unwrap();\n let value = B::float_neg(B::float_floor(B::float_div(lhs, rhs)));\n let grad = B::float_mul(grad, value);\n broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let lhs_tracked = lhs.is_tracked();\n let rhs_tracked = rhs.is_tracked();\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match Rem\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroRem::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));\n let rhs_state = (lhs_tracked || rhs_tracked).then(|| prep.checkpoint(&rhs));\n\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n 
B::float_remainder(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_remainder(lhs.primitive, rhs.primitive))\n }\n }\n }\n\n fn float_remainder_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct RemainderScalar;\n\n retro_unary_scalar!(RetroRemainderScalar, B::float_remainder_scalar);\n\n impl<B: Backend> Backward<B, 1> for RemainderScalar {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| grad);\n }\n }\n\n RemainderScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroRemainderScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateless(B::float_remainder_scalar(lhs.primitive, rhs))\n }\n\n fn float_matmul(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Matmul;\n\n impl<B: Backend> Backward<B, 2> for Matmul {\n type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (lhs, rhs, broadcast) = ops.state;\n let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));\n let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let rhs = B::float_transpose(rhs.unwrap());\n let grad = B::float_matmul(grad, rhs);\n\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n let lhs = B::float_transpose(lhs.unwrap());\n let grad = B::float_matmul(lhs, grad);\n\n broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let lhs_tracked = lhs.is_tracked();\n let rhs_tracked = rhs.is_tracked();\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match Matmul\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n 
.compute_bound()\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));\n let rhs_state = lhs_tracked.then(|| prep.checkpoint(&rhs));\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n B::float_matmul(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_matmul(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_neg(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Neg;\n\n retro_unary!(RetroNeg, B::float_neg);\n\n impl<B: Backend> Backward<B, 1> for Neg {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| B::float_neg(grad));\n }\n }\n\n Neg.prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroNeg::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateless(B::float_neg(tensor.primitive))\n }\n\n fn float_recip(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Recip;\n\n retro_unary!(RetroRecip, B::float_recip);\n\n impl<B: Backend> Backward<B, 1> for Recip {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let tensor = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let tmp = B::float_powf_scalar(tensor, -2.0);\n let value = B::float_neg(tmp);\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Recip\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroRecip::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_recip(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_recip(tensor.primitive)),\n }\n }\n\n fn float_swap_dims(tensor: 
FloatTensor<Self>, dim1: usize, dim2: usize) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct SwapDim;\n\n #[derive(new, Debug)]\n struct RetroSwapDims<B: Backend> {\n input_id: NodeID,\n dim1: usize,\n dim2: usize,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroSwapDims<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_swap_dims(input, self.dim1, self.dim2);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for SwapDim {\n type State = (usize, usize);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim1, dim2) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_swap_dims(grad, dim2, dim1)\n });\n }\n }\n\n match SwapDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSwapDims::<B>::new(tensor.node.id, dim1, dim2))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (dim1, dim2),\n B::float_swap_dims(tensor.primitive, dim1, dim2),\n ),\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_swap_dims(tensor.primitive, dim1, dim2))\n }\n }\n }\n\n fn float_permute(tensor: FloatTensor<Self>, axes: &[usize]) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct PermuteDim;\n\n #[derive(new, Debug)]\n struct RetroPermuteDims<B: Backend> {\n input_id: NodeID,\n axes: Vec<usize>,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroPermuteDims<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_permute(input, &self.axes);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for PermuteDim {\n type State = Vec<usize>;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: 
&mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let axes = ops.state;\n\n let mut inverse = vec![0usize; axes.len()];\n axes.iter()\n .enumerate()\n .for_each(|(i, &axis)| inverse[axis] = i);\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_permute(grad, &inverse)\n });\n }\n }\n\n match PermuteDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroPermuteDims::<B>::new(tensor.node.id, axes.to_vec()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n prep.finish(axes.to_vec(), B::float_permute(tensor.primitive, axes))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_permute(tensor.primitive, axes)),\n }\n }\n\n fn float_flip(tensor: FloatTensor<Self>, axes: &[usize]) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct FlipDim;\n\n #[derive(new, Debug)]\n struct RetroFlipDims<B: Backend> {\n input_id: NodeID,\n axes: Vec<usize>,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroFlipDims<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_flip(input, &self.axes);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for FlipDim {\n type State = Vec<usize>;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let axes = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_flip(grad, &axes)\n });\n }\n }\n\n match FlipDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroFlipDims::<B>::new(tensor.node.id, axes.to_vec()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n prep.finish(axes.to_vec(), B::float_flip(tensor.primitive, axes))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_flip(tensor.primitive, axes)),\n }\n }\n\n fn float_reshape(tensor: FloatTensor<Self>, shape: 
Shape) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct ReshapeDim;\n\n #[derive(new, Debug)]\n struct RetroReshape<B: Backend> {\n input_id: NodeID,\n shape: Shape,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroReshape<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_reshape(input, self.shape.clone());\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for ReshapeDim {\n type State = (Shape, Shape);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape_original, shape) = ops.state;\n let ndims_out = shape.num_dims();\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let shape_grad = grad.shape();\n let mut grad = grad;\n\n for i in 0..ndims_out {\n if shape.dims[i] == 1 && shape_grad.dims[i] != 1 {\n grad = B::float_sum_dim(grad, i);\n }\n }\n\n B::float_reshape(grad, shape_original)\n });\n }\n }\n\n match ReshapeDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroReshape::<B>::new(tensor.node.id, shape.clone()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (tensor.primitive.shape(), shape.clone()),\n B::float_reshape(tensor.primitive, shape),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_reshape(tensor.primitive, shape)),\n }\n }\n\n fn float_gather(\n dim: usize,\n tensor: FloatTensor<Self>,\n indices: IntTensor<B>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Gather;\n\n impl<B: Backend> Backward<B, 1> for Gather {\n type State = (usize, IntTensor<B>, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, indices, shape, device) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let zeros = 
B::float_zeros(shape, &device);\n B::float_scatter(dim, zeros, indices, grad)\n });\n }\n }\n\n match Gather\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n dim,\n indices.clone(),\n tensor.primitive.shape(),\n B::float_device(&tensor.primitive),\n ),\n B::float_gather(dim, tensor.primitive, indices),\n ),\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_gather(dim, tensor.primitive, indices))\n }\n }\n }\n\n fn float_scatter(\n dim: usize,\n tensor: FloatTensor<Self>,\n indices: IntTensor<B>,\n value: FloatTensor<Self>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Scatter;\n\n impl<B: Backend> Backward<B, 2> for Scatter {\n type State = (usize, IntTensor<B>, Shape, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, indices, shape_lhs, shape_rhs, device) = ops.state;\n let [indices_4lhs, indices_4rhs] = duplicate(&ops.parents, Some(indices));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let zeros = B::float_zeros(shape_lhs, &device);\n B::float_scatter(dim, grad, indices_4lhs.unwrap(), zeros)\n },\n |grad| {\n let zeros = B::float_zeros(shape_rhs, &device);\n B::float_scatter(dim, zeros, indices_4rhs.unwrap(), grad)\n },\n );\n }\n }\n\n match Scatter\n .prepare::<C>([tensor.node, value.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n dim,\n indices.clone(),\n tensor.primitive.shape(),\n value.primitive.shape(),\n B::float_device(&value.primitive),\n ),\n B::float_scatter(dim, tensor.primitive, indices, value.primitive),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_scatter(\n dim,\n tensor.primitive,\n indices,\n value.primitive,\n )),\n }\n }\n\n fn float_select(\n tensor: FloatTensor<Self>,\n dim: usize,\n indices: IntTensor<B>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Select;\n\n 
#[derive(new, Debug)]\n struct RetroSelect<B: Backend> {\n input_id: NodeID,\n dim: usize,\n indices: IntTensor<B>,\n }\n\n impl<B: Backend> RetroForward for RetroSelect<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_select(input, self.dim, self.indices.clone());\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for Select {\n type State = (usize, IntTensor<B>, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, indices, shape, device) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let zeros = B::float_zeros(shape, &device);\n B::float_select_assign(zeros, dim, indices, grad)\n });\n }\n }\n\n match Select\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSelect::<B>::new(tensor.node.id, dim, indices.clone()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n dim,\n indices.clone(),\n tensor.primitive.shape(),\n B::float_device(&tensor.primitive),\n ),\n B::float_select(tensor.primitive, dim, indices),\n ),\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_select(tensor.primitive, dim, indices))\n }\n }\n }\n\n fn float_select_assign(\n tensor: FloatTensor<Self>,\n dim: usize,\n indices: IntTensor<B>,\n value: FloatTensor<Self>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct IndexSelectDimAssign;\n\n #[derive(new, Debug)]\n struct RetroSelectAssign<B: Backend> {\n tensor_id: NodeID,\n dim: usize,\n indices: IntTensor<B>,\n value_id: NodeID,\n }\n\n impl<B: Backend> RetroForward for RetroSelectAssign<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);\n let value = states.get_state::<B::FloatTensorPrimitive>(&self.value_id);\n 
let out = B::float_select_assign(tensor, self.dim, self.indices.clone(), value);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 2> for IndexSelectDimAssign {\n type State = (usize, IntTensor<B>);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, indices) = ops.state;\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| grad,\n |grad| B::float_select(grad, dim, indices),\n );\n }\n }\n\n match IndexSelectDimAssign\n .prepare::<C>([tensor.node.clone(), value.node.clone()])\n .memory_bound()\n .retro_forward(RetroSelectAssign::<B>::new(\n tensor.node.id,\n dim,\n indices.clone(),\n value.node.id,\n ))\n .parents([&tensor, &value])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (dim, indices.clone()),\n B::float_select_assign(tensor.primitive, dim, indices, value.primitive),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_select_assign(\n tensor.primitive,\n dim,\n indices,\n value.primitive,\n )),\n }\n }\n\n fn float_slice(\n tensor: FloatTensor<Self>,\n ranges: &[core::ops::Range<usize>],\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Index;\n\n #[derive(new, Debug)]\n struct RetroSlice<B: Backend> {\n tensor_id: NodeID,\n ranges: Vec<core::ops::Range<usize>>,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroSlice<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);\n let out = B::float_slice(tensor, &self.ranges);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for Index {\n type State = (Vec<core::ops::Range<usize>>, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (ranges, shape, device) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let zeros = 
B::float_zeros(shape, &device);\n B::float_slice_assign(zeros, &ranges, grad)\n });\n }\n }\n\n match Index\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSlice::<B>::new(tensor.node.id, ranges.to_vec()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n ranges.to_vec(),\n tensor.primitive.shape(),\n B::float_device(&tensor.primitive),\n ),\n B::float_slice(tensor.primitive, ranges),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_slice(tensor.primitive, ranges)),\n }\n }\n\n fn float_slice_assign(\n tensor: FloatTensor<Self>,\n ranges: &[core::ops::Range<usize>],\n value: FloatTensor<Self>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct SliceAssign;\n\n #[derive(new, Debug)]\n struct RetroSliceAssign<B: Backend> {\n tensor_id: NodeID,\n ranges: Vec<core::ops::Range<usize>>,\n value_id: NodeID,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroSliceAssign<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);\n let value = states.get_state::<B::FloatTensorPrimitive>(&self.value_id);\n let out = B::float_slice_assign(tensor, &self.ranges, value);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 2> for SliceAssign {\n type State = (Vec<core::ops::Range<usize>>, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (ranges, shape_rhs, device) = ops.state;\n let [ranges_4lhs, ranges_4rhs] = duplicate(&ops.parents, Some(ranges));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let zeros = B::float_zeros(shape_rhs, &device);\n B::float_slice_assign(grad, &ranges_4lhs.unwrap(), zeros)\n },\n |grad| B::float_slice(grad, &ranges_4rhs.unwrap()),\n );\n }\n }\n\n match SliceAssign\n .prepare::<C>([tensor.node.clone(), value.node.clone()])\n 
.memory_bound()\n .retro_forward(RetroSliceAssign::<B>::new(\n tensor.node.id,\n ranges.to_vec(),\n value.node.id,\n ))\n .parents([&tensor, &value])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n ranges.to_vec(),\n value.primitive.shape(),\n B::float_device(&value.primitive),\n ),\n B::float_slice_assign(tensor.primitive, ranges, value.primitive),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_slice_assign(\n tensor.primitive,\n ranges,\n value.primitive,\n )),\n }\n }\n\n fn float_mask_where(\n tensor: FloatTensor<Self>,\n mask: BoolTensor<Self>,\n source: FloatTensor<Self>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct MaskWhere;\n\n impl<B: Backend> Backward<B, 2> for MaskWhere {\n type State = (BoolTensor<B>, Shape, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (mask, shape_lhs, shape_rhs, device) = ops.state;\n let [mask_4lhs, mask_4rhs] = duplicate(&ops.parents, Some(mask));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let zeros = B::float_zeros(shape_lhs.clone(), &device);\n let grad = B::float_mask_where(grad, mask_4lhs.unwrap(), zeros);\n\n broadcast_shape::<B>(grad, &shape_lhs)\n },\n |grad| {\n let zeros = B::float_zeros(shape_rhs.clone(), &device);\n let grad = B::float_mask_where(zeros, mask_4rhs.unwrap(), grad);\n\n broadcast_shape::<B>(grad, &shape_rhs)\n },\n );\n }\n }\n\n match MaskWhere\n .prepare::<C>([tensor.node, source.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n mask.clone(),\n tensor.primitive.shape(),\n source.primitive.shape(),\n B::float_device(&source.primitive),\n ),\n B::float_mask_where(tensor.primitive, mask, source.primitive),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_mask_where(\n tensor.primitive,\n mask,\n source.primitive,\n )),\n }\n }\n\n fn float_mask_fill(\n tensor: FloatTensor<Self>,\n mask: 
BoolTensor<B>,\n value: FloatElem<B>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct MaskFill;\n\n impl<B: Backend> Backward<B, 1> for MaskFill {\n type State = BoolTensor<B>;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_mask_fill(grad, ops.state, 0.elem())\n });\n }\n }\n\n match MaskFill\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n mask.clone(),\n B::float_mask_fill(tensor.primitive, mask, value),\n ),\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_mask_fill(tensor.primitive, mask, value))\n }\n }\n }\n\n fn float_equal(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {\n B::float_equal(lhs.primitive, rhs.primitive)\n }\n\n fn float_equal_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {\n B::float_equal_elem(lhs.primitive, rhs)\n }\n\n fn float_greater(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {\n B::float_greater(lhs.primitive, rhs.primitive)\n }\n\n fn float_greater_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {\n B::float_greater_elem(lhs.primitive, rhs)\n }\n\n fn float_greater_equal(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {\n B::float_greater_equal(lhs.primitive, rhs.primitive)\n }\n\n fn float_greater_equal_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {\n B::float_greater_equal_elem(lhs.primitive, rhs)\n }\n\n fn float_lower(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {\n B::float_lower(lhs.primitive, rhs.primitive)\n }\n\n fn float_lower_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {\n B::float_lower_elem(lhs.primitive, rhs)\n }\n\n fn float_lower_equal(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {\n B::float_lower_equal(lhs.primitive, rhs.primitive)\n 
}\n\n fn float_lower_equal_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {\n B::float_lower_equal_elem(lhs.primitive, rhs)\n }\n\n fn float_detach(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n // When we detach a tensor, we remove it from the graph, but we still want to keep the\n // `require_grad` setting.\n let is_require_grad = Self::float_is_require_grad(&tensor);\n let tensor = AutodiffTensor::new(tensor.primitive);\n\n match is_require_grad {\n true => tensor.require_grad(),\n false => tensor,\n }\n }\n\n fn float_set_require_grad(tensor: FloatTensor<Self>, require_grad: bool) -> FloatTensor<Self> {\n if require_grad {\n return tensor.require_grad();\n }\n\n AutodiffTensor::new(tensor.primitive)\n }\n\n fn float_is_require_grad(tensor: &FloatTensor<Self>) -> bool {\n matches!(tensor.node.requirement, Requirement::Grad)\n }\n\n fn float_mean(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Mean;\n\n impl<B: Backend> Backward<B, 1> for Mean {\n type State = Shape;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let shape = ops.state;\n let val = 1_f64 / shape.num_elements() as f64;\n let ones = B::float_ones(shape, &B::float_device(&grad));\n let val = B::float_mul_scalar(ones, val.elem());\n\n let grad = unsqueeze_like::<B>(grad, val.shape());\n B::float_mul(val, grad)\n });\n }\n }\n\n match Mean.prepare::<C>([tensor.node]).compute_bound().stateful() {\n OpsKind::Tracked(prep) => {\n prep.finish(tensor.primitive.shape(), B::float_mean(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_mean(tensor.primitive)),\n }\n }\n\n fn float_sum(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sum;\n\n impl<B: Backend> Backward<B, 1> for Sum {\n type State = Shape;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut 
Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let val = B::float_ones(ops.state, &B::float_device(&grad));\n\n let grad = unsqueeze_like::<B>(grad, val.shape());\n B::float_mul(val, grad)\n });\n }\n }\n\n match Sum.prepare::<C>([tensor.node]).compute_bound().stateful() {\n OpsKind::Tracked(prep) => {\n prep.finish(tensor.primitive.shape(), B::float_sum(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_sum(tensor.primitive)),\n }\n }\n\n fn float_mean_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct MeanDim;\n\n impl<B: Backend> Backward<B, 1> for MeanDim {\n type State = (Shape, usize);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape, dim) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let val = 1_f64 / shape.dims[dim] as f64;\n let ones = B::float_ones(shape, &B::float_device(&grad));\n let val = B::float_mul_scalar(ones, B::FloatElem::from_elem(val));\n\n let grad = B::float_sum_dim(grad, dim);\n B::float_mul(val, grad)\n });\n }\n }\n\n match MeanDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (tensor.primitive.shape(), dim),\n B::float_mean_dim(tensor.primitive, dim),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_mean_dim(tensor.primitive, dim)),\n }\n }\n\n fn float_sum_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct SumDim;\n\n impl<B: Backend> Backward<B, 1> for SumDim {\n type State = (Shape, usize);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape, dim) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let ones = B::float_ones(shape, &B::float_device(&grad));\n let grad = 
B::float_sum_dim(grad, dim);\n\n B::float_mul(ones, grad)\n });\n }\n }\n\n match SumDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (tensor.primitive.shape(), dim),\n B::float_sum_dim(tensor.primitive, dim),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_sum_dim(tensor.primitive, dim)),\n }\n }\n\n fn float_argmax(tensor: FloatTensor<Self>, dim: usize) -> IntTensor<B> {\n B::float_argmax(tensor.primitive, dim)\n }\n\n fn float_argmin(tensor: FloatTensor<Self>, dim: usize) -> IntTensor<B> {\n B::float_argmin(tensor.primitive, dim)\n }\n\n fn float_exp(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Exp;\n\n retro_unary!(RetroExp, B::float_exp);\n\n impl<B: Backend> Backward<B, 1> for Exp {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n let output = B::float_exp(input);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_mul(grad, output)\n });\n }\n }\n\n match Exp\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroExp::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_exp(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_exp(tensor.primitive)),\n }\n }\n\n fn float_log(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Log;\n\n retro_unary!(RetroLog, B::float_log);\n\n impl<B: Backend> Backward<B, 1> for Log {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = 
B::float_powf_scalar(input, -1.0);\n B::float_mul(grad, value)\n });\n }\n }\n\n match Log\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroLog::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_log(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_log(tensor.primitive)),\n }\n }\n\n fn float_log1p(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Log1P;\n\n retro_unary!(RetroLog1P, B::float_log1p);\n\n impl<B: Backend> Backward<B, 1> for Log1P {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_add_scalar(input, 1.elem());\n let value = B::float_powf_scalar(value, -1.0);\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Log1P\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroLog1P::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_log1p(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_log1p(tensor.primitive)),\n }\n }\n\n fn float_powf_scalar(tensor: FloatTensor<Self>, value: f32) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct PowfScalar;\n\n #[derive(new, Debug)]\n struct RetroPowfScalar<B: Backend> {\n lhs_id: NodeID,\n rhs: f32,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroPowfScalar<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let lhs = states.get_state::<B::FloatTensorPrimitive>(&self.lhs_id);\n let out = B::float_powf_scalar(lhs, self.rhs);\n states.save(out_node, out)\n }\n }\n\n impl<B: 
Backend> Backward<B, 1> for PowfScalar {\n type State = (NodeID, f32);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (tensor_id, value) = ops.state;\n let tensor = checkpointer.retrieve_node_output(tensor_id);\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let tmp = B::float_powf_scalar(tensor, value - 1.0);\n let value = B::float_mul_scalar(tmp, value.elem());\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match PowfScalar\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroPowfScalar::<B>::new(tensor.node.id, value))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = (prep.checkpoint(&tensor), value);\n prep.finish(state, B::float_powf_scalar(tensor.primitive, value))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_powf_scalar(tensor.primitive, value)),\n }\n }\n\n fn float_sqrt(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sqrt;\n\n retro_unary!(RetroSqrt, B::float_sqrt);\n\n impl<B: Backend> Backward<B, 1> for Sqrt {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_div_scalar(B::float_powf_scalar(input, -0.5), 2.elem());\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Sqrt\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSqrt::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_sqrt(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_sqrt(tensor.primitive)),\n }\n }\n\n fn float_abs(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Abs;\n\n 
retro_unary!(RetroAbs, B::float_abs);\n\n impl<B: Backend> Backward<B, 1> for Abs {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let tensor: B::FloatTensorPrimitive = checkpointer.retrieve_node_output(ops.state);\n let state = B::float_sign(tensor);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_mul(grad, state)\n });\n }\n }\n\n match Abs\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroAbs::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_abs(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_abs(tensor.primitive)),\n }\n }\n\n fn float_cos(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Cos;\n\n retro_unary!(RetroCos, B::float_cos);\n\n impl<B: Backend> Backward<B, 1> for Cos {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_neg(B::float_sin(input));\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Cos\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroCos::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_cos(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_cos(tensor.primitive)),\n }\n }\n\n fn float_sin(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sin;\n\n retro_unary!(RetroSin, B::float_sin);\n\n impl<B: Backend> Backward<B, 1> for Sin {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 
1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let state = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_cos(state);\n B::float_mul(grad, value)\n });\n }\n }\n\n match Sin\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSin::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_sin(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_sin(tensor.primitive)),\n }\n }\n\n fn float_tanh(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Tanh;\n\n retro_unary!(RetroTanh, B::float_tanh);\n\n impl<B: Backend> Backward<B, 1> for Tanh {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n let state = B::float_tanh(input);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_add_scalar(\n B::float_neg(B::float_powf_scalar(state, 2.0)),\n 1.elem(),\n );\n B::float_mul(grad, value)\n });\n }\n }\n\n match Tanh\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroTanh::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_tanh(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_tanh(tensor.primitive)),\n }\n }\n\n fn float_round(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Round;\n retro_unary!(RetroRound, B::float_round);\n\n impl<B: Backend> Backward<B, 1> for Round {\n type State = (Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n 
            ) {
                let (shape, device) = ops.state;
                // round(x) is piecewise constant, so its derivative is zero
                // almost everywhere: ignore the incoming gradient and emit zeros.
                unary::<B, _>(ops.parents, ops.node, grads, |_grad| {
                    B::float_zeros(shape, &device)
                })
            }
        }

        match Round
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroRound::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            // The backward pass only needs shape + device to build the zero
            // gradient, so that pair is the entire saved state.
            OpsKind::Tracked(preps) => preps.finish(
                (tensor.primitive.shape(), B::float_device(&tensor.primitive)),
                B::float_round(tensor.primitive),
            ),
            OpsKind::UnTracked(preps) => preps.finish(B::float_round(tensor.primitive)),
        }
    }

    /// Element-wise floor with autodiff support.
    ///
    /// Like `round`, the forward is a step function, so the backward pass
    /// produces an all-zero gradient of the input's shape.
    fn float_floor(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Floor;
        retro_unary!(RetroFloor, B::float_floor);

        impl<B: Backend> Backward<B, 1> for Floor {
            // (input shape, input device): enough to materialize the zero grad.
            type State = (Shape, B::Device);

            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (shape, device) = ops.state;
                // floor(x) has zero derivative almost everywhere.
                unary::<B, _>(ops.parents, ops.node, grads, |_grad| {
                    B::float_zeros(shape, &device)
                })
            }
        }

        match Floor
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroFloor::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(preps) => preps.finish(
                (tensor.primitive.shape(), B::float_device(&tensor.primitive)),
                B::float_floor(tensor.primitive),
            ),
            OpsKind::UnTracked(preps) => preps.finish(B::float_floor(tensor.primitive)),
        }
    }

    /// Element-wise ceil with autodiff support (zero gradient, as for floor).
    fn float_ceil(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Ceil;
        retro_unary!(RetroCeil, B::float_ceil);

        impl<B: Backend> Backward<B, 1> for Ceil {
            type State = (Shape, B::Device);

            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (shape, device) = ops.state;
                // ceil(x) has zero derivative almost everywhere.
                unary::<B, _>(ops.parents, ops.node, grads, |_grad| {
                    B::float_zeros(shape, &device)
                })
            }
        }

        match Ceil
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
.retro_forward(RetroCeil::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (tensor.primitive.shape(), B::float_device(&tensor.primitive)),\n B::float_floor(tensor.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_floor(tensor.primitive)),\n }\n }\n\n fn float_erf(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Erf;\n\n retro_unary!(RetroErf, B::float_erf);\n\n impl<B: Backend> Backward<B, 1> for Erf {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let ops = checkpointer.retrieve_node_output(ops.state);\n let exponent = B::float_neg(B::float_powf_scalar(ops, 2.0));\n let numerator = B::float_mul_scalar(B::float_exp(exponent), 2.0.elem());\n let denominator = core::f64::consts::PI.sqrt().elem();\n let value = B::float_div_scalar(numerator, denominator);\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Erf\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroErf::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_erf(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_erf(tensor.primitive)),\n }\n }\n\n fn float_cat(tensors: Vec<FloatTensor<Self>>, dim: usize) -> FloatTensor<Self> {\n #[derive(new, Debug)]\n struct CatStep<B: Backend> {\n nodes: Vec<Option<NodeRef>>,\n // The dimension of each tensor along the dim dimension.\n // This indicates the number of dimension concatenated for each tensor.\n dim_sizes: Vec<usize>,\n output: NodeRef,\n phantom: PhantomData<B>,\n dim: usize,\n }\n\n impl<B: Backend> Step for CatStep<B> {\n fn step(self: Box<Self>, grads: &mut Gradients, _checkpointer: &mut Checkpointer) {\n let grad = 
grads.consume::<B>(&self.output);\n let ranges: Vec<_> = grad.shape().dims.iter().map(|v| 0..*v).collect();\n\n let mut current_index = 0;\n\n self.nodes\n .into_iter()\n .zip(self.dim_sizes)\n .filter_map(|(node, dim_size)| node.map(|node| (node, dim_size)))\n .for_each(|(node, dim_size)| {\n let mut ranges = ranges.clone();\n ranges[self.dim] = current_index..dim_size + current_index;\n current_index += dim_size;\n grads.register::<B>(node.id, B::float_slice(grad.clone(), &ranges));\n });\n }\n\n fn node(&self) -> NodeID {\n self.output.id\n }\n\n fn parents(&self) -> Vec<NodeID> {\n self.nodes\n .iter()\n .filter_map(|node| node.clone())\n .map(|node| node.id)\n .collect()\n }\n fn depth(&self) -> usize {\n self.output.order\n }\n }\n\n let mut nodes = Vec::with_capacity(tensors.len());\n let mut primitives = Vec::with_capacity(tensors.len());\n let mut dim_sizes = Vec::with_capacity(tensors.len());\n\n tensors.into_iter().for_each(|tensor| {\n dim_sizes.push(tensor.primitive.shape().dims[dim]);\n nodes.push(tensor.node);\n primitives.push(tensor.primitive);\n });\n\n let requirement = Requirement::from_nodes(&nodes);\n\n // For simplicity, this operation does not checkpoint anything\n let cat_computing_property = ComputingProperty::Ambiguous;\n let checkpointer_builder = CheckpointerBuilder::default();\n\n let output = B::float_cat(primitives, dim);\n if requirement.is_none() {\n return AutodiffTensor::from_parents(\n output,\n &nodes,\n requirement,\n cat_computing_property,\n );\n }\n\n let output =\n AutodiffTensor::from_parents(output, &nodes, requirement, cat_computing_property);\n let nodes = nodes\n .into_iter()\n .map(|node| node.clone_if_require_grad())\n .collect::<Vec<_>>();\n\n let ops = CatStep::<B>::new(nodes, dim_sizes, output.node.clone(), dim);\n output.register_step(ops, checkpointer_builder)\n }\n\n fn float_max_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {\n match MaxMinDim\n .prepare::<C>([tensor.node])\n 
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => {
                let shape = tensor.primitive.shape();
                // Saved state: the argmax indices plus the input shape —
                // presumably consumed by the shared `MaxMinDim` backward
                // (defined elsewhere) to route gradients to the selected
                // elements; confirm against its `backward` impl.
                let (tensor, index) = B::float_max_dim_with_indices(tensor.primitive, dim);
                prep.finish((index, shape), tensor)
            }
            // Untracked: no gradient needed, use the values-only kernel.
            OpsKind::UnTracked(prep) => prep.finish(B::float_max_dim(tensor.primitive, dim)),
        }
    }
    /// Max along `dim`, also returning the indices of the maxima.
    fn float_max_dim_with_indices(
        tensor: FloatTensor<Self>,
        dim: usize,
    ) -> (FloatTensor<Self>, IntTensor<B>) {
        match MaxMinDim
            .prepare::<C>([tensor.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => {
                let shape = tensor.primitive.shape();
                let (tensor, index) = B::float_max_dim_with_indices(tensor.primitive, dim);
                // The indices are part of the backward state AND of the public
                // return value, hence the clone.
                let tensor = prep.finish((index.clone(), shape), tensor);

                (tensor, index)
            }
            OpsKind::UnTracked(prep) => {
                let (tensor, index) = B::float_max_dim_with_indices(tensor.primitive, dim);
                let tensor = prep.finish(tensor);

                (tensor, index)
            }
        }
    }
    /// Min along `dim`; mirrors `float_max_dim` (same shared backward op).
    fn float_min_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {
        match MaxMinDim
            .prepare::<C>([tensor.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => {
                let shape = tensor.primitive.shape();
                let (tensor, index) = B::float_min_dim_with_indices(tensor.primitive, dim);
                prep.finish((index, shape), tensor)
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_min_dim(tensor.primitive, dim)),
        }
    }
    /// Min along `dim`, also returning the indices of the minima.
    fn float_min_dim_with_indices(
        tensor: FloatTensor<Self>,
        dim: usize,
    ) -> (FloatTensor<Self>, IntTensor<B>) {
        match MaxMinDim
            .prepare::<C>([tensor.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => {
                let shape = tensor.primitive.shape();
                let (tensor, index) = B::float_min_dim_with_indices(tensor.primitive, dim);
                let tensor = prep.finish((index.clone(), shape), tensor);

                (tensor, index)
            }
            OpsKind::UnTracked(prep) => {
                let (tensor, index) = B::float_min_dim_with_indices(tensor.primitive, dim);
                let tensor = prep.finish(tensor);

                (tensor, index)
            }
        }
    }
fn float_into_int(tensor: FloatTensor<Self>) -> <Autodiff<B> as Backend>::IntTensorPrimitive {\n B::float_into_int(tensor.primitive)\n }\n\n fn float_powf(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct PowF;\n\n retro_binary!(RetroPowf, B::float_powf);\n\n impl<B: Backend> Backward<B, 2> for PowF {\n type State = (NodeID, NodeID, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (lhs_id, rhs_id, broadcast) = ops.state;\n let lhs: B::FloatTensorPrimitive = checkpointer.retrieve_node_output(lhs_id);\n let rhs: B::FloatTensorPrimitive = checkpointer.retrieve_node_output(rhs_id);\n\n // Both lhs and rhs are needed for both lhs and rhs gradients, but we clone them\n // the number of times required by the parents specification.\n let [rhs_4lhs, rhs_4rhs] = duplicate(&ops.parents, Some(rhs));\n let [lhs_4lhs, lhs_4rhs] = duplicate(&ops.parents, Some(lhs));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n //rhs*(lhs.val**(rhs-1))*grad\n let rhs1 = rhs_4lhs.unwrap();\n let rhs2 = rhs1.clone();\n let lhs = lhs_4lhs.unwrap();\n\n let tmp = B::float_powf(\n lhs,\n B::float_sub_scalar(rhs1, B::FloatElem::from_elem(1.0)),\n );\n let value = B::float_mul(tmp, rhs2);\n let grad = B::float_mul(grad, value);\n\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n //lhs**rhs * ln(lhs) * grad\n let rhs = rhs_4rhs.unwrap();\n let lhs1 = lhs_4rhs.unwrap();\n let lhs2 = lhs1.clone();\n let tmp = B::float_powf(lhs1, rhs);\n let value = B::float_mul(tmp, B::float_log(lhs2));\n let grad = B::float_mul(grad, value);\n\n broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match PowF\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroPowf::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, 
&rhs])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = prep.checkpoint(&lhs);\n let rhs_state = prep.checkpoint(&rhs);\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n B::float_powf(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_powf(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_sign(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sign;\n\n retro_unary!(RetroSign, B::float_sign);\n\n impl<B: Backend> Backward<B, 1> for Sign {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad|\n // Always return 0 because the derivative of the sign function\n // does not contribute to gradient updates in a meaningful way.\n B::float_mul_scalar(grad, 0.elem()));\n }\n }\n\n Sign.prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSign::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateless(B::float_sign(tensor.primitive))\n }\n\n fn float_expand(tensor: FloatTensor<Self>, shape: Shape) -> FloatTensor<Self> {\n // D1: tensor, D2: shape\n #[derive(Debug)]\n struct ExpandDim;\n\n #[derive(new, Debug)]\n struct RetroExpand<B: Backend> {\n input_id: NodeID,\n shape: Shape,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroExpand<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_expand(input, self.shape.clone());\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for ExpandDim {\n type State = (Shape, Shape);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape_in, shape_out) = ops.state;\n let ndims_in = shape_in.num_dims();\n let ndims_out = shape_out.num_dims();\n\n let 
mut shape_expanded = vec![1; ndims_out];\n\n debug_assert!(ndims_out >= ndims_in);\n\n for i in 0..ndims_in {\n shape_expanded[i + (ndims_out - ndims_in)] = shape_in.dims[i];\n }\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let shape_grad = grad.shape();\n let mut grad = grad;\n\n #[allow(clippy::needless_range_loop)]\n for i in 0..ndims_out {\n if shape_expanded[i] == 1 && shape_grad.dims[i] != 1 {\n grad = B::float_sum_dim(grad, i);\n }\n }\n\n B::float_reshape(grad, shape_in)\n });\n }\n }\n\n match ExpandDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroExpand::<B>::new(tensor.node.id, shape.clone()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (tensor.primitive.shape(), shape.clone()),\n B::float_expand(tensor.primitive, shape),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_expand(tensor.primitive, shape)),\n }\n }\n\n fn float_sort(tensor: FloatTensor<Self>, dim: usize, descending: bool) -> FloatTensor<Self> {\n match super::sort::SortDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let shape = tensor.primitive.shape();\n let (tensor, indices) =\n B::float_sort_with_indices(tensor.primitive, dim, descending);\n prep.finish((indices, shape), tensor)\n }\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_sort(tensor.primitive, dim, descending))\n }\n }\n }\n\n fn float_sort_with_indices(\n tensor: FloatTensor<Self>,\n dim: usize,\n descending: bool,\n ) -> (FloatTensor<Self>, IntTensor<B>) {\n match super::sort::SortDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let shape = tensor.primitive.shape();\n let (tensor, indices) =\n B::float_sort_with_indices(tensor.primitive, dim, descending);\n let tensor = prep.finish((indices.clone(), shape), tensor);\n\n (tensor, indices)\n }\n OpsKind::UnTracked(prep) => {\n let (tensor, indices) =\n 
B::float_sort_with_indices(tensor.primitive, dim, descending);\n let tensor = prep.finish(tensor);\n\n (tensor, indices)\n }\n }\n }\n\n fn float_argsort(tensor: FloatTensor<Self>, dim: usize, descending: bool) -> IntTensor<B> {\n B::float_argsort(tensor.primitive, dim, descending)\n }\n\n fn float_repeat_dim(tensor: FloatTensor<Self>, dim: usize, times: usize) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Repeat;\n\n #[derive(new, Debug)]\n struct RetroRepeat<B: Backend> {\n tensor_id: NodeID,\n dim: usize,\n times: usize,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroRepeat<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);\n let out = B::float_repeat_dim(tensor, self.dim, self.times);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for Repeat {\n type State = (usize, usize);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, times) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let mut dims = grad.shape().dims;\n let orig_dim_size = dims[dim] / times;\n if orig_dim_size > 1 {\n dims[dim] = orig_dim_size;\n let orig_dims = dims.clone();\n dims.insert(dim + 1, times); // shape [..., orig_dim_size, times, ...]\n let grad = B::float_reshape(grad, Shape::from(dims));\n let grad = B::float_sum_dim(grad, dim + 1); // sum over repeat times\n B::float_reshape(grad, Shape::from(orig_dims))\n } else {\n B::float_sum_dim(grad, dim)\n }\n });\n }\n }\n\n match Repeat\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroRepeat::<B>::new(tensor.node.id, dim, times))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (dim, times),\n B::float_repeat_dim(tensor.primitive, dim, times),\n ),\n OpsKind::UnTracked(prep) => {\n 
prep.finish(B::float_repeat_dim(tensor.primitive, dim, times))\n }\n }\n }\n\n fn float_cast(tensor: FloatTensor<Self>, dtype: burn_tensor::FloatDType) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_cast(tensor.primitive, dtype))\n }\n\n // TODO: Implement float_prod and float_sum\n // https://github.com/tracel-ai/burn/issues/1458\n}"
],
"name": "lhs",
"type": "FloatTensor<Self>"
}
],
"end_line": 451,
"name": "float_div_scalar",
"signature": "fn float_div_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self>",
"start_line": 419
} | {
"class_name": "impl<B: Backend, C: CheckpointStrategy> FloatTensorOps<Self> for Autodiff<B, C> {\n fn float_from_data(data: TensorData, device: &Device<Self>) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_from_data(data, device))\n }\n\n fn float_random(\n shape: Shape,\n distribution: burn_tensor::Distribution,\n device: &Device<Self>,\n ) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_random(shape, distribution, device))\n }\n\n fn float_zeros(shape: Shape, device: &Device<Self>) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_zeros(shape, device))\n }\n\n fn float_ones(shape: Shape, device: &Device<Self>) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_ones(shape, device))\n }\n\n async fn float_into_data(tensor: FloatTensor<Self>) -> TensorData {\n B::float_into_data(tensor.primitive).await\n }\n\n fn float_device(tensor: &FloatTensor<Self>) -> Device<Self> {\n B::float_device(&tensor.primitive)\n }\n\n fn float_to_device(tensor: FloatTensor<Self>, device: &Device<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct ToDevice;\n\n impl<B: Backend> Backward<B, 1> for ToDevice {\n type State = B::Device;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_to_device(grad, &ops.state)\n });\n }\n }\n\n match ToDevice\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let device_old = B::float_device(&tensor.primitive);\n prep.finish(device_old, B::float_to_device(tensor.primitive, device))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_to_device(tensor.primitive, device)),\n }\n }\n\n fn float_empty(shape: Shape, device: &Device<Self>) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_empty(shape, device))\n }\n\n fn float_add(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Add;\n\n 
retro_binary!(RetroAdd, B::float_add);\n\n impl<B: Backend> Backward<B, 2> for Add {\n type State = (Shape, Shape);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape_lhs, shape_rhs) = ops.state;\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| broadcast_shape::<B>(grad, &shape_lhs),\n |grad| broadcast_shape::<B>(grad, &shape_rhs),\n );\n }\n }\n\n match Add\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroAdd::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (lhs.primitive.shape(), rhs.primitive.shape()),\n B::float_add(lhs.primitive, rhs.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_add(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_add_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct AddScalar;\n\n retro_unary_scalar!(RetroAddScalar, B::float_add_scalar);\n\n impl<B: Backend> Backward<B, 1> for AddScalar {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| grad);\n }\n }\n\n AddScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroAddScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateless(B::float_add_scalar(lhs.primitive, rhs))\n }\n\n fn float_sub(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sub;\n\n retro_binary!(RetroSub, B::float_sub);\n\n impl<B: Backend> Backward<B, 2> for Sub {\n type State = (Shape, Shape);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape_lhs, shape_rhs) = ops.state;\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n 
grads,\n |grad| broadcast_shape::<B>(grad, &shape_lhs),\n |grad| broadcast_shape::<B>(B::float_neg(grad), &shape_rhs),\n );\n }\n }\n\n match Sub\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroSub::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (lhs.primitive.shape(), rhs.primitive.shape()),\n B::float_sub(lhs.primitive, rhs.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_sub(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_sub_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct SubScalar;\n\n retro_unary_scalar!(RetroSubScalar, B::float_sub_scalar);\n\n impl<B: Backend> Backward<B, 1> for SubScalar {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| grad);\n }\n }\n\n SubScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroSubScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateless(B::float_sub_scalar(lhs.primitive, rhs))\n }\n\n fn float_mul(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Mul;\n\n retro_binary!(RetroMul, B::float_mul);\n\n impl<B: Backend> Backward<B, 2> for Mul {\n type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (lhs, rhs, broadcast) = ops.state;\n let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));\n let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let grad = B::float_mul(grad, rhs.unwrap());\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n let grad = B::float_mul(grad, 
lhs.unwrap());\n broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let lhs_tracked = lhs.is_tracked();\n let rhs_tracked = rhs.is_tracked();\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match Mul\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroMul::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));\n let rhs_state = lhs_tracked.then(|| prep.checkpoint(&rhs));\n\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n B::float_mul(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_mul(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_mul_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct MulScalar;\n\n retro_unary_scalar!(RetroMulScalar, B::float_mul_scalar);\n\n impl<B: Backend> Backward<B, 1> for MulScalar {\n type State = FloatElem<B>;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_mul_scalar(grad, ops.state)\n });\n }\n }\n\n match MulScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroMulScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(rhs, B::float_mul_scalar(lhs.primitive, rhs)),\n OpsKind::UnTracked(prep) => prep.finish(B::float_mul_scalar(lhs.primitive, rhs)),\n }\n }\n\n fn float_div(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Div;\n\n retro_binary!(RetroDiv, B::float_div);\n\n impl<B: Backend> Backward<B, 2> for Div {\n type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n 
checkpointer: &mut Checkpointer,\n ) {\n let (lhs, rhs, broadcast) = ops.state;\n let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));\n let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));\n let [rhs_4lhs, rhs_4rhs] = duplicate(&ops.parents, rhs);\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let rhs = rhs_4lhs.unwrap();\n let value = B::float_powf_scalar(rhs, -1.0);\n let grad = B::float_mul(grad, value);\n\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n let rhs = rhs_4rhs.unwrap();\n let lhs = lhs.unwrap();\n let value = B::float_div(B::float_neg(lhs), B::float_powf_scalar(rhs, 2.0));\n let grad = B::float_mul(grad, value);\n\n broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let lhs_tracked = lhs.is_tracked();\n let rhs_tracked = rhs.is_tracked();\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match Div\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroDiv::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));\n let rhs_state = (lhs_tracked || rhs_tracked).then(|| prep.checkpoint(&rhs));\n\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n B::float_div(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_div(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_div_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct DivScalar;\n\n retro_unary_scalar!(RetroDivScalar, B::float_div_scalar);\n\n impl<B: Backend> Backward<B, 1> for DivScalar {\n type State = FloatElem<B>;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let tmp = 1.0 / ops.state.elem::<f32>();\n B::float_mul_scalar(grad, 
tmp.elem())\n });\n }\n }\n\n match DivScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroDivScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(rhs, B::float_div_scalar(lhs.primitive, rhs)),\n OpsKind::UnTracked(prep) => prep.finish(B::float_div_scalar(lhs.primitive, rhs)),\n }\n }\n\n fn float_remainder(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Rem;\n\n retro_binary!(RetroRem, B::float_remainder);\n\n impl<B: Backend> Backward<B, 2> for Rem {\n type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (lhs, rhs, broadcast) = ops.state;\n let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));\n let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n // remainder(x, y) = x - floor(x / y) * y\n // partial(x - floor(x / y) * y, x) = 1\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n // partial(x - floor(x / y) * y, y) = - floor(x / y)\n let rhs = rhs.unwrap();\n let lhs = lhs.unwrap();\n let value = B::float_neg(B::float_floor(B::float_div(lhs, rhs)));\n let grad = B::float_mul(grad, value);\n broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let lhs_tracked = lhs.is_tracked();\n let rhs_tracked = rhs.is_tracked();\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match Rem\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroRem::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));\n let rhs_state = (lhs_tracked || rhs_tracked).then(|| prep.checkpoint(&rhs));\n\n prep.finish(\n (lhs_state, 
rhs_state, broadcast),\n B::float_remainder(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_remainder(lhs.primitive, rhs.primitive))\n }\n }\n }\n\n fn float_remainder_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct RemainderScalar;\n\n retro_unary_scalar!(RetroRemainderScalar, B::float_remainder_scalar);\n\n impl<B: Backend> Backward<B, 1> for RemainderScalar {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| grad);\n }\n }\n\n RemainderScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroRemainderScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateless(B::float_remainder_scalar(lhs.primitive, rhs))\n }\n\n fn float_matmul(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Matmul;\n\n impl<B: Backend> Backward<B, 2> for Matmul {\n type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (lhs, rhs, broadcast) = ops.state;\n let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));\n let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let rhs = B::float_transpose(rhs.unwrap());\n let grad = B::float_matmul(grad, rhs);\n\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n let lhs = B::float_transpose(lhs.unwrap());\n let grad = B::float_matmul(lhs, grad);\n\n broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let lhs_tracked = lhs.is_tracked();\n let rhs_tracked = rhs.is_tracked();\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match Matmul\n .prepare::<C>([lhs.node.clone(), 
rhs.node.clone()])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));\n let rhs_state = lhs_tracked.then(|| prep.checkpoint(&rhs));\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n B::float_matmul(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_matmul(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_neg(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Neg;\n\n retro_unary!(RetroNeg, B::float_neg);\n\n impl<B: Backend> Backward<B, 1> for Neg {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| B::float_neg(grad));\n }\n }\n\n Neg.prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroNeg::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateless(B::float_neg(tensor.primitive))\n }\n\n fn float_recip(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Recip;\n\n retro_unary!(RetroRecip, B::float_recip);\n\n impl<B: Backend> Backward<B, 1> for Recip {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let tensor = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let tmp = B::float_powf_scalar(tensor, -2.0);\n let value = B::float_neg(tmp);\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Recip\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroRecip::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_recip(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_recip(tensor.primitive)),\n }\n }\n\n fn 
float_swap_dims(tensor: FloatTensor<Self>, dim1: usize, dim2: usize) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct SwapDim;\n\n #[derive(new, Debug)]\n struct RetroSwapDims<B: Backend> {\n input_id: NodeID,\n dim1: usize,\n dim2: usize,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroSwapDims<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_swap_dims(input, self.dim1, self.dim2);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for SwapDim {\n type State = (usize, usize);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim1, dim2) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_swap_dims(grad, dim2, dim1)\n });\n }\n }\n\n match SwapDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSwapDims::<B>::new(tensor.node.id, dim1, dim2))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (dim1, dim2),\n B::float_swap_dims(tensor.primitive, dim1, dim2),\n ),\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_swap_dims(tensor.primitive, dim1, dim2))\n }\n }\n }\n\n fn float_permute(tensor: FloatTensor<Self>, axes: &[usize]) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct PermuteDim;\n\n #[derive(new, Debug)]\n struct RetroPermuteDims<B: Backend> {\n input_id: NodeID,\n axes: Vec<usize>,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroPermuteDims<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_permute(input, &self.axes);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for PermuteDim {\n type State = Vec<usize>;\n\n fn backward(\n self,\n ops: 
Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let axes = ops.state;\n\n let mut inverse = vec![0usize; axes.len()];\n axes.iter()\n .enumerate()\n .for_each(|(i, &axis)| inverse[axis] = i);\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_permute(grad, &inverse)\n });\n }\n }\n\n match PermuteDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroPermuteDims::<B>::new(tensor.node.id, axes.to_vec()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n prep.finish(axes.to_vec(), B::float_permute(tensor.primitive, axes))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_permute(tensor.primitive, axes)),\n }\n }\n\n fn float_flip(tensor: FloatTensor<Self>, axes: &[usize]) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct FlipDim;\n\n #[derive(new, Debug)]\n struct RetroFlipDims<B: Backend> {\n input_id: NodeID,\n axes: Vec<usize>,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroFlipDims<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_flip(input, &self.axes);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for FlipDim {\n type State = Vec<usize>;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let axes = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_flip(grad, &axes)\n });\n }\n }\n\n match FlipDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroFlipDims::<B>::new(tensor.node.id, axes.to_vec()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n prep.finish(axes.to_vec(), B::float_flip(tensor.primitive, axes))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_flip(tensor.primitive, axes)),\n }\n }\n\n fn 
float_reshape(tensor: FloatTensor<Self>, shape: Shape) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct ReshapeDim;\n\n #[derive(new, Debug)]\n struct RetroReshape<B: Backend> {\n input_id: NodeID,\n shape: Shape,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroReshape<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_reshape(input, self.shape.clone());\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for ReshapeDim {\n type State = (Shape, Shape);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape_original, shape) = ops.state;\n let ndims_out = shape.num_dims();\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let shape_grad = grad.shape();\n let mut grad = grad;\n\n for i in 0..ndims_out {\n if shape.dims[i] == 1 && shape_grad.dims[i] != 1 {\n grad = B::float_sum_dim(grad, i);\n }\n }\n\n B::float_reshape(grad, shape_original)\n });\n }\n }\n\n match ReshapeDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroReshape::<B>::new(tensor.node.id, shape.clone()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (tensor.primitive.shape(), shape.clone()),\n B::float_reshape(tensor.primitive, shape),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_reshape(tensor.primitive, shape)),\n }\n }\n\n fn float_gather(\n dim: usize,\n tensor: FloatTensor<Self>,\n indices: IntTensor<B>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Gather;\n\n impl<B: Backend> Backward<B, 1> for Gather {\n type State = (usize, IntTensor<B>, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, indices, shape, device) = ops.state;\n\n unary::<B, _>(ops.parents, 
ops.node, grads, |grad| {\n let zeros = B::float_zeros(shape, &device);\n B::float_scatter(dim, zeros, indices, grad)\n });\n }\n }\n\n match Gather\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n dim,\n indices.clone(),\n tensor.primitive.shape(),\n B::float_device(&tensor.primitive),\n ),\n B::float_gather(dim, tensor.primitive, indices),\n ),\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_gather(dim, tensor.primitive, indices))\n }\n }\n }\n\n fn float_scatter(\n dim: usize,\n tensor: FloatTensor<Self>,\n indices: IntTensor<B>,\n value: FloatTensor<Self>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Scatter;\n\n impl<B: Backend> Backward<B, 2> for Scatter {\n type State = (usize, IntTensor<B>, Shape, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, indices, shape_lhs, shape_rhs, device) = ops.state;\n let [indices_4lhs, indices_4rhs] = duplicate(&ops.parents, Some(indices));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let zeros = B::float_zeros(shape_lhs, &device);\n B::float_scatter(dim, grad, indices_4lhs.unwrap(), zeros)\n },\n |grad| {\n let zeros = B::float_zeros(shape_rhs, &device);\n B::float_scatter(dim, zeros, indices_4rhs.unwrap(), grad)\n },\n );\n }\n }\n\n match Scatter\n .prepare::<C>([tensor.node, value.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n dim,\n indices.clone(),\n tensor.primitive.shape(),\n value.primitive.shape(),\n B::float_device(&value.primitive),\n ),\n B::float_scatter(dim, tensor.primitive, indices, value.primitive),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_scatter(\n dim,\n tensor.primitive,\n indices,\n value.primitive,\n )),\n }\n }\n\n fn float_select(\n tensor: FloatTensor<Self>,\n dim: usize,\n indices: IntTensor<B>,\n ) -> FloatTensor<Self> {\n 
#[derive(Debug)]\n struct Select;\n\n #[derive(new, Debug)]\n struct RetroSelect<B: Backend> {\n input_id: NodeID,\n dim: usize,\n indices: IntTensor<B>,\n }\n\n impl<B: Backend> RetroForward for RetroSelect<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_select(input, self.dim, self.indices.clone());\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for Select {\n type State = (usize, IntTensor<B>, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, indices, shape, device) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let zeros = B::float_zeros(shape, &device);\n B::float_select_assign(zeros, dim, indices, grad)\n });\n }\n }\n\n match Select\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSelect::<B>::new(tensor.node.id, dim, indices.clone()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n dim,\n indices.clone(),\n tensor.primitive.shape(),\n B::float_device(&tensor.primitive),\n ),\n B::float_select(tensor.primitive, dim, indices),\n ),\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_select(tensor.primitive, dim, indices))\n }\n }\n }\n\n fn float_select_assign(\n tensor: FloatTensor<Self>,\n dim: usize,\n indices: IntTensor<B>,\n value: FloatTensor<Self>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct IndexSelectDimAssign;\n\n #[derive(new, Debug)]\n struct RetroSelectAssign<B: Backend> {\n tensor_id: NodeID,\n dim: usize,\n indices: IntTensor<B>,\n value_id: NodeID,\n }\n\n impl<B: Backend> RetroForward for RetroSelectAssign<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);\n let value = 
states.get_state::<B::FloatTensorPrimitive>(&self.value_id);\n let out = B::float_select_assign(tensor, self.dim, self.indices.clone(), value);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 2> for IndexSelectDimAssign {\n type State = (usize, IntTensor<B>);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, indices) = ops.state;\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| grad,\n |grad| B::float_select(grad, dim, indices),\n );\n }\n }\n\n match IndexSelectDimAssign\n .prepare::<C>([tensor.node.clone(), value.node.clone()])\n .memory_bound()\n .retro_forward(RetroSelectAssign::<B>::new(\n tensor.node.id,\n dim,\n indices.clone(),\n value.node.id,\n ))\n .parents([&tensor, &value])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (dim, indices.clone()),\n B::float_select_assign(tensor.primitive, dim, indices, value.primitive),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_select_assign(\n tensor.primitive,\n dim,\n indices,\n value.primitive,\n )),\n }\n }\n\n fn float_slice(\n tensor: FloatTensor<Self>,\n ranges: &[core::ops::Range<usize>],\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Index;\n\n #[derive(new, Debug)]\n struct RetroSlice<B: Backend> {\n tensor_id: NodeID,\n ranges: Vec<core::ops::Range<usize>>,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroSlice<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);\n let out = B::float_slice(tensor, &self.ranges);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for Index {\n type State = (Vec<core::ops::Range<usize>>, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (ranges, shape, device) = ops.state;\n\n 
unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let zeros = B::float_zeros(shape, &device);\n B::float_slice_assign(zeros, &ranges, grad)\n });\n }\n }\n\n match Index\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSlice::<B>::new(tensor.node.id, ranges.to_vec()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n ranges.to_vec(),\n tensor.primitive.shape(),\n B::float_device(&tensor.primitive),\n ),\n B::float_slice(tensor.primitive, ranges),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_slice(tensor.primitive, ranges)),\n }\n }\n\n fn float_slice_assign(\n tensor: FloatTensor<Self>,\n ranges: &[core::ops::Range<usize>],\n value: FloatTensor<Self>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct SliceAssign;\n\n #[derive(new, Debug)]\n struct RetroSliceAssign<B: Backend> {\n tensor_id: NodeID,\n ranges: Vec<core::ops::Range<usize>>,\n value_id: NodeID,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroSliceAssign<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);\n let value = states.get_state::<B::FloatTensorPrimitive>(&self.value_id);\n let out = B::float_slice_assign(tensor, &self.ranges, value);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 2> for SliceAssign {\n type State = (Vec<core::ops::Range<usize>>, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (ranges, shape_rhs, device) = ops.state;\n let [ranges_4lhs, ranges_4rhs] = duplicate(&ops.parents, Some(ranges));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let zeros = B::float_zeros(shape_rhs, &device);\n B::float_slice_assign(grad, &ranges_4lhs.unwrap(), zeros)\n },\n |grad| B::float_slice(grad, &ranges_4rhs.unwrap()),\n );\n }\n }\n\n match 
SliceAssign\n .prepare::<C>([tensor.node.clone(), value.node.clone()])\n .memory_bound()\n .retro_forward(RetroSliceAssign::<B>::new(\n tensor.node.id,\n ranges.to_vec(),\n value.node.id,\n ))\n .parents([&tensor, &value])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n ranges.to_vec(),\n value.primitive.shape(),\n B::float_device(&value.primitive),\n ),\n B::float_slice_assign(tensor.primitive, ranges, value.primitive),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_slice_assign(\n tensor.primitive,\n ranges,\n value.primitive,\n )),\n }\n }\n\n fn float_mask_where(\n tensor: FloatTensor<Self>,\n mask: BoolTensor<Self>,\n source: FloatTensor<Self>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct MaskWhere;\n\n impl<B: Backend> Backward<B, 2> for MaskWhere {\n type State = (BoolTensor<B>, Shape, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (mask, shape_lhs, shape_rhs, device) = ops.state;\n let [mask_4lhs, mask_4rhs] = duplicate(&ops.parents, Some(mask));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let zeros = B::float_zeros(shape_lhs.clone(), &device);\n let grad = B::float_mask_where(grad, mask_4lhs.unwrap(), zeros);\n\n broadcast_shape::<B>(grad, &shape_lhs)\n },\n |grad| {\n let zeros = B::float_zeros(shape_rhs.clone(), &device);\n let grad = B::float_mask_where(zeros, mask_4rhs.unwrap(), grad);\n\n broadcast_shape::<B>(grad, &shape_rhs)\n },\n );\n }\n }\n\n match MaskWhere\n .prepare::<C>([tensor.node, source.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n mask.clone(),\n tensor.primitive.shape(),\n source.primitive.shape(),\n B::float_device(&source.primitive),\n ),\n B::float_mask_where(tensor.primitive, mask, source.primitive),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_mask_where(\n tensor.primitive,\n mask,\n source.primitive,\n )),\n }\n 
}\n\n fn float_mask_fill(\n tensor: FloatTensor<Self>,\n mask: BoolTensor<B>,\n value: FloatElem<B>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct MaskFill;\n\n impl<B: Backend> Backward<B, 1> for MaskFill {\n type State = BoolTensor<B>;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_mask_fill(grad, ops.state, 0.elem())\n });\n }\n }\n\n match MaskFill\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n mask.clone(),\n B::float_mask_fill(tensor.primitive, mask, value),\n ),\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_mask_fill(tensor.primitive, mask, value))\n }\n }\n }\n\n fn float_equal(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {\n B::float_equal(lhs.primitive, rhs.primitive)\n }\n\n fn float_equal_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {\n B::float_equal_elem(lhs.primitive, rhs)\n }\n\n fn float_greater(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {\n B::float_greater(lhs.primitive, rhs.primitive)\n }\n\n fn float_greater_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {\n B::float_greater_elem(lhs.primitive, rhs)\n }\n\n fn float_greater_equal(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {\n B::float_greater_equal(lhs.primitive, rhs.primitive)\n }\n\n fn float_greater_equal_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {\n B::float_greater_equal_elem(lhs.primitive, rhs)\n }\n\n fn float_lower(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {\n B::float_lower(lhs.primitive, rhs.primitive)\n }\n\n fn float_lower_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {\n B::float_lower_elem(lhs.primitive, rhs)\n }\n\n fn float_lower_equal(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> 
BoolTensor<B> {\n B::float_lower_equal(lhs.primitive, rhs.primitive)\n }\n\n fn float_lower_equal_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {\n B::float_lower_equal_elem(lhs.primitive, rhs)\n }\n\n fn float_detach(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n // When we detach a tensor, we remove it from the graph, but we still want to keep the\n // `require_grad` setting.\n let is_require_grad = Self::float_is_require_grad(&tensor);\n let tensor = AutodiffTensor::new(tensor.primitive);\n\n match is_require_grad {\n true => tensor.require_grad(),\n false => tensor,\n }\n }\n\n fn float_set_require_grad(tensor: FloatTensor<Self>, require_grad: bool) -> FloatTensor<Self> {\n if require_grad {\n return tensor.require_grad();\n }\n\n AutodiffTensor::new(tensor.primitive)\n }\n\n fn float_is_require_grad(tensor: &FloatTensor<Self>) -> bool {\n matches!(tensor.node.requirement, Requirement::Grad)\n }\n\n fn float_mean(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Mean;\n\n impl<B: Backend> Backward<B, 1> for Mean {\n type State = Shape;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let shape = ops.state;\n let val = 1_f64 / shape.num_elements() as f64;\n let ones = B::float_ones(shape, &B::float_device(&grad));\n let val = B::float_mul_scalar(ones, val.elem());\n\n let grad = unsqueeze_like::<B>(grad, val.shape());\n B::float_mul(val, grad)\n });\n }\n }\n\n match Mean.prepare::<C>([tensor.node]).compute_bound().stateful() {\n OpsKind::Tracked(prep) => {\n prep.finish(tensor.primitive.shape(), B::float_mean(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_mean(tensor.primitive)),\n }\n }\n\n fn float_sum(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sum;\n\n impl<B: Backend> Backward<B, 1> for Sum {\n type State = Shape;\n\n 
fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let val = B::float_ones(ops.state, &B::float_device(&grad));\n\n let grad = unsqueeze_like::<B>(grad, val.shape());\n B::float_mul(val, grad)\n });\n }\n }\n\n match Sum.prepare::<C>([tensor.node]).compute_bound().stateful() {\n OpsKind::Tracked(prep) => {\n prep.finish(tensor.primitive.shape(), B::float_sum(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_sum(tensor.primitive)),\n }\n }\n\n fn float_mean_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct MeanDim;\n\n impl<B: Backend> Backward<B, 1> for MeanDim {\n type State = (Shape, usize);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape, dim) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let val = 1_f64 / shape.dims[dim] as f64;\n let ones = B::float_ones(shape, &B::float_device(&grad));\n let val = B::float_mul_scalar(ones, B::FloatElem::from_elem(val));\n\n let grad = B::float_sum_dim(grad, dim);\n B::float_mul(val, grad)\n });\n }\n }\n\n match MeanDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (tensor.primitive.shape(), dim),\n B::float_mean_dim(tensor.primitive, dim),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_mean_dim(tensor.primitive, dim)),\n }\n }\n\n fn float_sum_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct SumDim;\n\n impl<B: Backend> Backward<B, 1> for SumDim {\n type State = (Shape, usize);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape, dim) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let ones = 
B::float_ones(shape, &B::float_device(&grad));\n let grad = B::float_sum_dim(grad, dim);\n\n B::float_mul(ones, grad)\n });\n }\n }\n\n match SumDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (tensor.primitive.shape(), dim),\n B::float_sum_dim(tensor.primitive, dim),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_sum_dim(tensor.primitive, dim)),\n }\n }\n\n fn float_argmax(tensor: FloatTensor<Self>, dim: usize) -> IntTensor<B> {\n B::float_argmax(tensor.primitive, dim)\n }\n\n fn float_argmin(tensor: FloatTensor<Self>, dim: usize) -> IntTensor<B> {\n B::float_argmin(tensor.primitive, dim)\n }\n\n fn float_exp(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Exp;\n\n retro_unary!(RetroExp, B::float_exp);\n\n impl<B: Backend> Backward<B, 1> for Exp {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n let output = B::float_exp(input);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_mul(grad, output)\n });\n }\n }\n\n match Exp\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroExp::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_exp(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_exp(tensor.primitive)),\n }\n }\n\n fn float_log(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Log;\n\n retro_unary!(RetroLog, B::float_log);\n\n impl<B: Backend> Backward<B, 1> for Log {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, 
ops.node, grads, |grad| {\n let value = B::float_powf_scalar(input, -1.0);\n B::float_mul(grad, value)\n });\n }\n }\n\n match Log\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroLog::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_log(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_log(tensor.primitive)),\n }\n }\n\n fn float_log1p(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Log1P;\n\n retro_unary!(RetroLog1P, B::float_log1p);\n\n impl<B: Backend> Backward<B, 1> for Log1P {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_add_scalar(input, 1.elem());\n let value = B::float_powf_scalar(value, -1.0);\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Log1P\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroLog1P::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_log1p(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_log1p(tensor.primitive)),\n }\n }\n\n fn float_powf_scalar(tensor: FloatTensor<Self>, value: f32) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct PowfScalar;\n\n #[derive(new, Debug)]\n struct RetroPowfScalar<B: Backend> {\n lhs_id: NodeID,\n rhs: f32,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroPowfScalar<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let lhs = states.get_state::<B::FloatTensorPrimitive>(&self.lhs_id);\n let out = B::float_powf_scalar(lhs, self.rhs);\n 
states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for PowfScalar {\n type State = (NodeID, f32);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (tensor_id, value) = ops.state;\n let tensor = checkpointer.retrieve_node_output(tensor_id);\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let tmp = B::float_powf_scalar(tensor, value - 1.0);\n let value = B::float_mul_scalar(tmp, value.elem());\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match PowfScalar\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroPowfScalar::<B>::new(tensor.node.id, value))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = (prep.checkpoint(&tensor), value);\n prep.finish(state, B::float_powf_scalar(tensor.primitive, value))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_powf_scalar(tensor.primitive, value)),\n }\n }\n\n fn float_sqrt(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sqrt;\n\n retro_unary!(RetroSqrt, B::float_sqrt);\n\n impl<B: Backend> Backward<B, 1> for Sqrt {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_div_scalar(B::float_powf_scalar(input, -0.5), 2.elem());\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Sqrt\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSqrt::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_sqrt(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_sqrt(tensor.primitive)),\n }\n }\n\n fn float_abs(tensor: FloatTensor<Self>) -> 
FloatTensor<Self> {\n #[derive(Debug)]\n struct Abs;\n\n retro_unary!(RetroAbs, B::float_abs);\n\n impl<B: Backend> Backward<B, 1> for Abs {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let tensor: B::FloatTensorPrimitive = checkpointer.retrieve_node_output(ops.state);\n let state = B::float_sign(tensor);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_mul(grad, state)\n });\n }\n }\n\n match Abs\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroAbs::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_abs(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_abs(tensor.primitive)),\n }\n }\n\n fn float_cos(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Cos;\n\n retro_unary!(RetroCos, B::float_cos);\n\n impl<B: Backend> Backward<B, 1> for Cos {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_neg(B::float_sin(input));\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Cos\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroCos::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_cos(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_cos(tensor.primitive)),\n }\n }\n\n fn float_sin(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sin;\n\n retro_unary!(RetroSin, B::float_sin);\n\n impl<B: Backend> Backward<B, 1> for Sin {\n type State = 
NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let state = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_cos(state);\n B::float_mul(grad, value)\n });\n }\n }\n\n match Sin\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSin::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_sin(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_sin(tensor.primitive)),\n }\n }\n\n fn float_tanh(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Tanh;\n\n retro_unary!(RetroTanh, B::float_tanh);\n\n impl<B: Backend> Backward<B, 1> for Tanh {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n let state = B::float_tanh(input);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_add_scalar(\n B::float_neg(B::float_powf_scalar(state, 2.0)),\n 1.elem(),\n );\n B::float_mul(grad, value)\n });\n }\n }\n\n match Tanh\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroTanh::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_tanh(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_tanh(tensor.primitive)),\n }\n }\n\n fn float_round(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Round;\n retro_unary!(RetroRound, B::float_round);\n\n impl<B: Backend> Backward<B, 1> for Round {\n type State = (Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n 
grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (shape, device) = ops.state;
                // round is piecewise-constant: its derivative is 0 almost
                // everywhere (undefined only on a measure-zero set), so the
                // gradient is a tensor of zeros regardless of the upstream grad.
                unary::<B, _>(ops.parents, ops.node, grads, |_grad| {
                    B::float_zeros(shape, &device)
                })
            }
        }

        match Round
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroRound::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(preps) => preps.finish(
                // State: output shape + device, needed to build the zero
                // gradient in `backward` without keeping the tensor alive.
                (tensor.primitive.shape(), B::float_device(&tensor.primitive)),
                B::float_round(tensor.primitive),
            ),
            OpsKind::UnTracked(preps) => preps.finish(B::float_round(tensor.primitive)),
        }
    }

    fn float_floor(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Floor;
        retro_unary!(RetroFloor, B::float_floor);

        impl<B: Backend> Backward<B, 1> for Floor {
            type State = (Shape, B::Device);

            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (shape, device) = ops.state;
                // floor is a step function: zero derivative almost everywhere,
                // so propagate an all-zeros gradient (same convention as round).
                unary::<B, _>(ops.parents, ops.node, grads, |_grad| {
                    B::float_zeros(shape, &device)
                })
            }
        }

        match Floor
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroFloor::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(preps) => preps.finish(
                (tensor.primitive.shape(), B::float_device(&tensor.primitive)),
                B::float_floor(tensor.primitive),
            ),
            OpsKind::UnTracked(preps) => preps.finish(B::float_floor(tensor.primitive)),
        }
    }

    fn float_ceil(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Ceil;
        retro_unary!(RetroCeil, B::float_ceil);

        impl<B: Backend> Backward<B, 1> for Ceil {
            type State = (Shape, B::Device);

            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (shape, device) = ops.state;
                // ceil, like floor/round, has zero derivative almost everywhere.
                unary::<B, _>(ops.parents, ops.node, grads, |_grad| {
                    B::float_zeros(shape, &device)
                })
            }
        }

        match Ceil
.prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroCeil::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (tensor.primitive.shape(), B::float_device(&tensor.primitive)),\n B::float_floor(tensor.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_floor(tensor.primitive)),\n }\n }\n\n fn float_erf(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Erf;\n\n retro_unary!(RetroErf, B::float_erf);\n\n impl<B: Backend> Backward<B, 1> for Erf {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let ops = checkpointer.retrieve_node_output(ops.state);\n let exponent = B::float_neg(B::float_powf_scalar(ops, 2.0));\n let numerator = B::float_mul_scalar(B::float_exp(exponent), 2.0.elem());\n let denominator = core::f64::consts::PI.sqrt().elem();\n let value = B::float_div_scalar(numerator, denominator);\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Erf\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroErf::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_erf(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_erf(tensor.primitive)),\n }\n }\n\n fn float_cat(tensors: Vec<FloatTensor<Self>>, dim: usize) -> FloatTensor<Self> {\n #[derive(new, Debug)]\n struct CatStep<B: Backend> {\n nodes: Vec<Option<NodeRef>>,\n // The dimension of each tensor along the dim dimension.\n // This indicates the number of dimension concatenated for each tensor.\n dim_sizes: Vec<usize>,\n output: NodeRef,\n phantom: PhantomData<B>,\n dim: usize,\n }\n\n impl<B: Backend> Step for CatStep<B> {\n fn step(self: Box<Self>, grads: &mut Gradients, 
_checkpointer: &mut Checkpointer) {\n let grad = grads.consume::<B>(&self.output);\n let ranges: Vec<_> = grad.shape().dims.iter().map(|v| 0..*v).collect();\n\n let mut current_index = 0;\n\n self.nodes\n .into_iter()\n .zip(self.dim_sizes)\n .filter_map(|(node, dim_size)| node.map(|node| (node, dim_size)))\n .for_each(|(node, dim_size)| {\n let mut ranges = ranges.clone();\n ranges[self.dim] = current_index..dim_size + current_index;\n current_index += dim_size;\n grads.register::<B>(node.id, B::float_slice(grad.clone(), &ranges));\n });\n }\n\n fn node(&self) -> NodeID {\n self.output.id\n }\n\n fn parents(&self) -> Vec<NodeID> {\n self.nodes\n .iter()\n .filter_map(|node| node.clone())\n .map(|node| node.id)\n .collect()\n }\n fn depth(&self) -> usize {\n self.output.order\n }\n }\n\n let mut nodes = Vec::with_capacity(tensors.len());\n let mut primitives = Vec::with_capacity(tensors.len());\n let mut dim_sizes = Vec::with_capacity(tensors.len());\n\n tensors.into_iter().for_each(|tensor| {\n dim_sizes.push(tensor.primitive.shape().dims[dim]);\n nodes.push(tensor.node);\n primitives.push(tensor.primitive);\n });\n\n let requirement = Requirement::from_nodes(&nodes);\n\n // For simplicity, this operation does not checkpoint anything\n let cat_computing_property = ComputingProperty::Ambiguous;\n let checkpointer_builder = CheckpointerBuilder::default();\n\n let output = B::float_cat(primitives, dim);\n if requirement.is_none() {\n return AutodiffTensor::from_parents(\n output,\n &nodes,\n requirement,\n cat_computing_property,\n );\n }\n\n let output =\n AutodiffTensor::from_parents(output, &nodes, requirement, cat_computing_property);\n let nodes = nodes\n .into_iter()\n .map(|node| node.clone_if_require_grad())\n .collect::<Vec<_>>();\n\n let ops = CatStep::<B>::new(nodes, dim_sizes, output.node.clone(), dim);\n output.register_step(ops, checkpointer_builder)\n }\n\n fn float_max_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {\n match 
MaxMinDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let shape = tensor.primitive.shape();\n let (tensor, index) = B::float_max_dim_with_indices(tensor.primitive, dim);\n prep.finish((index, shape), tensor)\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_max_dim(tensor.primitive, dim)),\n }\n }\n fn float_max_dim_with_indices(\n tensor: FloatTensor<Self>,\n dim: usize,\n ) -> (FloatTensor<Self>, IntTensor<B>) {\n match MaxMinDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let shape = tensor.primitive.shape();\n let (tensor, index) = B::float_max_dim_with_indices(tensor.primitive, dim);\n let tensor = prep.finish((index.clone(), shape), tensor);\n\n (tensor, index)\n }\n OpsKind::UnTracked(prep) => {\n let (tensor, index) = B::float_max_dim_with_indices(tensor.primitive, dim);\n let tensor = prep.finish(tensor);\n\n (tensor, index)\n }\n }\n }\n fn float_min_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {\n match MaxMinDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let shape = tensor.primitive.shape();\n let (tensor, index) = B::float_min_dim_with_indices(tensor.primitive, dim);\n prep.finish((index, shape), tensor)\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_min_dim(tensor.primitive, dim)),\n }\n }\n fn float_min_dim_with_indices(\n tensor: FloatTensor<Self>,\n dim: usize,\n ) -> (FloatTensor<Self>, IntTensor<B>) {\n match MaxMinDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let shape = tensor.primitive.shape();\n let (tensor, index) = B::float_min_dim_with_indices(tensor.primitive, dim);\n let tensor = prep.finish((index.clone(), shape), tensor);\n\n (tensor, index)\n }\n OpsKind::UnTracked(prep) => {\n let (tensor, index) = B::float_min_dim_with_indices(tensor.primitive, dim);\n let tensor = 
prep.finish(tensor);\n\n (tensor, index)\n }\n }\n }\n\n fn float_into_int(tensor: FloatTensor<Self>) -> <Autodiff<B> as Backend>::IntTensorPrimitive {\n B::float_into_int(tensor.primitive)\n }\n\n fn float_powf(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct PowF;\n\n retro_binary!(RetroPowf, B::float_powf);\n\n impl<B: Backend> Backward<B, 2> for PowF {\n type State = (NodeID, NodeID, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (lhs_id, rhs_id, broadcast) = ops.state;\n let lhs: B::FloatTensorPrimitive = checkpointer.retrieve_node_output(lhs_id);\n let rhs: B::FloatTensorPrimitive = checkpointer.retrieve_node_output(rhs_id);\n\n // Both lhs and rhs are needed for both lhs and rhs gradients, but we clone them\n // the number of times required by the parents specification.\n let [rhs_4lhs, rhs_4rhs] = duplicate(&ops.parents, Some(rhs));\n let [lhs_4lhs, lhs_4rhs] = duplicate(&ops.parents, Some(lhs));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n //rhs*(lhs.val**(rhs-1))*grad\n let rhs1 = rhs_4lhs.unwrap();\n let rhs2 = rhs1.clone();\n let lhs = lhs_4lhs.unwrap();\n\n let tmp = B::float_powf(\n lhs,\n B::float_sub_scalar(rhs1, B::FloatElem::from_elem(1.0)),\n );\n let value = B::float_mul(tmp, rhs2);\n let grad = B::float_mul(grad, value);\n\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n //lhs**rhs * ln(lhs) * grad\n let rhs = rhs_4rhs.unwrap();\n let lhs1 = lhs_4rhs.unwrap();\n let lhs2 = lhs1.clone();\n let tmp = B::float_powf(lhs1, rhs);\n let value = B::float_mul(tmp, B::float_log(lhs2));\n let grad = B::float_mul(grad, value);\n\n broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match PowF\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n 
.retro_forward(RetroPowf::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = prep.checkpoint(&lhs);\n let rhs_state = prep.checkpoint(&rhs);\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n B::float_powf(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_powf(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_sign(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sign;\n\n retro_unary!(RetroSign, B::float_sign);\n\n impl<B: Backend> Backward<B, 1> for Sign {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad|\n // Always return 0 because the derivative of the sign function\n // does not contribute to gradient updates in a meaningful way.\n B::float_mul_scalar(grad, 0.elem()));\n }\n }\n\n Sign.prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSign::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateless(B::float_sign(tensor.primitive))\n }\n\n fn float_expand(tensor: FloatTensor<Self>, shape: Shape) -> FloatTensor<Self> {\n // D1: tensor, D2: shape\n #[derive(Debug)]\n struct ExpandDim;\n\n #[derive(new, Debug)]\n struct RetroExpand<B: Backend> {\n input_id: NodeID,\n shape: Shape,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroExpand<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_expand(input, self.shape.clone());\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for ExpandDim {\n type State = (Shape, Shape);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape_in, shape_out) = ops.state;\n let 
ndims_in = shape_in.num_dims();\n let ndims_out = shape_out.num_dims();\n\n let mut shape_expanded = vec![1; ndims_out];\n\n debug_assert!(ndims_out >= ndims_in);\n\n for i in 0..ndims_in {\n shape_expanded[i + (ndims_out - ndims_in)] = shape_in.dims[i];\n }\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let shape_grad = grad.shape();\n let mut grad = grad;\n\n #[allow(clippy::needless_range_loop)]\n for i in 0..ndims_out {\n if shape_expanded[i] == 1 && shape_grad.dims[i] != 1 {\n grad = B::float_sum_dim(grad, i);\n }\n }\n\n B::float_reshape(grad, shape_in)\n });\n }\n }\n\n match ExpandDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroExpand::<B>::new(tensor.node.id, shape.clone()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (tensor.primitive.shape(), shape.clone()),\n B::float_expand(tensor.primitive, shape),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_expand(tensor.primitive, shape)),\n }\n }\n\n fn float_sort(tensor: FloatTensor<Self>, dim: usize, descending: bool) -> FloatTensor<Self> {\n match super::sort::SortDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let shape = tensor.primitive.shape();\n let (tensor, indices) =\n B::float_sort_with_indices(tensor.primitive, dim, descending);\n prep.finish((indices, shape), tensor)\n }\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_sort(tensor.primitive, dim, descending))\n }\n }\n }\n\n fn float_sort_with_indices(\n tensor: FloatTensor<Self>,\n dim: usize,\n descending: bool,\n ) -> (FloatTensor<Self>, IntTensor<B>) {\n match super::sort::SortDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let shape = tensor.primitive.shape();\n let (tensor, indices) =\n B::float_sort_with_indices(tensor.primitive, dim, descending);\n let tensor = prep.finish((indices.clone(), shape), tensor);\n\n (tensor, indices)\n 
}\n OpsKind::UnTracked(prep) => {\n let (tensor, indices) =\n B::float_sort_with_indices(tensor.primitive, dim, descending);\n let tensor = prep.finish(tensor);\n\n (tensor, indices)\n }\n }\n }\n\n fn float_argsort(tensor: FloatTensor<Self>, dim: usize, descending: bool) -> IntTensor<B> {\n B::float_argsort(tensor.primitive, dim, descending)\n }\n\n fn float_repeat_dim(tensor: FloatTensor<Self>, dim: usize, times: usize) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Repeat;\n\n #[derive(new, Debug)]\n struct RetroRepeat<B: Backend> {\n tensor_id: NodeID,\n dim: usize,\n times: usize,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroRepeat<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);\n let out = B::float_repeat_dim(tensor, self.dim, self.times);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for Repeat {\n type State = (usize, usize);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, times) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let mut dims = grad.shape().dims;\n let orig_dim_size = dims[dim] / times;\n if orig_dim_size > 1 {\n dims[dim] = orig_dim_size;\n let orig_dims = dims.clone();\n dims.insert(dim + 1, times); // shape [..., orig_dim_size, times, ...]\n let grad = B::float_reshape(grad, Shape::from(dims));\n let grad = B::float_sum_dim(grad, dim + 1); // sum over repeat times\n B::float_reshape(grad, Shape::from(orig_dims))\n } else {\n B::float_sum_dim(grad, dim)\n }\n });\n }\n }\n\n match Repeat\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroRepeat::<B>::new(tensor.node.id, dim, times))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (dim, times),\n B::float_repeat_dim(tensor.primitive, dim, times),\n ),\n 
OpsKind::UnTracked(prep) => {\n prep.finish(B::float_repeat_dim(tensor.primitive, dim, times))\n }\n }\n }\n\n fn float_cast(tensor: FloatTensor<Self>, dtype: burn_tensor::FloatDType) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_cast(tensor.primitive, dtype))\n }\n\n // TODO: Implement float_prod and float_sum\n // https://github.com/tracel-ai/burn/issues/1458\n}",
"class_signature": "impl<B: Backend, C: CheckpointStrategy> FloatTensorOps<Self> for Autodiff<B, C>"
} |
float_remainder | burn-main/crates/burn-autodiff/src/ops/tensor.rs | fn float_remainder(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {
#[derive(Debug)]
struct Rem;
retro_binary!(RetroRem, B::float_remainder);
impl<B: Backend> Backward<B, 2> for Rem {
type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);
fn backward(
self,
ops: Ops<Self::State, 2>,
grads: &mut Gradients,
checkpointer: &mut Checkpointer,
) {
let (lhs, rhs, broadcast) = ops.state;
let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));
let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));
binary::<B, _, _>(
ops.parents,
ops.node,
grads,
|grad| {
// remainder(x, y) = x - floor(x / y) * y
// partial(x - floor(x / y) * y, x) = 1
broadcast.backward_lhs::<B>(grad)
},
|grad| {
// partial(x - floor(x / y) * y, y) = - floor(x / y)
let rhs = rhs.unwrap();
let lhs = lhs.unwrap();
let value = B::float_neg(B::float_floor(B::float_div(lhs, rhs)));
let grad = B::float_mul(grad, value);
broadcast.backward_rhs::<B>(grad)
},
);
}
}
let lhs_tracked = lhs.is_tracked();
let rhs_tracked = rhs.is_tracked();
let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);
match Rem
.prepare::<C>([lhs.node.clone(), rhs.node.clone()])
.memory_bound()
.retro_forward(RetroRem::<B>::new(lhs.node.id, rhs.node.id))
.parents([&lhs, &rhs])
.stateful()
{
OpsKind::Tracked(mut prep) => {
let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));
let rhs_state = (lhs_tracked || rhs_tracked).then(|| prep.checkpoint(&rhs));
prep.finish(
(lhs_state, rhs_state, broadcast),
B::float_remainder(lhs.primitive, rhs.primitive),
)
}
OpsKind::UnTracked(prep) => {
prep.finish(B::float_remainder(lhs.primitive, rhs.primitive))
}
}
} | use alloc::{boxed::Box, vec, vec::Vec};
use core::marker::PhantomData;
#[cfg(not(feature = "std"))]
#[allow(unused_imports, reason = "required on aarch64, unused on x86_64")]
use num_traits::float::Float;
use crate::{
Autodiff,
checkpoint::{
base::Checkpointer, builder::CheckpointerBuilder, retro_forward::RetroForward,
state::BackwardStates, strategy::CheckpointStrategy,
},
grads::Gradients,
graph::{ComputingProperty, NodeID, NodeRef, Requirement, Step},
ops::{Backward, Ops, OpsKind, binary, broadcast_shape, unary},
retro_binary, retro_unary, retro_unary_scalar,
tensor::AutodiffTensor,
utils::duplicate,
};
use burn_tensor::{
Device, ElementConversion, Shape, TensorData, TensorMetadata,
backend::Backend,
ops::{BoolTensor, FloatElem, FloatTensor, FloatTensorOps, IntTensor},
};
use super::maxmin::MaxMinDim;
// Unsqueeze op on primitive.
// Reshape `tensor` to the rank of `shape` by left-padding with size-1 axes,
// i.e. an "unsqueeze" on the leading dimensions. The trailing dimensions keep
// the tensor's own sizes; only the rank changes, never the element count.
// Panics (via underflow) if `shape` has fewer dims than `tensor` — same as before.
fn unsqueeze_like<B: Backend>(
    tensor: B::FloatTensorPrimitive,
    shape: Shape,
) -> B::FloatTensorPrimitive {
    let target_rank = shape.num_dims();
    let input_shape = tensor.shape();
    let leading_ones = target_rank - input_shape.num_dims();

    // Build [1, 1, ..., d0, d1, ...]: `leading_ones` ones followed by the
    // tensor's original dimensions.
    let dims: Vec<usize> = core::iter::repeat(1)
        .take(leading_ones)
        .chain(input_shape.dims.iter().copied())
        .collect();

    B::float_reshape(tensor, Shape::from(dims))
}
impl<B: Backend, C: CheckpointStrategy> FloatTensorOps<Self> for Autodiff<B, C> {
fn float_from_data(data: TensorData, device: &Device<Self>) -> FloatTensor<Self> {
    // A freshly created tensor is a leaf: wrap the inner backend primitive
    // with no gradient history attached.
    let primitive = B::float_from_data(data, device);
    AutodiffTensor::new(primitive)
}
fn float_random(
    shape: Shape,
    distribution: burn_tensor::Distribution,
    device: &Device<Self>,
) -> FloatTensor<Self> {
    // Random initialization produces a leaf tensor; no autodiff state needed.
    let primitive = B::float_random(shape, distribution, device);
    AutodiffTensor::new(primitive)
}
fn float_zeros(shape: Shape, device: &Device<Self>) -> FloatTensor<Self> {
    // Constant leaf tensor of zeros; wraps the inner backend result directly.
    let primitive = B::float_zeros(shape, device);
    AutodiffTensor::new(primitive)
}
fn float_ones(shape: Shape, device: &Device<Self>) -> FloatTensor<Self> {
    // Constant leaf tensor of ones; wraps the inner backend result directly.
    let primitive = B::float_ones(shape, device);
    AutodiffTensor::new(primitive)
}
async fn float_into_data(tensor: FloatTensor<Self>) -> TensorData {
    // Reading raw data out of a tensor is not differentiable; simply delegate
    // to the wrapped backend primitive.
    let inner = tensor.primitive;
    B::float_into_data(inner).await
}
fn float_device(tensor: &FloatTensor<Self>) -> Device<Self> {
    // Device lookup only inspects the wrapped primitive; no graph interaction.
    let inner = &tensor.primitive;
    B::float_device(inner)
}
fn float_to_device(tensor: FloatTensor<Self>, device: &Device<Self>) -> FloatTensor<Self> {
// Device transfer with autodiff: the forward moves the tensor to `device`,
// and the backward moves the incoming gradient back to the original device.
#[derive(Debug)]
struct ToDevice;
impl<B: Backend> Backward<B, 1> for ToDevice {
// State: the device the tensor lived on *before* the transfer.
type State = B::Device;
fn backward(
self,
ops: Ops<Self::State, 1>,
grads: &mut Gradients,
_checkpointer: &mut Checkpointer,
) {
// Gradient of a pure data movement is the identity, but it must be
// materialized on the source device.
unary::<B, _>(ops.parents, ops.node, grads, |grad| {
B::float_to_device(grad, &ops.state)
});
}
}
match ToDevice
.prepare::<C>([tensor.node])
.compute_bound()
.stateful()
{
OpsKind::Tracked(prep) => {
// Remember the original device so backward can return the grad there.
let device_old = B::float_device(&tensor.primitive);
prep.finish(device_old, B::float_to_device(tensor.primitive, device))
}
OpsKind::UnTracked(prep) => prep.finish(B::float_to_device(tensor.primitive, device)),
}
}
fn float_empty(shape: Shape, device: &Device<Self>) -> FloatTensor<Self> {
    // Uninitialized leaf tensor; wraps the inner backend result directly.
    let primitive = B::float_empty(shape, device);
    AutodiffTensor::new(primitive)
}
fn float_add(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {
// Element-wise addition with autodiff.
// d(l + r)/dl = d(l + r)/dr = 1, so each gradient is the incoming grad
// reduced (summed) back to the operand's original shape when broadcasting
// took place in the forward pass.
#[derive(Debug)]
struct Add;
retro_binary!(RetroAdd, B::float_add);
impl<B: Backend> Backward<B, 2> for Add {
// State: the pre-broadcast shapes of both operands.
type State = (Shape, Shape);
fn backward(
self,
ops: Ops<Self::State, 2>,
grads: &mut Gradients,
_checkpointer: &mut Checkpointer,
) {
let (shape_lhs, shape_rhs) = ops.state;
binary::<B, _, _>(
ops.parents,
ops.node,
grads,
|grad| broadcast_shape::<B>(grad, &shape_lhs),
|grad| broadcast_shape::<B>(grad, &shape_rhs),
);
}
}
match Add
.prepare::<C>([lhs.node.clone(), rhs.node.clone()])
.memory_bound()
.retro_forward(RetroAdd::<B>::new(lhs.node.id, rhs.node.id))
.parents([&lhs, &rhs])
.stateful()
{
OpsKind::Tracked(preps) => preps.finish(
(lhs.primitive.shape(), rhs.primitive.shape()),
B::float_add(lhs.primitive, rhs.primitive),
),
OpsKind::UnTracked(preps) => preps.finish(B::float_add(lhs.primitive, rhs.primitive)),
}
}
fn float_add_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {
#[derive(Debug)]
struct AddScalar;
retro_unary_scalar!(RetroAddScalar, B::float_add_scalar);
impl<B: Backend> Backward<B, 1> for AddScalar {
type State = ();
fn backward(
self,
ops: Ops<Self::State, 1>,
grads: &mut Gradients,
_checkpointer: &mut Checkpointer,
) {
unary::<B, _>(ops.parents, ops.node, grads, |grad| grad);
}
}
AddScalar
.prepare::<C>([lhs.node.clone()])
.memory_bound()
.retro_forward(RetroAddScalar::<B>::new(lhs.node.id, rhs))
.parents([&lhs])
.stateless(B::float_add_scalar(lhs.primitive, rhs))
}
fn float_sub(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {
// Element-wise subtraction with autodiff.
// d(l - r)/dl = 1 and d(l - r)/dr = -1, so the rhs gradient is negated;
// both gradients are then reduced back to the pre-broadcast shapes.
#[derive(Debug)]
struct Sub;
retro_binary!(RetroSub, B::float_sub);
impl<B: Backend> Backward<B, 2> for Sub {
// State: the pre-broadcast shapes of both operands.
type State = (Shape, Shape);
fn backward(
self,
ops: Ops<Self::State, 2>,
grads: &mut Gradients,
_checkpointer: &mut Checkpointer,
) {
let (shape_lhs, shape_rhs) = ops.state;
binary::<B, _, _>(
ops.parents,
ops.node,
grads,
|grad| broadcast_shape::<B>(grad, &shape_lhs),
|grad| broadcast_shape::<B>(B::float_neg(grad), &shape_rhs),
);
}
}
match Sub
.prepare::<C>([lhs.node.clone(), rhs.node.clone()])
.memory_bound()
.retro_forward(RetroSub::<B>::new(lhs.node.id, rhs.node.id))
.parents([&lhs, &rhs])
.stateful()
{
OpsKind::Tracked(preps) => preps.finish(
(lhs.primitive.shape(), rhs.primitive.shape()),
B::float_sub(lhs.primitive, rhs.primitive),
),
OpsKind::UnTracked(preps) => preps.finish(B::float_sub(lhs.primitive, rhs.primitive)),
}
}
fn float_sub_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {
#[derive(Debug)]
struct SubScalar;
retro_unary_scalar!(RetroSubScalar, B::float_sub_scalar);
impl<B: Backend> Backward<B, 1> for SubScalar {
type State = ();
fn backward(
self,
ops: Ops<Self::State, 1>,
grads: &mut Gradients,
_checkpointer: &mut Checkpointer,
) {
unary::<B, _>(ops.parents, ops.node, grads, |grad| grad);
}
}
SubScalar
.prepare::<C>([lhs.node.clone()])
.memory_bound()
.retro_forward(RetroSubScalar::<B>::new(lhs.node.id, rhs))
.parents([&lhs])
.stateless(B::float_sub_scalar(lhs.primitive, rhs))
}
fn float_mul(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {
// Element-wise multiplication with autodiff.
// d(l * r)/dl = r and d(l * r)/dr = l, so each gradient needs the *other*
// operand's forward value; that drives the cross-wise checkpoint conditions
// below.
#[derive(Debug)]
struct Mul;
retro_binary!(RetroMul, B::float_mul);
impl<B: Backend> Backward<B, 2> for Mul {
// State: (checkpointed lhs, checkpointed rhs, broadcast info for shape reduction).
type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);
fn backward(
self,
ops: Ops<Self::State, 2>,
grads: &mut Gradients,
checkpointer: &mut Checkpointer,
) {
let (lhs, rhs, broadcast) = ops.state;
let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));
let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));
binary::<B, _, _>(
ops.parents,
ops.node,
grads,
|grad| {
// d/dl: grad * r, reduced back to lhs's shape if broadcast.
let grad = B::float_mul(grad, rhs.unwrap());
broadcast.backward_lhs::<B>(grad)
},
|grad| {
// d/dr: grad * l, reduced back to rhs's shape if broadcast.
let grad = B::float_mul(grad, lhs.unwrap());
broadcast.backward_rhs::<B>(grad)
},
);
}
}
let lhs_tracked = lhs.is_tracked();
let rhs_tracked = rhs.is_tracked();
let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);
match Mul
.prepare::<C>([lhs.node.clone(), rhs.node.clone()])
.memory_bound()
.retro_forward(RetroMul::<B>::new(lhs.node.id, rhs.node.id))
.parents([&lhs, &rhs])
.stateful()
{
OpsKind::Tracked(mut prep) => {
// Checkpoint lazily: lhs's value is only needed to compute *rhs's*
// gradient, and vice versa.
let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));
let rhs_state = lhs_tracked.then(|| prep.checkpoint(&rhs));
prep.finish(
(lhs_state, rhs_state, broadcast),
B::float_mul(lhs.primitive, rhs.primitive),
)
}
OpsKind::UnTracked(prep) => prep.finish(B::float_mul(lhs.primitive, rhs.primitive)),
}
}
fn float_mul_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {
// Scalar multiplication with autodiff: d(l * c)/dl = c, so the backward
// pass multiplies the incoming gradient by the same scalar.
#[derive(Debug)]
struct MulScalar;
retro_unary_scalar!(RetroMulScalar, B::float_mul_scalar);
impl<B: Backend> Backward<B, 1> for MulScalar {
// State: the scalar factor, needed again in backward.
type State = FloatElem<B>;
fn backward(
self,
ops: Ops<Self::State, 1>,
grads: &mut Gradients,
_checkpointer: &mut Checkpointer,
) {
unary::<B, _>(ops.parents, ops.node, grads, |grad| {
B::float_mul_scalar(grad, ops.state)
});
}
}
match MulScalar
.prepare::<C>([lhs.node.clone()])
.memory_bound()
.retro_forward(RetroMulScalar::<B>::new(lhs.node.id, rhs))
.parents([&lhs])
.stateful()
{
OpsKind::Tracked(prep) => prep.finish(rhs, B::float_mul_scalar(lhs.primitive, rhs)),
OpsKind::UnTracked(prep) => prep.finish(B::float_mul_scalar(lhs.primitive, rhs)),
}
}
fn float_div(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {
// Element-wise division with autodiff.
// d(l / r)/dl = 1/r and d(l / r)/dr = -l / r^2, so backward needs the
// forward value of `rhs` for both gradients and `lhs` only for the rhs
// gradient.
#[derive(Debug)]
struct Div;
retro_binary!(RetroDiv, B::float_div);
impl<B: Backend> Backward<B, 2> for Div {
// State: (checkpointed lhs, checkpointed rhs, broadcast info for shape reduction).
type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);
fn backward(
self,
ops: Ops<Self::State, 2>,
grads: &mut Gradients,
checkpointer: &mut Checkpointer,
) {
let (lhs, rhs, broadcast) = ops.state;
let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));
let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));
// `rhs` is used by both gradient closures; clone it only as many times
// as the tracked parents actually require.
let [rhs_4lhs, rhs_4rhs] = duplicate(&ops.parents, rhs);
binary::<B, _, _>(
ops.parents,
ops.node,
grads,
|grad| {
// d/dl: grad * r^-1.
let rhs = rhs_4lhs.unwrap();
let value = B::float_powf_scalar(rhs, -1.0);
let grad = B::float_mul(grad, value);
broadcast.backward_lhs::<B>(grad)
},
|grad| {
// d/dr: grad * (-l / r^2).
let rhs = rhs_4rhs.unwrap();
let lhs = lhs.unwrap();
let value = B::float_div(B::float_neg(lhs), B::float_powf_scalar(rhs, 2.0));
let grad = B::float_mul(grad, value);
broadcast.backward_rhs::<B>(grad)
},
);
}
}
let lhs_tracked = lhs.is_tracked();
let rhs_tracked = rhs.is_tracked();
let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);
match Div
.prepare::<C>([lhs.node.clone(), rhs.node.clone()])
.memory_bound()
.retro_forward(RetroDiv::<B>::new(lhs.node.id, rhs.node.id))
.parents([&lhs, &rhs])
.stateful()
{
OpsKind::Tracked(mut prep) => {
// Checkpoint lazily: lhs is only needed for rhs's gradient; rhs is
// needed whenever either gradient will be computed.
let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));
let rhs_state = (lhs_tracked || rhs_tracked).then(|| prep.checkpoint(&rhs));
prep.finish(
(lhs_state, rhs_state, broadcast),
B::float_div(lhs.primitive, rhs.primitive),
)
}
OpsKind::UnTracked(prep) => prep.finish(B::float_div(lhs.primitive, rhs.primitive)),
}
}
fn float_div_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {
// Scalar division with autodiff: d(l / c)/dl = 1/c, so backward multiplies
// the incoming gradient by the reciprocal of the stored scalar.
#[derive(Debug)]
struct DivScalar;
retro_unary_scalar!(RetroDivScalar, B::float_div_scalar);
impl<B: Backend> Backward<B, 1> for DivScalar {
// State: the scalar divisor.
type State = FloatElem<B>;
fn backward(
self,
ops: Ops<Self::State, 1>,
grads: &mut Gradients,
_checkpointer: &mut Checkpointer,
) {
unary::<B, _>(ops.parents, ops.node, grads, |grad| {
// NOTE(review): the reciprocal is computed via an f32 round-trip,
// which may lose precision on f64 backends — confirm intended.
let tmp = 1.0 / ops.state.elem::<f32>();
B::float_mul_scalar(grad, tmp.elem())
});
}
}
match DivScalar
.prepare::<C>([lhs.node.clone()])
.memory_bound()
.retro_forward(RetroDivScalar::<B>::new(lhs.node.id, rhs))
.parents([&lhs])
.stateful()
{
OpsKind::Tracked(prep) => prep.finish(rhs, B::float_div_scalar(lhs.primitive, rhs)),
OpsKind::UnTracked(prep) => prep.finish(B::float_div_scalar(lhs.primitive, rhs)),
}
}
fn float_remainder(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {
// Element-wise remainder with autodiff, using the identity
// remainder(x, y) = x - floor(x / y) * y (differentiable almost everywhere).
#[derive(Debug)]
struct Rem;
retro_binary!(RetroRem, B::float_rem);
impl<B: Backend> Backward<B, 2> for Rem {
// State: (checkpointed lhs, checkpointed rhs, broadcast info for shape reduction).
type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);
fn backward(
self,
ops: Ops<Self::State, 2>,
grads: &mut Gradients,
checkpointer: &mut Checkpointer,
) {
let (lhs, rhs, broadcast) = ops.state;
let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));
let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));
binary::<B, _, _>(
ops.parents,
ops.node,
grads,
|grad| {
// remainder(x, y) = x - floor(x / y) * y
// partial(x - floor(x / y) * y, x) = 1
broadcast.backward_lhs::<B>(grad)
},
|grad| {
// partial(x - floor(x / y) * y, y) = - floor(x / y)
let rhs = rhs.unwrap();
let lhs = lhs.unwrap();
let value = B::float_neg(B::float_floor(B::float_div(lhs, rhs)));
let grad = B::float_mul(grad, value);
broadcast.backward_rhs::<B>(grad)
},
);
}
}
let lhs_tracked = lhs.is_tracked();
let rhs_tracked = rhs.is_tracked();
let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);
match Rem
.prepare::<C>([lhs.node.clone(), rhs.node.clone()])
.memory_bound()
.retro_forward(RetroRem::<B>::new(lhs.node.id, rhs.node.id))
.parents([&lhs, &rhs])
.stateful()
{
OpsKind::Tracked(mut prep) => {
// Both checkpointed values are consumed only by the rhs gradient
// closure (the lhs gradient is the identity).
// NOTE(review): the `lhs_tracked ||` in `rhs_state` mirrors
// `float_div`, but here the lhs gradient never reads `rhs`, so the
// condition looks broader than necessary — verify before tightening.
let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));
let rhs_state = (lhs_tracked || rhs_tracked).then(|| prep.checkpoint(&rhs));
prep.finish(
(lhs_state, rhs_state, broadcast),
B::float_remainder(lhs.primitive, rhs.primitive),
)
}
OpsKind::UnTracked(prep) => {
prep.finish(B::float_remainder(lhs.primitive, rhs.primitive))
}
}
}
fn float_remainder_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {
#[derive(Debug)]
struct RemainderScalar;
retro_unary_scalar!(RetroRemainderScalar, B::float_remainder_scalar);
impl<B: Backend> Backward<B, 1> for RemainderScalar {
type State = ();
fn backward(
self,
ops: Ops<Self::State, 1>,
grads: &mut Gradients,
_checkpointer: &mut Checkpointer,
) {
unary::<B, _>(ops.parents, ops.node, grads, |grad| grad);
}
}
RemainderScalar
.prepare::<C>([lhs.node.clone()])
.memory_bound()
.retro_forward(RetroRemainderScalar::<B>::new(lhs.node.id, rhs))
.parents([&lhs])
.stateless(B::float_remainder_scalar(lhs.primitive, rhs))
}
fn float_matmul(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {
// Matrix multiplication with autodiff.
// d(L @ R)/dL = grad @ R^T and d(L @ R)/dR = L^T @ grad; each gradient
// needs the other operand's forward value.
#[derive(Debug)]
struct Matmul;
impl<B: Backend> Backward<B, 2> for Matmul {
// State: (checkpointed lhs, checkpointed rhs, broadcast info for batch dims).
type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);
fn backward(
self,
ops: Ops<Self::State, 2>,
grads: &mut Gradients,
checkpointer: &mut Checkpointer,
) {
let (lhs, rhs, broadcast) = ops.state;
let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));
let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));
binary::<B, _, _>(
ops.parents,
ops.node,
grads,
|grad| {
let rhs = B::float_transpose(rhs.unwrap());
let grad = B::float_matmul(grad, rhs);
broadcast.backward_lhs::<B>(grad)
},
|grad| {
let lhs = B::float_transpose(lhs.unwrap());
let grad = B::float_matmul(lhs, grad);
broadcast.backward_rhs::<B>(grad)
},
);
}
}
let lhs_tracked = lhs.is_tracked();
let rhs_tracked = rhs.is_tracked();
let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);
// Declared compute-bound (no retro-forward): recomputing a matmul is
// considered more expensive than checkpointing its inputs.
match Matmul
.prepare::<C>([lhs.node.clone(), rhs.node.clone()])
.compute_bound()
.stateful()
{
OpsKind::Tracked(mut prep) => {
// Cross-wise checkpointing: each operand's value is needed only for
// the *other* operand's gradient.
let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));
let rhs_state = lhs_tracked.then(|| prep.checkpoint(&rhs));
prep.finish(
(lhs_state, rhs_state, broadcast),
B::float_matmul(lhs.primitive, rhs.primitive),
)
}
OpsKind::UnTracked(prep) => prep.finish(B::float_matmul(lhs.primitive, rhs.primitive)),
}
}
fn float_neg(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
#[derive(Debug)]
struct Neg;
retro_unary!(RetroNeg, B::float_neg);
impl<B: Backend> Backward<B, 1> for Neg {
type State = ();
fn backward(
self,
ops: Ops<Self::State, 1>,
grads: &mut Gradients,
_checkpointer: &mut Checkpointer,
) {
unary::<B, _>(ops.parents, ops.node, grads, |grad| B::float_neg(grad));
}
}
Neg.prepare::<C>([tensor.node.clone()])
.memory_bound()
.retro_forward(RetroNeg::<B>::new(tensor.node.id))
.parents([&tensor])
.stateless(B::float_neg(tensor.primitive))
}
fn float_recip(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
// Reciprocal with autodiff: d(1/x)/dx = -1/x^2, so backward needs the
// forward *input*, which is checkpointed.
#[derive(Debug)]
struct Recip;
retro_unary!(RetroRecip, B::float_recip);
impl<B: Backend> Backward<B, 1> for Recip {
// State: checkpoint id of the input tensor.
type State = NodeID;
fn backward(
self,
ops: Ops<Self::State, 1>,
grads: &mut Gradients,
checkpointer: &mut Checkpointer,
) {
let tensor = checkpointer.retrieve_node_output(ops.state);
unary::<B, _>(ops.parents, ops.node, grads, |grad| {
// grad * (-x^-2)
let tmp = B::float_powf_scalar(tensor, -2.0);
let value = B::float_neg(tmp);
B::float_mul(grad, value)
});
}
}
match Recip
.prepare::<C>([tensor.node.clone()])
.memory_bound()
.retro_forward(RetroRecip::<B>::new(tensor.node.id))
.parents([&tensor])
.stateful()
{
OpsKind::Tracked(mut prep) => {
let state = prep.checkpoint(&tensor);
prep.finish(state, B::float_recip(tensor.primitive))
}
OpsKind::UnTracked(prep) => prep.finish(B::float_recip(tensor.primitive)),
}
}
fn float_swap_dims(tensor: FloatTensor<Self>, dim1: usize, dim2: usize) -> FloatTensor<Self> {
// Dimension swap with autodiff. A transposition is its own inverse, so the
// backward pass swaps the same pair of dims on the gradient.
#[derive(Debug)]
struct SwapDim;
#[derive(new, Debug)]
struct RetroSwapDims<B: Backend> {
input_id: NodeID,
dim1: usize,
dim2: usize,
_backend: PhantomData<B>,
}
impl<B: Backend> RetroForward for RetroSwapDims<B> {
fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);
let out = B::float_swap_dims(input, self.dim1, self.dim2);
states.save(out_node, out)
}
}
impl<B: Backend> Backward<B, 1> for SwapDim {
// State: the swapped dimension pair.
type State = (usize, usize);
fn backward(
self,
ops: Ops<Self::State, 1>,
grads: &mut Gradients,
_checkpointer: &mut Checkpointer,
) {
let (dim1, dim2) = ops.state;
unary::<B, _>(ops.parents, ops.node, grads, |grad| {
// Swapping (dim2, dim1) undoes the forward swap (order is cosmetic).
B::float_swap_dims(grad, dim2, dim1)
});
}
}
match SwapDim
.prepare::<C>([tensor.node.clone()])
.memory_bound()
.retro_forward(RetroSwapDims::<B>::new(tensor.node.id, dim1, dim2))
.parents([&tensor])
.stateful()
{
OpsKind::Tracked(prep) => prep.finish(
(dim1, dim2),
B::float_swap_dims(tensor.primitive, dim1, dim2),
),
OpsKind::UnTracked(prep) => {
prep.finish(B::float_swap_dims(tensor.primitive, dim1, dim2))
}
}
}
fn float_permute(tensor: FloatTensor<Self>, axes: &[usize]) -> FloatTensor<Self> {
// Axis permutation with autodiff: the backward pass applies the inverse
// permutation to the gradient.
#[derive(Debug)]
struct PermuteDim;
#[derive(new, Debug)]
struct RetroPermuteDims<B: Backend> {
input_id: NodeID,
axes: Vec<usize>,
_backend: PhantomData<B>,
}
impl<B: Backend> RetroForward for RetroPermuteDims<B> {
fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);
let out = B::float_permute(input, &self.axes);
states.save(out_node, out)
}
}
impl<B: Backend> Backward<B, 1> for PermuteDim {
// State: the forward permutation.
type State = Vec<usize>;
fn backward(
self,
ops: Ops<Self::State, 1>,
grads: &mut Gradients,
_checkpointer: &mut Checkpointer,
) {
let axes = ops.state;
// Build the inverse permutation: if forward sent axis `axis` to
// position `i`, the inverse sends position `i` back to `axis`.
let mut inverse = vec![0usize; axes.len()];
axes.iter()
.enumerate()
.for_each(|(i, &axis)| inverse[axis] = i);
unary::<B, _>(ops.parents, ops.node, grads, |grad| {
B::float_permute(grad, &inverse)
});
}
}
match PermuteDim
.prepare::<C>([tensor.node.clone()])
.memory_bound()
.retro_forward(RetroPermuteDims::<B>::new(tensor.node.id, axes.to_vec()))
.parents([&tensor])
.stateful()
{
OpsKind::Tracked(prep) => {
prep.finish(axes.to_vec(), B::float_permute(tensor.primitive, axes))
}
OpsKind::UnTracked(prep) => prep.finish(B::float_permute(tensor.primitive, axes)),
}
}
fn float_flip(tensor: FloatTensor<Self>, axes: &[usize]) -> FloatTensor<Self> {
// Axis flip with autodiff. Flipping is its own inverse, so the backward
// pass flips the gradient along the same axes.
#[derive(Debug)]
struct FlipDim;
#[derive(new, Debug)]
struct RetroFlipDims<B: Backend> {
input_id: NodeID,
axes: Vec<usize>,
_backend: PhantomData<B>,
}
impl<B: Backend> RetroForward for RetroFlipDims<B> {
fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);
let out = B::float_flip(input, &self.axes);
states.save(out_node, out)
}
}
impl<B: Backend> Backward<B, 1> for FlipDim {
// State: the flipped axes.
type State = Vec<usize>;
fn backward(
self,
ops: Ops<Self::State, 1>,
grads: &mut Gradients,
_checkpointer: &mut Checkpointer,
) {
let axes = ops.state;
unary::<B, _>(ops.parents, ops.node, grads, |grad| {
B::float_flip(grad, &axes)
});
}
}
match FlipDim
.prepare::<C>([tensor.node.clone()])
.memory_bound()
.retro_forward(RetroFlipDims::<B>::new(tensor.node.id, axes.to_vec()))
.parents([&tensor])
.stateful()
{
OpsKind::Tracked(prep) => {
prep.finish(axes.to_vec(), B::float_flip(tensor.primitive, axes))
}
OpsKind::UnTracked(prep) => prep.finish(B::float_flip(tensor.primitive, axes)),
}
}
fn float_reshape(tensor: FloatTensor<Self>, shape: Shape) -> FloatTensor<Self> {
// Reshape with autodiff: the backward pass reshapes the gradient back to
// the original shape, first summing over any axis where the output shape
// was 1 but the gradient is larger (presumably grown by broadcasting in
// downstream ops — see the loop below).
#[derive(Debug)]
struct ReshapeDim;
#[derive(new, Debug)]
struct RetroReshape<B: Backend> {
input_id: NodeID,
shape: Shape,
_backend: PhantomData<B>,
}
impl<B: Backend> RetroForward for RetroReshape<B> {
fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);
let out = B::float_reshape(input, self.shape.clone());
states.save(out_node, out)
}
}
impl<B: Backend> Backward<B, 1> for ReshapeDim {
// State: (original input shape, requested output shape).
type State = (Shape, Shape);
fn backward(
self,
ops: Ops<Self::State, 1>,
grads: &mut Gradients,
_checkpointer: &mut Checkpointer,
) {
let (shape_original, shape) = ops.state;
let ndims_out = shape.num_dims();
unary::<B, _>(ops.parents, ops.node, grads, |grad| {
let shape_grad = grad.shape();
let mut grad = grad;
// Reduce any axis the gradient picked up beyond the reshape
// target before restoring the original shape.
for i in 0..ndims_out {
if shape.dims[i] == 1 && shape_grad.dims[i] != 1 {
grad = B::float_sum_dim(grad, i);
}
}
B::float_reshape(grad, shape_original)
});
}
}
match ReshapeDim
.prepare::<C>([tensor.node.clone()])
.memory_bound()
.retro_forward(RetroReshape::<B>::new(tensor.node.id, shape.clone()))
.parents([&tensor])
.stateful()
{
OpsKind::Tracked(prep) => prep.finish(
(tensor.primitive.shape(), shape.clone()),
B::float_reshape(tensor.primitive, shape),
),
OpsKind::UnTracked(prep) => prep.finish(B::float_reshape(tensor.primitive, shape)),
}
}
fn float_gather(
dim: usize,
tensor: FloatTensor<Self>,
indices: IntTensor<B>,
) -> FloatTensor<Self> {
// Gather with autodiff: the backward pass scatters the incoming gradient
// back into a zero tensor of the input's shape at the same indices.
#[derive(Debug)]
struct Gather;
impl<B: Backend> Backward<B, 1> for Gather {
// State: (dim, indices, input shape, input device) for rebuilding grads.
type State = (usize, IntTensor<B>, Shape, B::Device);
fn backward(
self,
ops: Ops<Self::State, 1>,
grads: &mut Gradients,
_checkpointer: &mut Checkpointer,
) {
let (dim, indices, shape, device) = ops.state;
unary::<B, _>(ops.parents, ops.node, grads, |grad| {
let zeros = B::float_zeros(shape, &device);
B::float_scatter(dim, zeros, indices, grad)
});
}
}
match Gather
.prepare::<C>([tensor.node])
.compute_bound()
.stateful()
{
OpsKind::Tracked(prep) => prep.finish(
(
dim,
indices.clone(),
tensor.primitive.shape(),
B::float_device(&tensor.primitive),
),
B::float_gather(dim, tensor.primitive, indices),
),
OpsKind::UnTracked(prep) => {
prep.finish(B::float_gather(dim, tensor.primitive, indices))
}
}
}
fn float_scatter(
dim: usize,
tensor: FloatTensor<Self>,
indices: IntTensor<B>,
value: FloatTensor<Self>,
) -> FloatTensor<Self> {
// Scatter with autodiff. Gradients flow to both the destination tensor and
// the scattered values; zero buffers shaped like each operand route the
// incoming gradient to the right positions.
// NOTE(review): the exact lhs/rhs gradient formulas depend on whether
// B::float_scatter assigns or accumulates at `indices` — confirm against
// the backend contract.
#[derive(Debug)]
struct Scatter;
impl<B: Backend> Backward<B, 2> for Scatter {
// State: (dim, indices, tensor shape, value shape, device).
type State = (usize, IntTensor<B>, Shape, Shape, B::Device);
fn backward(
self,
ops: Ops<Self::State, 2>,
grads: &mut Gradients,
_checkpointer: &mut Checkpointer,
) {
let (dim, indices, shape_lhs, shape_rhs, device) = ops.state;
// The indices are needed by both closures; clone only as required by
// the tracked parents.
let [indices_4lhs, indices_4rhs] = duplicate(&ops.parents, Some(indices));
binary::<B, _, _>(
ops.parents,
ops.node,
grads,
|grad| {
let zeros = B::float_zeros(shape_lhs, &device);
B::float_scatter(dim, grad, indices_4lhs.unwrap(), zeros)
},
|grad| {
let zeros = B::float_zeros(shape_rhs, &device);
B::float_scatter(dim, zeros, indices_4rhs.unwrap(), grad)
},
);
}
}
match Scatter
.prepare::<C>([tensor.node, value.node])
.compute_bound()
.stateful()
{
OpsKind::Tracked(prep) => prep.finish(
(
dim,
indices.clone(),
tensor.primitive.shape(),
value.primitive.shape(),
B::float_device(&value.primitive),
),
B::float_scatter(dim, tensor.primitive, indices, value.primitive),
),
OpsKind::UnTracked(prep) => prep.finish(B::float_scatter(
dim,
tensor.primitive,
indices,
value.primitive,
)),
}
}
fn float_select(
tensor: FloatTensor<Self>,
dim: usize,
indices: IntTensor<B>,
) -> FloatTensor<Self> {
// Index-select along `dim` with autodiff: the backward pass writes the
// incoming gradient back into a zero tensor of the input's shape via
// `select_assign` at the same indices.
#[derive(Debug)]
struct Select;
#[derive(new, Debug)]
struct RetroSelect<B: Backend> {
input_id: NodeID,
dim: usize,
indices: IntTensor<B>,
}
impl<B: Backend> RetroForward for RetroSelect<B> {
fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);
let out = B::float_select(input, self.dim, self.indices.clone());
states.save(out_node, out)
}
}
impl<B: Backend> Backward<B, 1> for Select {
// State: (dim, indices, input shape, input device).
type State = (usize, IntTensor<B>, Shape, B::Device);
fn backward(
self,
ops: Ops<Self::State, 1>,
grads: &mut Gradients,
_checkpointer: &mut Checkpointer,
) {
let (dim, indices, shape, device) = ops.state;
unary::<B, _>(ops.parents, ops.node, grads, |grad| {
let zeros = B::float_zeros(shape, &device);
B::float_select_assign(zeros, dim, indices, grad)
});
}
}
match Select
.prepare::<C>([tensor.node.clone()])
.memory_bound()
.retro_forward(RetroSelect::<B>::new(tensor.node.id, dim, indices.clone()))
.parents([&tensor])
.stateful()
{
OpsKind::Tracked(prep) => prep.finish(
(
dim,
indices.clone(),
tensor.primitive.shape(),
B::float_device(&tensor.primitive),
),
B::float_select(tensor.primitive, dim, indices),
),
OpsKind::UnTracked(prep) => {
prep.finish(B::float_select(tensor.primitive, dim, indices))
}
}
}
    fn float_select_assign(
        tensor: FloatTensor<Self>,
        dim: usize,
        indices: IntTensor<B>,
        value: FloatTensor<Self>,
    ) -> FloatTensor<Self> {
        // Differentiable select-assign of `value` into `tensor` along `dim`.
        #[derive(Debug)]
        struct IndexSelectDimAssign;
        // Recomputes the forward pass from checkpointed parents when needed.
        #[derive(new, Debug)]
        struct RetroSelectAssign<B: Backend> {
            tensor_id: NodeID,
            dim: usize,
            indices: IntTensor<B>,
            value_id: NodeID,
        }
        impl<B: Backend> RetroForward for RetroSelectAssign<B> {
            fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
                let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);
                let value = states.get_state::<B::FloatTensorPrimitive>(&self.value_id);
                let out = B::float_select_assign(tensor, self.dim, self.indices.clone(), value);
                states.save(out_node, out)
            }
        }
        impl<B: Backend> Backward<B, 2> for IndexSelectDimAssign {
            type State = (usize, IntTensor<B>);
            fn backward(
                self,
                ops: Ops<Self::State, 2>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (dim, indices) = ops.state;
                binary::<B, _, _>(
                    ops.parents,
                    ops.node,
                    grads,
                    // The destination tensor receives the gradient unchanged.
                    |grad| grad,
                    // The assigned value receives the gradient at the written rows.
                    |grad| B::float_select(grad, dim, indices),
                );
            }
        }
        match IndexSelectDimAssign
            .prepare::<C>([tensor.node.clone(), value.node.clone()])
            .memory_bound()
            .retro_forward(RetroSelectAssign::<B>::new(
                tensor.node.id,
                dim,
                indices.clone(),
                value.node.id,
            ))
            .parents([&tensor, &value])
            .stateful()
        {
            OpsKind::Tracked(prep) => prep.finish(
                (dim, indices.clone()),
                B::float_select_assign(tensor.primitive, dim, indices, value.primitive),
            ),
            OpsKind::UnTracked(prep) => prep.finish(B::float_select_assign(
                tensor.primitive,
                dim,
                indices,
                value.primitive,
            )),
        }
    }
    fn float_slice(
        tensor: FloatTensor<Self>,
        ranges: &[core::ops::Range<usize>],
    ) -> FloatTensor<Self> {
        // Differentiable slicing: the gradient of a slice is the gradient
        // written back into a zero tensor of the input's shape.
        #[derive(Debug)]
        struct Index;
        // Recomputes the slice from the checkpointed input when required.
        #[derive(new, Debug)]
        struct RetroSlice<B: Backend> {
            tensor_id: NodeID,
            ranges: Vec<core::ops::Range<usize>>,
            _backend: PhantomData<B>,
        }
        impl<B: Backend> RetroForward for RetroSlice<B> {
            fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
                let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);
                let out = B::float_slice(tensor, &self.ranges);
                states.save(out_node, out)
            }
        }
        impl<B: Backend> Backward<B, 1> for Index {
            type State = (Vec<core::ops::Range<usize>>, Shape, B::Device);
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (ranges, shape, device) = ops.state;
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    let zeros = B::float_zeros(shape, &device);
                    B::float_slice_assign(zeros, &ranges, grad)
                });
            }
        }
        match Index
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroSlice::<B>::new(tensor.node.id, ranges.to_vec()))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(prep) => prep.finish(
                (
                    ranges.to_vec(),
                    tensor.primitive.shape(),
                    B::float_device(&tensor.primitive),
                ),
                B::float_slice(tensor.primitive, ranges),
            ),
            OpsKind::UnTracked(prep) => prep.finish(B::float_slice(tensor.primitive, ranges)),
        }
    }
    fn float_slice_assign(
        tensor: FloatTensor<Self>,
        ranges: &[core::ops::Range<usize>],
        value: FloatTensor<Self>,
    ) -> FloatTensor<Self> {
        // Differentiable slice-assign: writes `value` into `tensor[ranges]`.
        #[derive(Debug)]
        struct SliceAssign;
        // Recomputes the forward pass from checkpointed parents when required.
        #[derive(new, Debug)]
        struct RetroSliceAssign<B: Backend> {
            tensor_id: NodeID,
            ranges: Vec<core::ops::Range<usize>>,
            value_id: NodeID,
            _backend: PhantomData<B>,
        }
        impl<B: Backend> RetroForward for RetroSliceAssign<B> {
            fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
                let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);
                let value = states.get_state::<B::FloatTensorPrimitive>(&self.value_id);
                let out = B::float_slice_assign(tensor, &self.ranges, value);
                states.save(out_node, out)
            }
        }
        impl<B: Backend> Backward<B, 2> for SliceAssign {
            type State = (Vec<core::ops::Range<usize>>, Shape, B::Device);
            fn backward(
                self,
                ops: Ops<Self::State, 2>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (ranges, shape_rhs, device) = ops.state;
                // Clone the ranges only as many times as there are tracked parents.
                let [ranges_4lhs, ranges_4rhs] = duplicate(&ops.parents, Some(ranges));
                binary::<B, _, _>(
                    ops.parents,
                    ops.node,
                    grads,
                    |grad| {
                        // lhs grad: the overwritten region contributed nothing,
                        // so zero it out in the incoming gradient.
                        let zeros = B::float_zeros(shape_rhs, &device);
                        B::float_slice_assign(grad, &ranges_4lhs.unwrap(), zeros)
                    },
                    // rhs grad: the written region of the incoming gradient.
                    |grad| B::float_slice(grad, &ranges_4rhs.unwrap()),
                );
            }
        }
        match SliceAssign
            .prepare::<C>([tensor.node.clone(), value.node.clone()])
            .memory_bound()
            .retro_forward(RetroSliceAssign::<B>::new(
                tensor.node.id,
                ranges.to_vec(),
                value.node.id,
            ))
            .parents([&tensor, &value])
            .stateful()
        {
            OpsKind::Tracked(prep) => prep.finish(
                (
                    ranges.to_vec(),
                    value.primitive.shape(),
                    B::float_device(&value.primitive),
                ),
                B::float_slice_assign(tensor.primitive, ranges, value.primitive),
            ),
            OpsKind::UnTracked(prep) => prep.finish(B::float_slice_assign(
                tensor.primitive,
                ranges,
                value.primitive,
            )),
        }
    }
    fn float_mask_where(
        tensor: FloatTensor<Self>,
        mask: BoolTensor<Self>,
        source: FloatTensor<Self>,
    ) -> FloatTensor<Self> {
        // Differentiable masked selection between `tensor` and `source`.
        #[derive(Debug)]
        struct MaskWhere;
        impl<B: Backend> Backward<B, 2> for MaskWhere {
            type State = (BoolTensor<B>, Shape, Shape, B::Device);
            fn backward(
                self,
                ops: Ops<Self::State, 2>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (mask, shape_lhs, shape_rhs, device) = ops.state;
                // Clone the mask only as many times as there are tracked parents.
                let [mask_4lhs, mask_4rhs] = duplicate(&ops.parents, Some(mask));
                binary::<B, _, _>(
                    ops.parents,
                    ops.node,
                    grads,
                    |grad| {
                        // lhs grad: zero where the mask selected `source`, then
                        // reduce any broadcast dims back to the lhs shape.
                        let zeros = B::float_zeros(shape_lhs.clone(), &device);
                        let grad = B::float_mask_where(grad, mask_4lhs.unwrap(), zeros);
                        broadcast_shape::<B>(grad, &shape_lhs)
                    },
                    |grad| {
                        // rhs grad: keep only where the mask selected `source`,
                        // then reduce back to the rhs shape.
                        let zeros = B::float_zeros(shape_rhs.clone(), &device);
                        let grad = B::float_mask_where(zeros, mask_4rhs.unwrap(), grad);
                        broadcast_shape::<B>(grad, &shape_rhs)
                    },
                );
            }
        }
        match MaskWhere
            .prepare::<C>([tensor.node, source.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => prep.finish(
                (
                    mask.clone(),
                    tensor.primitive.shape(),
                    source.primitive.shape(),
                    B::float_device(&source.primitive),
                ),
                B::float_mask_where(tensor.primitive, mask, source.primitive),
            ),
            OpsKind::UnTracked(prep) => prep.finish(B::float_mask_where(
                tensor.primitive,
                mask,
                source.primitive,
            )),
        }
    }
    fn float_mask_fill(
        tensor: FloatTensor<Self>,
        mask: BoolTensor<B>,
        value: FloatElem<B>,
    ) -> FloatTensor<Self> {
        // Differentiable masked fill with a scalar. Filled positions carry no
        // gradient, so the backward pass zeroes them out.
        #[derive(Debug)]
        struct MaskFill;
        impl<B: Backend> Backward<B, 1> for MaskFill {
            type State = BoolTensor<B>;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    B::float_mask_fill(grad, ops.state, 0.elem())
                });
            }
        }
        match MaskFill
            .prepare::<C>([tensor.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => prep.finish(
                mask.clone(),
                B::float_mask_fill(tensor.primitive, mask, value),
            ),
            OpsKind::UnTracked(prep) => {
                prep.finish(B::float_mask_fill(tensor.primitive, mask, value))
            }
        }
    }
    // Comparison operations return boolean tensors and are not differentiable,
    // so they delegate straight to the inner backend without graph tracking.
    fn float_equal(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {
        B::float_equal(lhs.primitive, rhs.primitive)
    }
    fn float_equal_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {
        B::float_equal_elem(lhs.primitive, rhs)
    }
    fn float_greater(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {
        B::float_greater(lhs.primitive, rhs.primitive)
    }
    fn float_greater_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {
        B::float_greater_elem(lhs.primitive, rhs)
    }
    fn float_greater_equal(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {
        B::float_greater_equal(lhs.primitive, rhs.primitive)
    }
    fn float_greater_equal_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {
        B::float_greater_equal_elem(lhs.primitive, rhs)
    }
    fn float_lower(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {
        B::float_lower(lhs.primitive, rhs.primitive)
    }
    fn float_lower_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {
        B::float_lower_elem(lhs.primitive, rhs)
    }
    fn float_lower_equal(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {
        B::float_lower_equal(lhs.primitive, rhs.primitive)
    }
    fn float_lower_equal_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {
        B::float_lower_equal_elem(lhs.primitive, rhs)
    }
fn float_detach(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
// When we detach a tensor, we remove it from the graph, but we still want to keep the
// `require_grad` setting.
let is_require_grad = Self::float_is_require_grad(&tensor);
let tensor = AutodiffTensor::new(tensor.primitive);
match is_require_grad {
true => tensor.require_grad(),
false => tensor,
}
}
fn float_set_require_grad(tensor: FloatTensor<Self>, require_grad: bool) -> FloatTensor<Self> {
if require_grad {
return tensor.require_grad();
}
AutodiffTensor::new(tensor.primitive)
}
    fn float_is_require_grad(tensor: &FloatTensor<Self>) -> bool {
        // A tensor requires gradients exactly when its node requirement is `Grad`.
        matches!(tensor.node.requirement, Requirement::Grad)
    }
    fn float_mean(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        // Differentiable full reduction to the mean. Each input element
        // receives grad / num_elements.
        #[derive(Debug)]
        struct Mean;
        impl<B: Backend> Backward<B, 1> for Mean {
            type State = Shape;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    let shape = ops.state;
                    let val = 1_f64 / shape.num_elements() as f64;
                    let ones = B::float_ones(shape, &B::float_device(&grad));
                    let val = B::float_mul_scalar(ones, val.elem());
                    // Broadcast the scalar gradient back to the input's rank.
                    let grad = unsqueeze_like::<B>(grad, val.shape());
                    B::float_mul(val, grad)
                });
            }
        }
        match Mean.prepare::<C>([tensor.node]).compute_bound().stateful() {
            OpsKind::Tracked(prep) => {
                prep.finish(tensor.primitive.shape(), B::float_mean(tensor.primitive))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_mean(tensor.primitive)),
        }
    }
    fn float_sum(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        // Differentiable full reduction to the sum. Each input element
        // receives the output gradient unchanged.
        #[derive(Debug)]
        struct Sum;
        impl<B: Backend> Backward<B, 1> for Sum {
            type State = Shape;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    let val = B::float_ones(ops.state, &B::float_device(&grad));
                    // Broadcast the scalar gradient back to the input's rank.
                    let grad = unsqueeze_like::<B>(grad, val.shape());
                    B::float_mul(val, grad)
                });
            }
        }
        match Sum.prepare::<C>([tensor.node]).compute_bound().stateful() {
            OpsKind::Tracked(prep) => {
                prep.finish(tensor.primitive.shape(), B::float_sum(tensor.primitive))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_sum(tensor.primitive)),
        }
    }
    fn float_mean_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {
        // Differentiable mean along one dimension. Each input element
        // receives grad / dims[dim].
        #[derive(Debug)]
        struct MeanDim;
        impl<B: Backend> Backward<B, 1> for MeanDim {
            type State = (Shape, usize);
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (shape, dim) = ops.state;
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    let val = 1_f64 / shape.dims[dim] as f64;
                    let ones = B::float_ones(shape, &B::float_device(&grad));
                    let val = B::float_mul_scalar(ones, B::FloatElem::from_elem(val));
                    let grad = B::float_sum_dim(grad, dim);
                    B::float_mul(val, grad)
                });
            }
        }
        match MeanDim
            .prepare::<C>([tensor.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => prep.finish(
                (tensor.primitive.shape(), dim),
                B::float_mean_dim(tensor.primitive, dim),
            ),
            OpsKind::UnTracked(prep) => prep.finish(B::float_mean_dim(tensor.primitive, dim)),
        }
    }
    fn float_sum_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {
        // Differentiable sum along one dimension. The gradient is broadcast
        // back across the reduced dimension via a ones tensor.
        #[derive(Debug)]
        struct SumDim;
        impl<B: Backend> Backward<B, 1> for SumDim {
            type State = (Shape, usize);
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (shape, dim) = ops.state;
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    let ones = B::float_ones(shape, &B::float_device(&grad));
                    let grad = B::float_sum_dim(grad, dim);
                    B::float_mul(ones, grad)
                });
            }
        }
        match SumDim
            .prepare::<C>([tensor.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => prep.finish(
                (tensor.primitive.shape(), dim),
                B::float_sum_dim(tensor.primitive, dim),
            ),
            OpsKind::UnTracked(prep) => prep.finish(B::float_sum_dim(tensor.primitive, dim)),
        }
    }
    // Argmax/argmin return integer indices and are not differentiable, so
    // they delegate directly to the inner backend.
    fn float_argmax(tensor: FloatTensor<Self>, dim: usize) -> IntTensor<B> {
        B::float_argmax(tensor.primitive, dim)
    }
    fn float_argmin(tensor: FloatTensor<Self>, dim: usize) -> IntTensor<B> {
        B::float_argmin(tensor.primitive, dim)
    }
    fn float_exp(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        // Differentiable exponential: d/dx exp(x) = exp(x).
        #[derive(Debug)]
        struct Exp;
        retro_unary!(RetroExp, B::float_exp);
        impl<B: Backend> Backward<B, 1> for Exp {
            // The checkpointed input node; the output is recomputed in backward.
            type State = NodeID;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                let input = checkpointer.retrieve_node_output(ops.state);
                let output = B::float_exp(input);
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    B::float_mul(grad, output)
                });
            }
        }
        match Exp
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroExp::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                let state = prep.checkpoint(&tensor);
                prep.finish(state, B::float_exp(tensor.primitive))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_exp(tensor.primitive)),
        }
    }
    fn float_log(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        // Differentiable natural logarithm: d/dx ln(x) = 1/x.
        #[derive(Debug)]
        struct Log;
        retro_unary!(RetroLog, B::float_log);
        impl<B: Backend> Backward<B, 1> for Log {
            type State = NodeID;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                let input = checkpointer.retrieve_node_output(ops.state);
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    // x^(-1) == 1/x.
                    let value = B::float_powf_scalar(input, -1.0);
                    B::float_mul(grad, value)
                });
            }
        }
        match Log
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroLog::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                let state = prep.checkpoint(&tensor);
                prep.finish(state, B::float_log(tensor.primitive))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_log(tensor.primitive)),
        }
    }
    fn float_log1p(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        // Differentiable log(1 + x): d/dx = 1/(1 + x).
        #[derive(Debug)]
        struct Log1P;
        retro_unary!(RetroLog1P, B::float_log1p);
        impl<B: Backend> Backward<B, 1> for Log1P {
            type State = NodeID;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                let input = checkpointer.retrieve_node_output(ops.state);
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    let value = B::float_add_scalar(input, 1.elem());
                    let value = B::float_powf_scalar(value, -1.0);
                    B::float_mul(grad, value)
                });
            }
        }
        match Log1P
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroLog1P::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                let state = prep.checkpoint(&tensor);
                prep.finish(state, B::float_log1p(tensor.primitive))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_log1p(tensor.primitive)),
        }
    }
    fn float_powf_scalar(tensor: FloatTensor<Self>, value: f32) -> FloatTensor<Self> {
        // Differentiable scalar power: d/dx x^v = v * x^(v-1).
        #[derive(Debug)]
        struct PowfScalar;
        // Recomputes the forward pass from the checkpointed input when needed.
        #[derive(new, Debug)]
        struct RetroPowfScalar<B: Backend> {
            lhs_id: NodeID,
            rhs: f32,
            _backend: PhantomData<B>,
        }
        impl<B: Backend> RetroForward for RetroPowfScalar<B> {
            fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
                let lhs = states.get_state::<B::FloatTensorPrimitive>(&self.lhs_id);
                let out = B::float_powf_scalar(lhs, self.rhs);
                states.save(out_node, out)
            }
        }
        impl<B: Backend> Backward<B, 1> for PowfScalar {
            type State = (NodeID, f32);
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                let (tensor_id, value) = ops.state;
                let tensor = checkpointer.retrieve_node_output(tensor_id);
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    let tmp = B::float_powf_scalar(tensor, value - 1.0);
                    let value = B::float_mul_scalar(tmp, value.elem());
                    B::float_mul(grad, value)
                });
            }
        }
        match PowfScalar
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroPowfScalar::<B>::new(tensor.node.id, value))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                let state = (prep.checkpoint(&tensor), value);
                prep.finish(state, B::float_powf_scalar(tensor.primitive, value))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_powf_scalar(tensor.primitive, value)),
        }
    }
    fn float_sqrt(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        // Differentiable square root: d/dx sqrt(x) = 1 / (2 * sqrt(x)).
        #[derive(Debug)]
        struct Sqrt;
        retro_unary!(RetroSqrt, B::float_sqrt);
        impl<B: Backend> Backward<B, 1> for Sqrt {
            type State = NodeID;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                let input = checkpointer.retrieve_node_output(ops.state);
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    // x^(-1/2) / 2 == 1 / (2 * sqrt(x)).
                    let value = B::float_div_scalar(B::float_powf_scalar(input, -0.5), 2.elem());
                    B::float_mul(grad, value)
                });
            }
        }
        match Sqrt
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroSqrt::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                let state = prep.checkpoint(&tensor);
                prep.finish(state, B::float_sqrt(tensor.primitive))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_sqrt(tensor.primitive)),
        }
    }
    fn float_abs(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        // Differentiable absolute value: the gradient is multiplied by sign(x).
        #[derive(Debug)]
        struct Abs;
        retro_unary!(RetroAbs, B::float_abs);
        impl<B: Backend> Backward<B, 1> for Abs {
            type State = NodeID;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                let tensor: B::FloatTensorPrimitive = checkpointer.retrieve_node_output(ops.state);
                let state = B::float_sign(tensor);
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    B::float_mul(grad, state)
                });
            }
        }
        match Abs
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroAbs::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                let state = prep.checkpoint(&tensor);
                prep.finish(state, B::float_abs(tensor.primitive))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_abs(tensor.primitive)),
        }
    }
    fn float_cos(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        // Differentiable cosine: d/dx cos(x) = -sin(x).
        #[derive(Debug)]
        struct Cos;
        retro_unary!(RetroCos, B::float_cos);
        impl<B: Backend> Backward<B, 1> for Cos {
            type State = NodeID;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                let input = checkpointer.retrieve_node_output(ops.state);
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    let value = B::float_neg(B::float_sin(input));
                    B::float_mul(grad, value)
                });
            }
        }
        match Cos
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroCos::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                let state = prep.checkpoint(&tensor);
                prep.finish(state, B::float_cos(tensor.primitive))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_cos(tensor.primitive)),
        }
    }
    fn float_sin(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        // Differentiable sine: d/dx sin(x) = cos(x).
        #[derive(Debug)]
        struct Sin;
        retro_unary!(RetroSin, B::float_sin);
        impl<B: Backend> Backward<B, 1> for Sin {
            type State = NodeID;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                let state = checkpointer.retrieve_node_output(ops.state);
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    let value = B::float_cos(state);
                    B::float_mul(grad, value)
                });
            }
        }
        match Sin
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroSin::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                let state = prep.checkpoint(&tensor);
                prep.finish(state, B::float_sin(tensor.primitive))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_sin(tensor.primitive)),
        }
    }
    fn float_tanh(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        // Differentiable tanh: d/dx tanh(x) = 1 - tanh(x)^2.
        #[derive(Debug)]
        struct Tanh;
        retro_unary!(RetroTanh, B::float_tanh);
        impl<B: Backend> Backward<B, 1> for Tanh {
            type State = NodeID;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                let input = checkpointer.retrieve_node_output(ops.state);
                // Recompute the output to form 1 - tanh(x)^2.
                let state = B::float_tanh(input);
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    let value = B::float_add_scalar(
                        B::float_neg(B::float_powf_scalar(state, 2.0)),
                        1.elem(),
                    );
                    B::float_mul(grad, value)
                });
            }
        }
        match Tanh
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroTanh::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                let state = prep.checkpoint(&tensor);
                prep.finish(state, B::float_tanh(tensor.primitive))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_tanh(tensor.primitive)),
        }
    }
    fn float_round(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        // Differentiable rounding. Rounding is piecewise constant, so the
        // gradient is zero everywhere.
        #[derive(Debug)]
        struct Round;
        retro_unary!(RetroRound, B::float_round);
        impl<B: Backend> Backward<B, 1> for Round {
            type State = (Shape, B::Device);
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (shape, device) = ops.state;
                unary::<B, _>(ops.parents, ops.node, grads, |_grad| {
                    B::float_zeros(shape, &device)
                })
            }
        }
        match Round
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroRound::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(preps) => preps.finish(
                (tensor.primitive.shape(), B::float_device(&tensor.primitive)),
                B::float_round(tensor.primitive),
            ),
            OpsKind::UnTracked(preps) => preps.finish(B::float_round(tensor.primitive)),
        }
    }
    fn float_floor(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        // Differentiable floor. Floor is piecewise constant, so the gradient
        // is zero everywhere.
        #[derive(Debug)]
        struct Floor;
        retro_unary!(RetroFloor, B::float_floor);
        impl<B: Backend> Backward<B, 1> for Floor {
            type State = (Shape, B::Device);
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (shape, device) = ops.state;
                unary::<B, _>(ops.parents, ops.node, grads, |_grad| {
                    B::float_zeros(shape, &device)
                })
            }
        }
        match Floor
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroFloor::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(preps) => preps.finish(
                (tensor.primitive.shape(), B::float_device(&tensor.primitive)),
                B::float_floor(tensor.primitive),
            ),
            OpsKind::UnTracked(preps) => preps.finish(B::float_floor(tensor.primitive)),
        }
    }
fn float_ceil(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
#[derive(Debug)]
struct Ceil;
retro_unary!(RetroCeil, B::float_ceil);
impl<B: Backend> Backward<B, 1> for Ceil {
type State = (Shape, B::Device);
fn backward(
self,
ops: Ops<Self::State, 1>,
grads: &mut Gradients,
_checkpointer: &mut Checkpointer,
) {
let (shape, device) = ops.state;
unary::<B, _>(ops.parents, ops.node, grads, |_grad| {
B::float_zeros(shape, &device)
})
}
}
match Ceil
.prepare::<C>([tensor.node.clone()])
.memory_bound()
.retro_forward(RetroCeil::<B>::new(tensor.node.id))
.parents([&tensor])
.stateful()
{
OpsKind::Tracked(preps) => preps.finish(
(tensor.primitive.shape(), B::float_device(&tensor.primitive)),
B::float_floor(tensor.primitive),
),
OpsKind::UnTracked(preps) => preps.finish(B::float_floor(tensor.primitive)),
}
}
    fn float_erf(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        // Differentiable error function: d/dx erf(x) = 2/sqrt(pi) * exp(-x^2).
        #[derive(Debug)]
        struct Erf;
        retro_unary!(RetroErf, B::float_erf);
        impl<B: Backend> Backward<B, 1> for Erf {
            type State = NodeID;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    let ops = checkpointer.retrieve_node_output(ops.state);
                    let exponent = B::float_neg(B::float_powf_scalar(ops, 2.0));
                    let numerator = B::float_mul_scalar(B::float_exp(exponent), 2.0.elem());
                    let denominator = core::f64::consts::PI.sqrt().elem();
                    let value = B::float_div_scalar(numerator, denominator);
                    B::float_mul(grad, value)
                });
            }
        }
        match Erf
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroErf::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                let state = prep.checkpoint(&tensor);
                prep.finish(state, B::float_erf(tensor.primitive))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_erf(tensor.primitive)),
        }
    }
    fn float_cat(tensors: Vec<FloatTensor<Self>>, dim: usize) -> FloatTensor<Self> {
        // Differentiable concatenation along `dim`, implemented with a custom
        // `Step` because the number of parents is only known at runtime.
        #[derive(new, Debug)]
        struct CatStep<B: Backend> {
            nodes: Vec<Option<NodeRef>>,
            // The size of each input tensor along the concatenated dimension,
            // used to slice the output gradient back into per-parent pieces.
            dim_sizes: Vec<usize>,
            output: NodeRef,
            phantom: PhantomData<B>,
            dim: usize,
        }
        impl<B: Backend> Step for CatStep<B> {
            fn step(self: Box<Self>, grads: &mut Gradients, _checkpointer: &mut Checkpointer) {
                let grad = grads.consume::<B>(&self.output);
                let ranges: Vec<_> = grad.shape().dims.iter().map(|v| 0..*v).collect();
                let mut current_index = 0;
                // Walk the parents in order, handing each one the slice of the
                // gradient that corresponds to its position in the output.
                self.nodes
                    .into_iter()
                    .zip(self.dim_sizes)
                    .filter_map(|(node, dim_size)| node.map(|node| (node, dim_size)))
                    .for_each(|(node, dim_size)| {
                        let mut ranges = ranges.clone();
                        ranges[self.dim] = current_index..dim_size + current_index;
                        current_index += dim_size;
                        grads.register::<B>(node.id, B::float_slice(grad.clone(), &ranges));
                    });
            }
            fn node(&self) -> NodeID {
                self.output.id
            }
            fn parents(&self) -> Vec<NodeID> {
                self.nodes
                    .iter()
                    .filter_map(|node| node.clone())
                    .map(|node| node.id)
                    .collect()
            }
            fn depth(&self) -> usize {
                self.output.order
            }
        }
        let mut nodes = Vec::with_capacity(tensors.len());
        let mut primitives = Vec::with_capacity(tensors.len());
        let mut dim_sizes = Vec::with_capacity(tensors.len());
        tensors.into_iter().for_each(|tensor| {
            dim_sizes.push(tensor.primitive.shape().dims[dim]);
            nodes.push(tensor.node);
            primitives.push(tensor.primitive);
        });
        let requirement = Requirement::from_nodes(&nodes);
        // For simplicity, this operation does not checkpoint anything.
        let cat_computing_property = ComputingProperty::Ambiguous;
        let checkpointer_builder = CheckpointerBuilder::default();
        let output = B::float_cat(primitives, dim);
        // No parent requires gradients: skip registering the backward step.
        if requirement.is_none() {
            return AutodiffTensor::from_parents(
                output,
                &nodes,
                requirement,
                cat_computing_property,
            );
        }
        let output =
            AutodiffTensor::from_parents(output, &nodes, requirement, cat_computing_property);
        let nodes = nodes
            .into_iter()
            .map(|node| node.clone_if_require_grad())
            .collect::<Vec<_>>();
        let ops = CatStep::<B>::new(nodes, dim_sizes, output.node.clone(), dim);
        output.register_step(ops, checkpointer_builder)
    }
    fn float_max_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {
        // Differentiable max along `dim`. When tracked, the indices of the
        // maxima are saved so the backward pass can route gradients to them.
        match MaxMinDim
            .prepare::<C>([tensor.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => {
                let shape = tensor.primitive.shape();
                let (tensor, index) = B::float_max_dim_with_indices(tensor.primitive, dim);
                prep.finish((index, shape), tensor)
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_max_dim(tensor.primitive, dim)),
        }
    }
    fn float_max_dim_with_indices(
        tensor: FloatTensor<Self>,
        dim: usize,
    ) -> (FloatTensor<Self>, IntTensor<B>) {
        // Like `float_max_dim`, but also returns the (non-differentiable)
        // indices of the maxima.
        match MaxMinDim
            .prepare::<C>([tensor.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => {
                let shape = tensor.primitive.shape();
                let (tensor, index) = B::float_max_dim_with_indices(tensor.primitive, dim);
                let tensor = prep.finish((index.clone(), shape), tensor);
                (tensor, index)
            }
            OpsKind::UnTracked(prep) => {
                let (tensor, index) = B::float_max_dim_with_indices(tensor.primitive, dim);
                let tensor = prep.finish(tensor);
                (tensor, index)
            }
        }
    }
    fn float_min_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {
        // Differentiable min along `dim`; mirrors `float_max_dim`.
        match MaxMinDim
            .prepare::<C>([tensor.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => {
                let shape = tensor.primitive.shape();
                let (tensor, index) = B::float_min_dim_with_indices(tensor.primitive, dim);
                prep.finish((index, shape), tensor)
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_min_dim(tensor.primitive, dim)),
        }
    }
    fn float_min_dim_with_indices(
        tensor: FloatTensor<Self>,
        dim: usize,
    ) -> (FloatTensor<Self>, IntTensor<B>) {
        // Like `float_min_dim`, but also returns the (non-differentiable)
        // indices of the minima.
        match MaxMinDim
            .prepare::<C>([tensor.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => {
                let shape = tensor.primitive.shape();
                let (tensor, index) = B::float_min_dim_with_indices(tensor.primitive, dim);
                let tensor = prep.finish((index.clone(), shape), tensor);
                (tensor, index)
            }
            OpsKind::UnTracked(prep) => {
                let (tensor, index) = B::float_min_dim_with_indices(tensor.primitive, dim);
                let tensor = prep.finish(tensor);
                (tensor, index)
            }
        }
    }
    // Casting to int is not differentiable; delegate to the inner backend.
    fn float_into_int(tensor: FloatTensor<Self>) -> <Autodiff<B> as Backend>::IntTensorPrimitive {
        B::float_into_int(tensor.primitive)
    }
    fn float_powf(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {
        // Differentiable element-wise power lhs^rhs.
        // d/dlhs = rhs * lhs^(rhs-1); d/drhs = lhs^rhs * ln(lhs).
        #[derive(Debug)]
        struct PowF;
        retro_binary!(RetroPowf, B::float_powf);
        impl<B: Backend> Backward<B, 2> for PowF {
            type State = (NodeID, NodeID, BinaryOpsBroadcast);
            fn backward(
                self,
                ops: Ops<Self::State, 2>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                let (lhs_id, rhs_id, broadcast) = ops.state;
                let lhs: B::FloatTensorPrimitive = checkpointer.retrieve_node_output(lhs_id);
                let rhs: B::FloatTensorPrimitive = checkpointer.retrieve_node_output(rhs_id);
                // Both lhs and rhs are needed for both lhs and rhs gradients, but we clone them
                // the number of times required by the parents specification.
                let [rhs_4lhs, rhs_4rhs] = duplicate(&ops.parents, Some(rhs));
                let [lhs_4lhs, lhs_4rhs] = duplicate(&ops.parents, Some(lhs));
                binary::<B, _, _>(
                    ops.parents,
                    ops.node,
                    grads,
                    |grad| {
                        // rhs * (lhs^(rhs-1)) * grad
                        let rhs1 = rhs_4lhs.unwrap();
                        let rhs2 = rhs1.clone();
                        let lhs = lhs_4lhs.unwrap();
                        let tmp = B::float_powf(
                            lhs,
                            B::float_sub_scalar(rhs1, B::FloatElem::from_elem(1.0)),
                        );
                        let value = B::float_mul(tmp, rhs2);
                        let grad = B::float_mul(grad, value);
                        broadcast.backward_lhs::<B>(grad)
                    },
                    |grad| {
                        // lhs^rhs * ln(lhs) * grad
                        let rhs = rhs_4rhs.unwrap();
                        let lhs1 = lhs_4rhs.unwrap();
                        let lhs2 = lhs1.clone();
                        let tmp = B::float_powf(lhs1, rhs);
                        let value = B::float_mul(tmp, B::float_log(lhs2));
                        let grad = B::float_mul(grad, value);
                        broadcast.backward_rhs::<B>(grad)
                    },
                );
            }
        }
        // Record broadcasting info so gradients can be reduced back to the
        // original operand shapes.
        let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);
        match PowF
            .prepare::<C>([lhs.node.clone(), rhs.node.clone()])
            .memory_bound()
            .retro_forward(RetroPowf::<B>::new(lhs.node.id, rhs.node.id))
            .parents([&lhs, &rhs])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                let lhs_state = prep.checkpoint(&lhs);
                let rhs_state = prep.checkpoint(&rhs);
                prep.finish(
                    (lhs_state, rhs_state, broadcast),
                    B::float_powf(lhs.primitive, rhs.primitive),
                )
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_powf(lhs.primitive, rhs.primitive)),
        }
    }
    fn float_sign(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        // Differentiable sign function with an identically-zero gradient.
        #[derive(Debug)]
        struct Sign;
        retro_unary!(RetroSign, B::float_sign);
        impl<B: Backend> Backward<B, 1> for Sign {
            type State = ();
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                unary::<B, _>(ops.parents, ops.node, grads, |grad|
                    // Always return 0 because the derivative of the sign function
                    // does not contribute to gradient updates in a meaningful way.
                    B::float_mul_scalar(grad, 0.elem()));
            }
        }
        // Stateless: nothing from the forward pass is needed in backward.
        Sign.prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroSign::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateless(B::float_sign(tensor.primitive))
    }
    fn float_expand(tensor: FloatTensor<Self>, shape: Shape) -> FloatTensor<Self> {
        // Differentiable broadcast-expand to `shape`. The backward pass sums
        // the gradient over every broadcast dimension and reshapes it back to
        // the input's shape.
        #[derive(Debug)]
        struct ExpandDim;
        // Recomputes the expand from the checkpointed input when required.
        #[derive(new, Debug)]
        struct RetroExpand<B: Backend> {
            input_id: NodeID,
            shape: Shape,
            _backend: PhantomData<B>,
        }
        impl<B: Backend> RetroForward for RetroExpand<B> {
            fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
                let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);
                let out = B::float_expand(input, self.shape.clone());
                states.save(out_node, out)
            }
        }
        impl<B: Backend> Backward<B, 1> for ExpandDim {
            // (input shape, output shape) captured at forward time.
            type State = (Shape, Shape);
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (shape_in, shape_out) = ops.state;
                let ndims_in = shape_in.num_dims();
                let ndims_out = shape_out.num_dims();
                // Right-align the input shape against the output rank,
                // padding leading dimensions with 1 (broadcast semantics).
                let mut shape_expanded = vec![1; ndims_out];
                debug_assert!(ndims_out >= ndims_in);
                for i in 0..ndims_in {
                    shape_expanded[i + (ndims_out - ndims_in)] = shape_in.dims[i];
                }
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    let shape_grad = grad.shape();
                    let mut grad = grad;
                    #[allow(clippy::needless_range_loop)]
                    for i in 0..ndims_out {
                        // Sum over each dimension that was broadcast from 1.
                        if shape_expanded[i] == 1 && shape_grad.dims[i] != 1 {
                            grad = B::float_sum_dim(grad, i);
                        }
                    }
                    B::float_reshape(grad, shape_in)
                });
            }
        }
        match ExpandDim
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroExpand::<B>::new(tensor.node.id, shape.clone()))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(prep) => prep.finish(
                (tensor.primitive.shape(), shape.clone()),
                B::float_expand(tensor.primitive, shape),
            ),
            OpsKind::UnTracked(prep) => prep.finish(B::float_expand(tensor.primitive, shape)),
        }
    }
fn float_sort(tensor: FloatTensor<Self>, dim: usize, descending: bool) -> FloatTensor<Self> {
match super::sort::SortDim
.prepare::<C>([tensor.node])
.compute_bound()
.stateful()
{
OpsKind::Tracked(prep) => {
let shape = tensor.primitive.shape();
let (tensor, indices) =
B::float_sort_with_indices(tensor.primitive, dim, descending);
prep.finish((indices, shape), tensor)
}
OpsKind::UnTracked(prep) => {
prep.finish(B::float_sort(tensor.primitive, dim, descending))
}
}
}
fn float_sort_with_indices(
tensor: FloatTensor<Self>,
dim: usize,
descending: bool,
) -> (FloatTensor<Self>, IntTensor<B>) {
match super::sort::SortDim
.prepare::<C>([tensor.node])
.compute_bound()
.stateful()
{
OpsKind::Tracked(prep) => {
let shape = tensor.primitive.shape();
let (tensor, indices) =
B::float_sort_with_indices(tensor.primitive, dim, descending);
let tensor = prep.finish((indices.clone(), shape), tensor);
(tensor, indices)
}
OpsKind::UnTracked(prep) => {
let (tensor, indices) =
B::float_sort_with_indices(tensor.primitive, dim, descending);
let tensor = prep.finish(tensor);
(tensor, indices)
}
}
}
    /// Returns the indices that would sort the tensor along `dim`.
    ///
    /// The result is an integer tensor, which is not differentiable, so the
    /// call goes straight to the inner backend and no node is registered on
    /// the autodiff graph.
    fn float_argsort(tensor: FloatTensor<Self>, dim: usize, descending: bool) -> IntTensor<B> {
        B::float_argsort(tensor.primitive, dim, descending)
    }
fn float_repeat_dim(tensor: FloatTensor<Self>, dim: usize, times: usize) -> FloatTensor<Self> {
#[derive(Debug)]
struct Repeat;
#[derive(new, Debug)]
struct RetroRepeat<B: Backend> {
tensor_id: NodeID,
dim: usize,
times: usize,
_backend: PhantomData<B>,
}
impl<B: Backend> RetroForward for RetroRepeat<B> {
fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);
let out = B::float_repeat_dim(tensor, self.dim, self.times);
states.save(out_node, out)
}
}
impl<B: Backend> Backward<B, 1> for Repeat {
type State = (usize, usize);
fn backward(
self,
ops: Ops<Self::State, 1>,
grads: &mut Gradients,
_checkpointer: &mut Checkpointer,
) {
let (dim, times) = ops.state;
unary::<B, _>(ops.parents, ops.node, grads, |grad| {
let mut dims = grad.shape().dims;
let orig_dim_size = dims[dim] / times;
if orig_dim_size > 1 {
dims[dim] = orig_dim_size;
let orig_dims = dims.clone();
dims.insert(dim + 1, times); // shape [..., orig_dim_size, times, ...]
let grad = B::float_reshape(grad, Shape::from(dims));
let grad = B::float_sum_dim(grad, dim + 1); // sum over repeat times
B::float_reshape(grad, Shape::from(orig_dims))
} else {
B::float_sum_dim(grad, dim)
}
});
}
}
match Repeat
.prepare::<C>([tensor.node.clone()])
.memory_bound()
.retro_forward(RetroRepeat::<B>::new(tensor.node.id, dim, times))
.parents([&tensor])
.stateful()
{
OpsKind::Tracked(prep) => prep.finish(
(dim, times),
B::float_repeat_dim(tensor.primitive, dim, times),
),
OpsKind::UnTracked(prep) => {
prep.finish(B::float_repeat_dim(tensor.primitive, dim, times))
}
}
}
    /// Casts the tensor to the given float dtype.
    ///
    /// NOTE(review): the result is wrapped in a fresh `AutodiffTensor`, so the
    /// cast is not registered on the autodiff graph and gradients will not
    /// flow back through it — confirm this detachment is intentional.
    fn float_cast(tensor: FloatTensor<Self>, dtype: burn_tensor::FloatDType) -> FloatTensor<Self> {
        AutodiffTensor::new(B::float_cast(tensor.primitive, dtype))
    }
// TODO: Implement float_prod and float_sum
// https://github.com/tracel-ai/burn/issues/1458
}
/// Records whether a binary op broadcast its operands, so the backward pass
/// can reduce each gradient back to its operand's original shape.
#[derive(Debug, Clone)]
enum BinaryOpsBroadcast {
    /// The operand shapes differed somewhere: (lhs shape, rhs shape) as they
    /// were before broadcasting.
    Broadcasted(Shape, Shape),
    /// The shapes matched exactly; gradients pass through unchanged.
    None,
}
impl BinaryOpsBroadcast {
    /// Compares the two operand shapes and records whether broadcasting
    /// occurred on any axis.
    fn new<B: Backend>(lhs: &B::FloatTensorPrimitive, rhs: &B::FloatTensorPrimitive) -> Self {
        let shape_lhs = lhs.shape();
        let shape_rhs = rhs.shape();

        // Any differing axis means the op broadcast its operands.
        let differs =
            (0..shape_lhs.num_dims()).any(|i| shape_lhs.dims[i] != shape_rhs.dims[i]);

        if differs {
            Self::Broadcasted(shape_lhs, shape_rhs)
        } else {
            Self::None
        }
    }

    /// Reduces the gradient back to the left operand's pre-broadcast shape.
    fn backward_lhs<B: Backend>(&self, grad: B::FloatTensorPrimitive) -> B::FloatTensorPrimitive {
        if let Self::Broadcasted(shape_lhs, _) = self {
            broadcast_shape::<B>(grad, shape_lhs)
        } else {
            grad
        }
    }

    /// Reduces the gradient back to the right operand's pre-broadcast shape.
    fn backward_rhs<B: Backend>(&self, grad: B::FloatTensorPrimitive) -> B::FloatTensorPrimitive {
        if let Self::Broadcasted(_, shape_rhs) = self {
            broadcast_shape::<B>(grad, shape_rhs)
        } else {
            grad
        }
    }
}
| rust | {
"argument_definitions": [
{
"definitions": [
"impl<B: Backend, C: CheckpointStrategy> FloatTensorOps<Self> for Autodiff<B, C> {\n fn float_from_data(data: TensorData, device: &Device<Self>) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_from_data(data, device))\n }\n\n fn float_random(\n shape: Shape,\n distribution: burn_tensor::Distribution,\n device: &Device<Self>,\n ) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_random(shape, distribution, device))\n }\n\n fn float_zeros(shape: Shape, device: &Device<Self>) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_zeros(shape, device))\n }\n\n fn float_ones(shape: Shape, device: &Device<Self>) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_ones(shape, device))\n }\n\n async fn float_into_data(tensor: FloatTensor<Self>) -> TensorData {\n B::float_into_data(tensor.primitive).await\n }\n\n fn float_device(tensor: &FloatTensor<Self>) -> Device<Self> {\n B::float_device(&tensor.primitive)\n }\n\n fn float_to_device(tensor: FloatTensor<Self>, device: &Device<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct ToDevice;\n\n impl<B: Backend> Backward<B, 1> for ToDevice {\n type State = B::Device;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_to_device(grad, &ops.state)\n });\n }\n }\n\n match ToDevice\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let device_old = B::float_device(&tensor.primitive);\n prep.finish(device_old, B::float_to_device(tensor.primitive, device))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_to_device(tensor.primitive, device)),\n }\n }\n\n fn float_empty(shape: Shape, device: &Device<Self>) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_empty(shape, device))\n }\n\n fn float_add(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Add;\n\n retro_binary!(RetroAdd, 
B::float_add);\n\n impl<B: Backend> Backward<B, 2> for Add {\n type State = (Shape, Shape);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape_lhs, shape_rhs) = ops.state;\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| broadcast_shape::<B>(grad, &shape_lhs),\n |grad| broadcast_shape::<B>(grad, &shape_rhs),\n );\n }\n }\n\n match Add\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroAdd::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (lhs.primitive.shape(), rhs.primitive.shape()),\n B::float_add(lhs.primitive, rhs.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_add(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_add_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct AddScalar;\n\n retro_unary_scalar!(RetroAddScalar, B::float_add_scalar);\n\n impl<B: Backend> Backward<B, 1> for AddScalar {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| grad);\n }\n }\n\n AddScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroAddScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateless(B::float_add_scalar(lhs.primitive, rhs))\n }\n\n fn float_sub(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sub;\n\n retro_binary!(RetroSub, B::float_sub);\n\n impl<B: Backend> Backward<B, 2> for Sub {\n type State = (Shape, Shape);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape_lhs, shape_rhs) = ops.state;\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| 
broadcast_shape::<B>(grad, &shape_lhs),\n |grad| broadcast_shape::<B>(B::float_neg(grad), &shape_rhs),\n );\n }\n }\n\n match Sub\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroSub::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (lhs.primitive.shape(), rhs.primitive.shape()),\n B::float_sub(lhs.primitive, rhs.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_sub(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_sub_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct SubScalar;\n\n retro_unary_scalar!(RetroSubScalar, B::float_sub_scalar);\n\n impl<B: Backend> Backward<B, 1> for SubScalar {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| grad);\n }\n }\n\n SubScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroSubScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateless(B::float_sub_scalar(lhs.primitive, rhs))\n }\n\n fn float_mul(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Mul;\n\n retro_binary!(RetroMul, B::float_mul);\n\n impl<B: Backend> Backward<B, 2> for Mul {\n type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (lhs, rhs, broadcast) = ops.state;\n let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));\n let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let grad = B::float_mul(grad, rhs.unwrap());\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n let grad = B::float_mul(grad, lhs.unwrap());\n 
broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let lhs_tracked = lhs.is_tracked();\n let rhs_tracked = rhs.is_tracked();\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match Mul\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroMul::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));\n let rhs_state = lhs_tracked.then(|| prep.checkpoint(&rhs));\n\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n B::float_mul(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_mul(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_mul_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct MulScalar;\n\n retro_unary_scalar!(RetroMulScalar, B::float_mul_scalar);\n\n impl<B: Backend> Backward<B, 1> for MulScalar {\n type State = FloatElem<B>;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_mul_scalar(grad, ops.state)\n });\n }\n }\n\n match MulScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroMulScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(rhs, B::float_mul_scalar(lhs.primitive, rhs)),\n OpsKind::UnTracked(prep) => prep.finish(B::float_mul_scalar(lhs.primitive, rhs)),\n }\n }\n\n fn float_div(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Div;\n\n retro_binary!(RetroDiv, B::float_div);\n\n impl<B: Backend> Backward<B, 2> for Div {\n type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut 
Checkpointer,\n ) {\n let (lhs, rhs, broadcast) = ops.state;\n let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));\n let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));\n let [rhs_4lhs, rhs_4rhs] = duplicate(&ops.parents, rhs);\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let rhs = rhs_4lhs.unwrap();\n let value = B::float_powf_scalar(rhs, -1.0);\n let grad = B::float_mul(grad, value);\n\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n let rhs = rhs_4rhs.unwrap();\n let lhs = lhs.unwrap();\n let value = B::float_div(B::float_neg(lhs), B::float_powf_scalar(rhs, 2.0));\n let grad = B::float_mul(grad, value);\n\n broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let lhs_tracked = lhs.is_tracked();\n let rhs_tracked = rhs.is_tracked();\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match Div\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroDiv::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));\n let rhs_state = (lhs_tracked || rhs_tracked).then(|| prep.checkpoint(&rhs));\n\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n B::float_div(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_div(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_div_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct DivScalar;\n\n retro_unary_scalar!(RetroDivScalar, B::float_div_scalar);\n\n impl<B: Backend> Backward<B, 1> for DivScalar {\n type State = FloatElem<B>;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let tmp = 1.0 / ops.state.elem::<f32>();\n B::float_mul_scalar(grad, tmp.elem())\n });\n }\n 
}\n\n match DivScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroDivScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(rhs, B::float_div_scalar(lhs.primitive, rhs)),\n OpsKind::UnTracked(prep) => prep.finish(B::float_div_scalar(lhs.primitive, rhs)),\n }\n }\n\n fn float_remainder(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Rem;\n\n retro_binary!(RetroRem, B::float_remainder);\n\n impl<B: Backend> Backward<B, 2> for Rem {\n type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (lhs, rhs, broadcast) = ops.state;\n let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));\n let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n // remainder(x, y) = x - floor(x / y) * y\n // partial(x - floor(x / y) * y, x) = 1\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n // partial(x - floor(x / y) * y, y) = - floor(x / y)\n let rhs = rhs.unwrap();\n let lhs = lhs.unwrap();\n let value = B::float_neg(B::float_floor(B::float_div(lhs, rhs)));\n let grad = B::float_mul(grad, value);\n broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let lhs_tracked = lhs.is_tracked();\n let rhs_tracked = rhs.is_tracked();\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match Rem\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroRem::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));\n let rhs_state = (lhs_tracked || rhs_tracked).then(|| prep.checkpoint(&rhs));\n\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n 
B::float_remainder(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_remainder(lhs.primitive, rhs.primitive))\n }\n }\n }\n\n fn float_remainder_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct RemainderScalar;\n\n retro_unary_scalar!(RetroRemainderScalar, B::float_remainder_scalar);\n\n impl<B: Backend> Backward<B, 1> for RemainderScalar {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| grad);\n }\n }\n\n RemainderScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroRemainderScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateless(B::float_remainder_scalar(lhs.primitive, rhs))\n }\n\n fn float_matmul(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Matmul;\n\n impl<B: Backend> Backward<B, 2> for Matmul {\n type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (lhs, rhs, broadcast) = ops.state;\n let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));\n let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let rhs = B::float_transpose(rhs.unwrap());\n let grad = B::float_matmul(grad, rhs);\n\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n let lhs = B::float_transpose(lhs.unwrap());\n let grad = B::float_matmul(lhs, grad);\n\n broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let lhs_tracked = lhs.is_tracked();\n let rhs_tracked = rhs.is_tracked();\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match Matmul\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n 
.compute_bound()\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));\n let rhs_state = lhs_tracked.then(|| prep.checkpoint(&rhs));\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n B::float_matmul(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_matmul(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_neg(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Neg;\n\n retro_unary!(RetroNeg, B::float_neg);\n\n impl<B: Backend> Backward<B, 1> for Neg {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| B::float_neg(grad));\n }\n }\n\n Neg.prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroNeg::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateless(B::float_neg(tensor.primitive))\n }\n\n fn float_recip(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Recip;\n\n retro_unary!(RetroRecip, B::float_recip);\n\n impl<B: Backend> Backward<B, 1> for Recip {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let tensor = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let tmp = B::float_powf_scalar(tensor, -2.0);\n let value = B::float_neg(tmp);\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Recip\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroRecip::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_recip(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_recip(tensor.primitive)),\n }\n }\n\n fn float_swap_dims(tensor: 
FloatTensor<Self>, dim1: usize, dim2: usize) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct SwapDim;\n\n #[derive(new, Debug)]\n struct RetroSwapDims<B: Backend> {\n input_id: NodeID,\n dim1: usize,\n dim2: usize,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroSwapDims<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_swap_dims(input, self.dim1, self.dim2);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for SwapDim {\n type State = (usize, usize);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim1, dim2) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_swap_dims(grad, dim2, dim1)\n });\n }\n }\n\n match SwapDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSwapDims::<B>::new(tensor.node.id, dim1, dim2))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (dim1, dim2),\n B::float_swap_dims(tensor.primitive, dim1, dim2),\n ),\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_swap_dims(tensor.primitive, dim1, dim2))\n }\n }\n }\n\n fn float_permute(tensor: FloatTensor<Self>, axes: &[usize]) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct PermuteDim;\n\n #[derive(new, Debug)]\n struct RetroPermuteDims<B: Backend> {\n input_id: NodeID,\n axes: Vec<usize>,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroPermuteDims<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_permute(input, &self.axes);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for PermuteDim {\n type State = Vec<usize>;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: 
&mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let axes = ops.state;\n\n let mut inverse = vec![0usize; axes.len()];\n axes.iter()\n .enumerate()\n .for_each(|(i, &axis)| inverse[axis] = i);\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_permute(grad, &inverse)\n });\n }\n }\n\n match PermuteDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroPermuteDims::<B>::new(tensor.node.id, axes.to_vec()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n prep.finish(axes.to_vec(), B::float_permute(tensor.primitive, axes))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_permute(tensor.primitive, axes)),\n }\n }\n\n fn float_flip(tensor: FloatTensor<Self>, axes: &[usize]) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct FlipDim;\n\n #[derive(new, Debug)]\n struct RetroFlipDims<B: Backend> {\n input_id: NodeID,\n axes: Vec<usize>,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroFlipDims<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_flip(input, &self.axes);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for FlipDim {\n type State = Vec<usize>;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let axes = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_flip(grad, &axes)\n });\n }\n }\n\n match FlipDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroFlipDims::<B>::new(tensor.node.id, axes.to_vec()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n prep.finish(axes.to_vec(), B::float_flip(tensor.primitive, axes))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_flip(tensor.primitive, axes)),\n }\n }\n\n fn float_reshape(tensor: FloatTensor<Self>, shape: 
Shape) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct ReshapeDim;\n\n #[derive(new, Debug)]\n struct RetroReshape<B: Backend> {\n input_id: NodeID,\n shape: Shape,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroReshape<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_reshape(input, self.shape.clone());\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for ReshapeDim {\n type State = (Shape, Shape);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape_original, shape) = ops.state;\n let ndims_out = shape.num_dims();\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let shape_grad = grad.shape();\n let mut grad = grad;\n\n for i in 0..ndims_out {\n if shape.dims[i] == 1 && shape_grad.dims[i] != 1 {\n grad = B::float_sum_dim(grad, i);\n }\n }\n\n B::float_reshape(grad, shape_original)\n });\n }\n }\n\n match ReshapeDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroReshape::<B>::new(tensor.node.id, shape.clone()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (tensor.primitive.shape(), shape.clone()),\n B::float_reshape(tensor.primitive, shape),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_reshape(tensor.primitive, shape)),\n }\n }\n\n fn float_gather(\n dim: usize,\n tensor: FloatTensor<Self>,\n indices: IntTensor<B>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Gather;\n\n impl<B: Backend> Backward<B, 1> for Gather {\n type State = (usize, IntTensor<B>, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, indices, shape, device) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let zeros = 
B::float_zeros(shape, &device);\n B::float_scatter(dim, zeros, indices, grad)\n });\n }\n }\n\n match Gather\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n dim,\n indices.clone(),\n tensor.primitive.shape(),\n B::float_device(&tensor.primitive),\n ),\n B::float_gather(dim, tensor.primitive, indices),\n ),\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_gather(dim, tensor.primitive, indices))\n }\n }\n }\n\n fn float_scatter(\n dim: usize,\n tensor: FloatTensor<Self>,\n indices: IntTensor<B>,\n value: FloatTensor<Self>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Scatter;\n\n impl<B: Backend> Backward<B, 2> for Scatter {\n type State = (usize, IntTensor<B>, Shape, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, indices, shape_lhs, shape_rhs, device) = ops.state;\n let [indices_4lhs, indices_4rhs] = duplicate(&ops.parents, Some(indices));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let zeros = B::float_zeros(shape_lhs, &device);\n B::float_scatter(dim, grad, indices_4lhs.unwrap(), zeros)\n },\n |grad| {\n let zeros = B::float_zeros(shape_rhs, &device);\n B::float_scatter(dim, zeros, indices_4rhs.unwrap(), grad)\n },\n );\n }\n }\n\n match Scatter\n .prepare::<C>([tensor.node, value.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n dim,\n indices.clone(),\n tensor.primitive.shape(),\n value.primitive.shape(),\n B::float_device(&value.primitive),\n ),\n B::float_scatter(dim, tensor.primitive, indices, value.primitive),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_scatter(\n dim,\n tensor.primitive,\n indices,\n value.primitive,\n )),\n }\n }\n\n fn float_select(\n tensor: FloatTensor<Self>,\n dim: usize,\n indices: IntTensor<B>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Select;\n\n 
#[derive(new, Debug)]\n struct RetroSelect<B: Backend> {\n input_id: NodeID,\n dim: usize,\n indices: IntTensor<B>,\n }\n\n impl<B: Backend> RetroForward for RetroSelect<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_select(input, self.dim, self.indices.clone());\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for Select {\n type State = (usize, IntTensor<B>, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, indices, shape, device) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let zeros = B::float_zeros(shape, &device);\n B::float_select_assign(zeros, dim, indices, grad)\n });\n }\n }\n\n match Select\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSelect::<B>::new(tensor.node.id, dim, indices.clone()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n dim,\n indices.clone(),\n tensor.primitive.shape(),\n B::float_device(&tensor.primitive),\n ),\n B::float_select(tensor.primitive, dim, indices),\n ),\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_select(tensor.primitive, dim, indices))\n }\n }\n }\n\n fn float_select_assign(\n tensor: FloatTensor<Self>,\n dim: usize,\n indices: IntTensor<B>,\n value: FloatTensor<Self>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct IndexSelectDimAssign;\n\n #[derive(new, Debug)]\n struct RetroSelectAssign<B: Backend> {\n tensor_id: NodeID,\n dim: usize,\n indices: IntTensor<B>,\n value_id: NodeID,\n }\n\n impl<B: Backend> RetroForward for RetroSelectAssign<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);\n let value = states.get_state::<B::FloatTensorPrimitive>(&self.value_id);\n 
let out = B::float_select_assign(tensor, self.dim, self.indices.clone(), value);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 2> for IndexSelectDimAssign {\n type State = (usize, IntTensor<B>);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, indices) = ops.state;\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| grad,\n |grad| B::float_select(grad, dim, indices),\n );\n }\n }\n\n match IndexSelectDimAssign\n .prepare::<C>([tensor.node.clone(), value.node.clone()])\n .memory_bound()\n .retro_forward(RetroSelectAssign::<B>::new(\n tensor.node.id,\n dim,\n indices.clone(),\n value.node.id,\n ))\n .parents([&tensor, &value])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (dim, indices.clone()),\n B::float_select_assign(tensor.primitive, dim, indices, value.primitive),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_select_assign(\n tensor.primitive,\n dim,\n indices,\n value.primitive,\n )),\n }\n }\n\n fn float_slice(\n tensor: FloatTensor<Self>,\n ranges: &[core::ops::Range<usize>],\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Index;\n\n #[derive(new, Debug)]\n struct RetroSlice<B: Backend> {\n tensor_id: NodeID,\n ranges: Vec<core::ops::Range<usize>>,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroSlice<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);\n let out = B::float_slice(tensor, &self.ranges);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for Index {\n type State = (Vec<core::ops::Range<usize>>, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (ranges, shape, device) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let zeros = 
B::float_zeros(shape, &device);\n B::float_slice_assign(zeros, &ranges, grad)\n });\n }\n }\n\n match Index\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSlice::<B>::new(tensor.node.id, ranges.to_vec()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n ranges.to_vec(),\n tensor.primitive.shape(),\n B::float_device(&tensor.primitive),\n ),\n B::float_slice(tensor.primitive, ranges),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_slice(tensor.primitive, ranges)),\n }\n }\n\n fn float_slice_assign(\n tensor: FloatTensor<Self>,\n ranges: &[core::ops::Range<usize>],\n value: FloatTensor<Self>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct SliceAssign;\n\n #[derive(new, Debug)]\n struct RetroSliceAssign<B: Backend> {\n tensor_id: NodeID,\n ranges: Vec<core::ops::Range<usize>>,\n value_id: NodeID,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroSliceAssign<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);\n let value = states.get_state::<B::FloatTensorPrimitive>(&self.value_id);\n let out = B::float_slice_assign(tensor, &self.ranges, value);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 2> for SliceAssign {\n type State = (Vec<core::ops::Range<usize>>, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (ranges, shape_rhs, device) = ops.state;\n let [ranges_4lhs, ranges_4rhs] = duplicate(&ops.parents, Some(ranges));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let zeros = B::float_zeros(shape_rhs, &device);\n B::float_slice_assign(grad, &ranges_4lhs.unwrap(), zeros)\n },\n |grad| B::float_slice(grad, &ranges_4rhs.unwrap()),\n );\n }\n }\n\n match SliceAssign\n .prepare::<C>([tensor.node.clone(), value.node.clone()])\n 
.memory_bound()\n .retro_forward(RetroSliceAssign::<B>::new(\n tensor.node.id,\n ranges.to_vec(),\n value.node.id,\n ))\n .parents([&tensor, &value])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n ranges.to_vec(),\n value.primitive.shape(),\n B::float_device(&value.primitive),\n ),\n B::float_slice_assign(tensor.primitive, ranges, value.primitive),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_slice_assign(\n tensor.primitive,\n ranges,\n value.primitive,\n )),\n }\n }\n\n fn float_mask_where(\n tensor: FloatTensor<Self>,\n mask: BoolTensor<Self>,\n source: FloatTensor<Self>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct MaskWhere;\n\n impl<B: Backend> Backward<B, 2> for MaskWhere {\n type State = (BoolTensor<B>, Shape, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (mask, shape_lhs, shape_rhs, device) = ops.state;\n let [mask_4lhs, mask_4rhs] = duplicate(&ops.parents, Some(mask));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let zeros = B::float_zeros(shape_lhs.clone(), &device);\n let grad = B::float_mask_where(grad, mask_4lhs.unwrap(), zeros);\n\n broadcast_shape::<B>(grad, &shape_lhs)\n },\n |grad| {\n let zeros = B::float_zeros(shape_rhs.clone(), &device);\n let grad = B::float_mask_where(zeros, mask_4rhs.unwrap(), grad);\n\n broadcast_shape::<B>(grad, &shape_rhs)\n },\n );\n }\n }\n\n match MaskWhere\n .prepare::<C>([tensor.node, source.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n mask.clone(),\n tensor.primitive.shape(),\n source.primitive.shape(),\n B::float_device(&source.primitive),\n ),\n B::float_mask_where(tensor.primitive, mask, source.primitive),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_mask_where(\n tensor.primitive,\n mask,\n source.primitive,\n )),\n }\n }\n\n fn float_mask_fill(\n tensor: FloatTensor<Self>,\n mask: 
BoolTensor<B>,
        value: FloatElem<B>,
    ) -> FloatTensor<Self> {
        // Backward op for mask_fill: overwritten positions contributed nothing to the
        // output, so their incoming gradient is zeroed out.
        #[derive(Debug)]
        struct MaskFill;

        impl<B: Backend> Backward<B, 1> for MaskFill {
            // State: the boolean mask, needed to zero the gradient at filled positions.
            type State = BoolTensor<B>;

            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    // Zero the gradient wherever the forward pass replaced the value.
                    B::float_mask_fill(grad, ops.state, 0.elem())
                });
            }
        }

        match MaskFill
            .prepare::<C>([tensor.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => prep.finish(
                mask.clone(),
                B::float_mask_fill(tensor.primitive, mask, value),
            ),
            OpsKind::UnTracked(prep) => {
                prep.finish(B::float_mask_fill(tensor.primitive, mask, value))
            }
        }
    }

    // Comparison ops below produce boolean tensors and register no backward step:
    // they delegate straight to the inner backend without touching the graph.
    fn float_equal(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {
        B::float_equal(lhs.primitive, rhs.primitive)
    }

    fn float_equal_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {
        B::float_equal_elem(lhs.primitive, rhs)
    }

    fn float_greater(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {
        B::float_greater(lhs.primitive, rhs.primitive)
    }

    fn float_greater_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {
        B::float_greater_elem(lhs.primitive, rhs)
    }

    fn float_greater_equal(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {
        B::float_greater_equal(lhs.primitive, rhs.primitive)
    }

    fn float_greater_equal_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {
        B::float_greater_equal_elem(lhs.primitive, rhs)
    }

    fn float_lower(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {
        B::float_lower(lhs.primitive, rhs.primitive)
    }

    fn float_lower_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {
        B::float_lower_elem(lhs.primitive, rhs)
    }

    fn float_lower_equal(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {
        B::float_lower_equal(lhs.primitive, rhs.primitive)
}

    fn float_lower_equal_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {
        B::float_lower_equal_elem(lhs.primitive, rhs)
    }

    fn float_detach(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        // When we detach a tensor, we remove it from the graph, but we still want to keep the
        // `require_grad` setting.
        let is_require_grad = Self::float_is_require_grad(&tensor);
        let tensor = AutodiffTensor::new(tensor.primitive);

        match is_require_grad {
            true => tensor.require_grad(),
            false => tensor,
        }
    }

    fn float_set_require_grad(tensor: FloatTensor<Self>, require_grad: bool) -> FloatTensor<Self> {
        if require_grad {
            return tensor.require_grad();
        }

        // Wrapping the primitive in a fresh AutodiffTensor drops the existing node,
        // i.e. disabling require_grad also detaches from the current graph.
        AutodiffTensor::new(tensor.primitive)
    }

    fn float_is_require_grad(tensor: &FloatTensor<Self>) -> bool {
        matches!(tensor.node.requirement, Requirement::Grad)
    }

    fn float_mean(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        // Backward op for mean over all elements: spread the output gradient
        // uniformly, scaled by 1/N.
        #[derive(Debug)]
        struct Mean;

        impl<B: Backend> Backward<B, 1> for Mean {
            // State: the input shape, needed to rebuild a full-size gradient.
            type State = Shape;

            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    let shape = ops.state;
                    // d(mean)/d(x_i) = 1/N for every element.
                    let val = 1_f64 / shape.num_elements() as f64;
                    let ones = B::float_ones(shape, &B::float_device(&grad));
                    let val = B::float_mul_scalar(ones, val.elem());

                    // Align grad's rank with the input before broadcasting the multiply.
                    let grad = unsqueeze_like::<B>(grad, val.shape());
                    B::float_mul(val, grad)
                });
            }
        }

        match Mean.prepare::<C>([tensor.node]).compute_bound().stateful() {
            OpsKind::Tracked(prep) => {
                prep.finish(tensor.primitive.shape(), B::float_mean(tensor.primitive))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_mean(tensor.primitive)),
        }
    }

    fn float_sum(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        // Backward op for sum over all elements: every input element receives the
        // output gradient unscaled.
        #[derive(Debug)]
        struct Sum;

        impl<B: Backend> Backward<B, 1> for Sum {
            // State: the input shape, needed to rebuild a full-size gradient.
            type State = Shape;

            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut
Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let val = B::float_ones(ops.state, &B::float_device(&grad));\n\n let grad = unsqueeze_like::<B>(grad, val.shape());\n B::float_mul(val, grad)\n });\n }\n }\n\n match Sum.prepare::<C>([tensor.node]).compute_bound().stateful() {\n OpsKind::Tracked(prep) => {\n prep.finish(tensor.primitive.shape(), B::float_sum(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_sum(tensor.primitive)),\n }\n }\n\n fn float_mean_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct MeanDim;\n\n impl<B: Backend> Backward<B, 1> for MeanDim {\n type State = (Shape, usize);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape, dim) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let val = 1_f64 / shape.dims[dim] as f64;\n let ones = B::float_ones(shape, &B::float_device(&grad));\n let val = B::float_mul_scalar(ones, B::FloatElem::from_elem(val));\n\n let grad = B::float_sum_dim(grad, dim);\n B::float_mul(val, grad)\n });\n }\n }\n\n match MeanDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (tensor.primitive.shape(), dim),\n B::float_mean_dim(tensor.primitive, dim),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_mean_dim(tensor.primitive, dim)),\n }\n }\n\n fn float_sum_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct SumDim;\n\n impl<B: Backend> Backward<B, 1> for SumDim {\n type State = (Shape, usize);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape, dim) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let ones = B::float_ones(shape, &B::float_device(&grad));\n let grad = 
B::float_sum_dim(grad, dim);\n\n B::float_mul(ones, grad)\n });\n }\n }\n\n match SumDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (tensor.primitive.shape(), dim),\n B::float_sum_dim(tensor.primitive, dim),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_sum_dim(tensor.primitive, dim)),\n }\n }\n\n fn float_argmax(tensor: FloatTensor<Self>, dim: usize) -> IntTensor<B> {\n B::float_argmax(tensor.primitive, dim)\n }\n\n fn float_argmin(tensor: FloatTensor<Self>, dim: usize) -> IntTensor<B> {\n B::float_argmin(tensor.primitive, dim)\n }\n\n fn float_exp(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Exp;\n\n retro_unary!(RetroExp, B::float_exp);\n\n impl<B: Backend> Backward<B, 1> for Exp {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n let output = B::float_exp(input);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_mul(grad, output)\n });\n }\n }\n\n match Exp\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroExp::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_exp(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_exp(tensor.primitive)),\n }\n }\n\n fn float_log(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Log;\n\n retro_unary!(RetroLog, B::float_log);\n\n impl<B: Backend> Backward<B, 1> for Log {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = 
B::float_powf_scalar(input, -1.0);\n B::float_mul(grad, value)\n });\n }\n }\n\n match Log\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroLog::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_log(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_log(tensor.primitive)),\n }\n }\n\n fn float_log1p(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Log1P;\n\n retro_unary!(RetroLog1P, B::float_log1p);\n\n impl<B: Backend> Backward<B, 1> for Log1P {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_add_scalar(input, 1.elem());\n let value = B::float_powf_scalar(value, -1.0);\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Log1P\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroLog1P::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_log1p(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_log1p(tensor.primitive)),\n }\n }\n\n fn float_powf_scalar(tensor: FloatTensor<Self>, value: f32) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct PowfScalar;\n\n #[derive(new, Debug)]\n struct RetroPowfScalar<B: Backend> {\n lhs_id: NodeID,\n rhs: f32,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroPowfScalar<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let lhs = states.get_state::<B::FloatTensorPrimitive>(&self.lhs_id);\n let out = B::float_powf_scalar(lhs, self.rhs);\n states.save(out_node, out)\n }\n }\n\n impl<B: 
Backend> Backward<B, 1> for PowfScalar {\n type State = (NodeID, f32);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (tensor_id, value) = ops.state;\n let tensor = checkpointer.retrieve_node_output(tensor_id);\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let tmp = B::float_powf_scalar(tensor, value - 1.0);\n let value = B::float_mul_scalar(tmp, value.elem());\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match PowfScalar\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroPowfScalar::<B>::new(tensor.node.id, value))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = (prep.checkpoint(&tensor), value);\n prep.finish(state, B::float_powf_scalar(tensor.primitive, value))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_powf_scalar(tensor.primitive, value)),\n }\n }\n\n fn float_sqrt(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sqrt;\n\n retro_unary!(RetroSqrt, B::float_sqrt);\n\n impl<B: Backend> Backward<B, 1> for Sqrt {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_div_scalar(B::float_powf_scalar(input, -0.5), 2.elem());\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Sqrt\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSqrt::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_sqrt(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_sqrt(tensor.primitive)),\n }\n }\n\n fn float_abs(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Abs;\n\n 
retro_unary!(RetroAbs, B::float_abs);\n\n impl<B: Backend> Backward<B, 1> for Abs {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let tensor: B::FloatTensorPrimitive = checkpointer.retrieve_node_output(ops.state);\n let state = B::float_sign(tensor);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_mul(grad, state)\n });\n }\n }\n\n match Abs\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroAbs::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_abs(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_abs(tensor.primitive)),\n }\n }\n\n fn float_cos(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Cos;\n\n retro_unary!(RetroCos, B::float_cos);\n\n impl<B: Backend> Backward<B, 1> for Cos {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_neg(B::float_sin(input));\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Cos\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroCos::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_cos(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_cos(tensor.primitive)),\n }\n }\n\n fn float_sin(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sin;\n\n retro_unary!(RetroSin, B::float_sin);\n\n impl<B: Backend> Backward<B, 1> for Sin {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 
1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let state = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_cos(state);\n B::float_mul(grad, value)\n });\n }\n }\n\n match Sin\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSin::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_sin(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_sin(tensor.primitive)),\n }\n }\n\n fn float_tanh(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Tanh;\n\n retro_unary!(RetroTanh, B::float_tanh);\n\n impl<B: Backend> Backward<B, 1> for Tanh {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n let state = B::float_tanh(input);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_add_scalar(\n B::float_neg(B::float_powf_scalar(state, 2.0)),\n 1.elem(),\n );\n B::float_mul(grad, value)\n });\n }\n }\n\n match Tanh\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroTanh::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_tanh(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_tanh(tensor.primitive)),\n }\n }\n\n fn float_round(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Round;\n retro_unary!(RetroRound, B::float_round);\n\n impl<B: Backend> Backward<B, 1> for Round {\n type State = (Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n 
) {
                let (shape, device) = ops.state;
                // round() is piecewise constant, so its derivative is zero
                // (almost everywhere); propagate a zero gradient of the input shape.
                unary::<B, _>(ops.parents, ops.node, grads, |_grad| {
                    B::float_zeros(shape, &device)
                })
            }
        }

        match Round
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroRound::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(preps) => preps.finish(
                (tensor.primitive.shape(), B::float_device(&tensor.primitive)),
                B::float_round(tensor.primitive),
            ),
            OpsKind::UnTracked(preps) => preps.finish(B::float_round(tensor.primitive)),
        }
    }

    fn float_floor(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        // Backward op for floor: piecewise constant, so gradient is zero.
        #[derive(Debug)]
        struct Floor;
        retro_unary!(RetroFloor, B::float_floor);

        impl<B: Backend> Backward<B, 1> for Floor {
            // State: (input shape, device) to build the zero gradient.
            type State = (Shape, B::Device);

            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (shape, device) = ops.state;
                unary::<B, _>(ops.parents, ops.node, grads, |_grad| {
                    B::float_zeros(shape, &device)
                })
            }
        }

        match Floor
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroFloor::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(preps) => preps.finish(
                (tensor.primitive.shape(), B::float_device(&tensor.primitive)),
                B::float_floor(tensor.primitive),
            ),
            OpsKind::UnTracked(preps) => preps.finish(B::float_floor(tensor.primitive)),
        }
    }

    fn float_ceil(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        // Backward op for ceil: piecewise constant, so gradient is zero.
        #[derive(Debug)]
        struct Ceil;
        retro_unary!(RetroCeil, B::float_ceil);

        impl<B: Backend> Backward<B, 1> for Ceil {
            // State: (input shape, device) to build the zero gradient.
            type State = (Shape, B::Device);

            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (shape, device) = ops.state;
                unary::<B, _>(ops.parents, ops.node, grads, |_grad| {
                    B::float_zeros(shape, &device)
                })
            }
        }

        match Ceil
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroCeil::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            // BUG FIX: the forward computation previously called `B::float_floor`
            // (copy/paste from `float_floor`), making ceil silently return floor.
            // It must call `B::float_ceil`, matching the retro-forward and the op name.
            OpsKind::Tracked(preps) => preps.finish(
                (tensor.primitive.shape(), B::float_device(&tensor.primitive)),
                B::float_ceil(tensor.primitive),
            ),
            OpsKind::UnTracked(preps) => preps.finish(B::float_ceil(tensor.primitive)),
        }
    }

    fn float_erf(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        // Backward op for erf: d/dx erf(x) = 2/sqrt(pi) * exp(-x^2).
        #[derive(Debug)]
        struct Erf;

        retro_unary!(RetroErf, B::float_erf);

        impl<B: Backend> Backward<B, 1> for Erf {
            // State: node id of the input, retrieved from the checkpointer at backward time.
            type State = NodeID;

            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    let ops = checkpointer.retrieve_node_output(ops.state);
                    let exponent = B::float_neg(B::float_powf_scalar(ops, 2.0));
                    let numerator = B::float_mul_scalar(B::float_exp(exponent), 2.0.elem());
                    let denominator = core::f64::consts::PI.sqrt().elem();
                    let value = B::float_div_scalar(numerator, denominator);

                    B::float_mul(grad, value)
                });
            }
        }

        match Erf
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroErf::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                let state = prep.checkpoint(&tensor);
                prep.finish(state, B::float_erf(tensor.primitive))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_erf(tensor.primitive)),
        }
    }

    fn float_cat(tensors: Vec<FloatTensor<Self>>, dim: usize) -> FloatTensor<Self> {
        // Custom backward step for concatenation: each parent receives the slice of
        // the output gradient corresponding to its extent along `dim`.
        #[derive(new, Debug)]
        struct CatStep<B: Backend> {
            nodes: Vec<Option<NodeRef>>,
            // The dimension of each tensor along the dim dimension.
            // This indicates the number of dimension concatenated for each tensor.
            dim_sizes: Vec<usize>,
            output: NodeRef,
            phantom: PhantomData<B>,
            dim: usize,
        }

        impl<B: Backend> Step for CatStep<B> {
            fn step(self: Box<Self>, grads: &mut Gradients, _checkpointer: &mut Checkpointer) {
                let grad =
grads.consume::<B>(&self.output);\n let ranges: Vec<_> = grad.shape().dims.iter().map(|v| 0..*v).collect();\n\n let mut current_index = 0;\n\n self.nodes\n .into_iter()\n .zip(self.dim_sizes)\n .filter_map(|(node, dim_size)| node.map(|node| (node, dim_size)))\n .for_each(|(node, dim_size)| {\n let mut ranges = ranges.clone();\n ranges[self.dim] = current_index..dim_size + current_index;\n current_index += dim_size;\n grads.register::<B>(node.id, B::float_slice(grad.clone(), &ranges));\n });\n }\n\n fn node(&self) -> NodeID {\n self.output.id\n }\n\n fn parents(&self) -> Vec<NodeID> {\n self.nodes\n .iter()\n .filter_map(|node| node.clone())\n .map(|node| node.id)\n .collect()\n }\n fn depth(&self) -> usize {\n self.output.order\n }\n }\n\n let mut nodes = Vec::with_capacity(tensors.len());\n let mut primitives = Vec::with_capacity(tensors.len());\n let mut dim_sizes = Vec::with_capacity(tensors.len());\n\n tensors.into_iter().for_each(|tensor| {\n dim_sizes.push(tensor.primitive.shape().dims[dim]);\n nodes.push(tensor.node);\n primitives.push(tensor.primitive);\n });\n\n let requirement = Requirement::from_nodes(&nodes);\n\n // For simplicity, this operation does not checkpoint anything\n let cat_computing_property = ComputingProperty::Ambiguous;\n let checkpointer_builder = CheckpointerBuilder::default();\n\n let output = B::float_cat(primitives, dim);\n if requirement.is_none() {\n return AutodiffTensor::from_parents(\n output,\n &nodes,\n requirement,\n cat_computing_property,\n );\n }\n\n let output =\n AutodiffTensor::from_parents(output, &nodes, requirement, cat_computing_property);\n let nodes = nodes\n .into_iter()\n .map(|node| node.clone_if_require_grad())\n .collect::<Vec<_>>();\n\n let ops = CatStep::<B>::new(nodes, dim_sizes, output.node.clone(), dim);\n output.register_step(ops, checkpointer_builder)\n }\n\n fn float_max_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {\n match MaxMinDim\n .prepare::<C>([tensor.node])\n 
.compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let shape = tensor.primitive.shape();\n let (tensor, index) = B::float_max_dim_with_indices(tensor.primitive, dim);\n prep.finish((index, shape), tensor)\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_max_dim(tensor.primitive, dim)),\n }\n }\n fn float_max_dim_with_indices(\n tensor: FloatTensor<Self>,\n dim: usize,\n ) -> (FloatTensor<Self>, IntTensor<B>) {\n match MaxMinDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let shape = tensor.primitive.shape();\n let (tensor, index) = B::float_max_dim_with_indices(tensor.primitive, dim);\n let tensor = prep.finish((index.clone(), shape), tensor);\n\n (tensor, index)\n }\n OpsKind::UnTracked(prep) => {\n let (tensor, index) = B::float_max_dim_with_indices(tensor.primitive, dim);\n let tensor = prep.finish(tensor);\n\n (tensor, index)\n }\n }\n }\n fn float_min_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {\n match MaxMinDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let shape = tensor.primitive.shape();\n let (tensor, index) = B::float_min_dim_with_indices(tensor.primitive, dim);\n prep.finish((index, shape), tensor)\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_min_dim(tensor.primitive, dim)),\n }\n }\n fn float_min_dim_with_indices(\n tensor: FloatTensor<Self>,\n dim: usize,\n ) -> (FloatTensor<Self>, IntTensor<B>) {\n match MaxMinDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let shape = tensor.primitive.shape();\n let (tensor, index) = B::float_min_dim_with_indices(tensor.primitive, dim);\n let tensor = prep.finish((index.clone(), shape), tensor);\n\n (tensor, index)\n }\n OpsKind::UnTracked(prep) => {\n let (tensor, index) = B::float_min_dim_with_indices(tensor.primitive, dim);\n let tensor = prep.finish(tensor);\n\n (tensor, index)\n }\n }\n }\n\n 
fn float_into_int(tensor: FloatTensor<Self>) -> <Autodiff<B> as Backend>::IntTensorPrimitive {\n B::float_into_int(tensor.primitive)\n }\n\n fn float_powf(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct PowF;\n\n retro_binary!(RetroPowf, B::float_powf);\n\n impl<B: Backend> Backward<B, 2> for PowF {\n type State = (NodeID, NodeID, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (lhs_id, rhs_id, broadcast) = ops.state;\n let lhs: B::FloatTensorPrimitive = checkpointer.retrieve_node_output(lhs_id);\n let rhs: B::FloatTensorPrimitive = checkpointer.retrieve_node_output(rhs_id);\n\n // Both lhs and rhs are needed for both lhs and rhs gradients, but we clone them\n // the number of times required by the parents specification.\n let [rhs_4lhs, rhs_4rhs] = duplicate(&ops.parents, Some(rhs));\n let [lhs_4lhs, lhs_4rhs] = duplicate(&ops.parents, Some(lhs));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n //rhs*(lhs.val**(rhs-1))*grad\n let rhs1 = rhs_4lhs.unwrap();\n let rhs2 = rhs1.clone();\n let lhs = lhs_4lhs.unwrap();\n\n let tmp = B::float_powf(\n lhs,\n B::float_sub_scalar(rhs1, B::FloatElem::from_elem(1.0)),\n );\n let value = B::float_mul(tmp, rhs2);\n let grad = B::float_mul(grad, value);\n\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n //lhs**rhs * ln(lhs) * grad\n let rhs = rhs_4rhs.unwrap();\n let lhs1 = lhs_4rhs.unwrap();\n let lhs2 = lhs1.clone();\n let tmp = B::float_powf(lhs1, rhs);\n let value = B::float_mul(tmp, B::float_log(lhs2));\n let grad = B::float_mul(grad, value);\n\n broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match PowF\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroPowf::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, 
&rhs])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = prep.checkpoint(&lhs);\n let rhs_state = prep.checkpoint(&rhs);\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n B::float_powf(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_powf(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_sign(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sign;\n\n retro_unary!(RetroSign, B::float_sign);\n\n impl<B: Backend> Backward<B, 1> for Sign {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad|\n // Always return 0 because the derivative of the sign function\n // does not contribute to gradient updates in a meaningful way.\n B::float_mul_scalar(grad, 0.elem()));\n }\n }\n\n Sign.prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSign::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateless(B::float_sign(tensor.primitive))\n }\n\n fn float_expand(tensor: FloatTensor<Self>, shape: Shape) -> FloatTensor<Self> {\n // D1: tensor, D2: shape\n #[derive(Debug)]\n struct ExpandDim;\n\n #[derive(new, Debug)]\n struct RetroExpand<B: Backend> {\n input_id: NodeID,\n shape: Shape,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroExpand<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_expand(input, self.shape.clone());\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for ExpandDim {\n type State = (Shape, Shape);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape_in, shape_out) = ops.state;\n let ndims_in = shape_in.num_dims();\n let ndims_out = shape_out.num_dims();\n\n let 
mut shape_expanded = vec![1; ndims_out];\n\n debug_assert!(ndims_out >= ndims_in);\n\n for i in 0..ndims_in {\n shape_expanded[i + (ndims_out - ndims_in)] = shape_in.dims[i];\n }\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let shape_grad = grad.shape();\n let mut grad = grad;\n\n #[allow(clippy::needless_range_loop)]\n for i in 0..ndims_out {\n if shape_expanded[i] == 1 && shape_grad.dims[i] != 1 {\n grad = B::float_sum_dim(grad, i);\n }\n }\n\n B::float_reshape(grad, shape_in)\n });\n }\n }\n\n match ExpandDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroExpand::<B>::new(tensor.node.id, shape.clone()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (tensor.primitive.shape(), shape.clone()),\n B::float_expand(tensor.primitive, shape),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_expand(tensor.primitive, shape)),\n }\n }\n\n fn float_sort(tensor: FloatTensor<Self>, dim: usize, descending: bool) -> FloatTensor<Self> {\n match super::sort::SortDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let shape = tensor.primitive.shape();\n let (tensor, indices) =\n B::float_sort_with_indices(tensor.primitive, dim, descending);\n prep.finish((indices, shape), tensor)\n }\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_sort(tensor.primitive, dim, descending))\n }\n }\n }\n\n fn float_sort_with_indices(\n tensor: FloatTensor<Self>,\n dim: usize,\n descending: bool,\n ) -> (FloatTensor<Self>, IntTensor<B>) {\n match super::sort::SortDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let shape = tensor.primitive.shape();\n let (tensor, indices) =\n B::float_sort_with_indices(tensor.primitive, dim, descending);\n let tensor = prep.finish((indices.clone(), shape), tensor);\n\n (tensor, indices)\n }\n OpsKind::UnTracked(prep) => {\n let (tensor, indices) =\n 
B::float_sort_with_indices(tensor.primitive, dim, descending);\n let tensor = prep.finish(tensor);\n\n (tensor, indices)\n }\n }\n }\n\n fn float_argsort(tensor: FloatTensor<Self>, dim: usize, descending: bool) -> IntTensor<B> {\n B::float_argsort(tensor.primitive, dim, descending)\n }\n\n fn float_repeat_dim(tensor: FloatTensor<Self>, dim: usize, times: usize) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Repeat;\n\n #[derive(new, Debug)]\n struct RetroRepeat<B: Backend> {\n tensor_id: NodeID,\n dim: usize,\n times: usize,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroRepeat<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);\n let out = B::float_repeat_dim(tensor, self.dim, self.times);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for Repeat {\n type State = (usize, usize);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, times) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let mut dims = grad.shape().dims;\n let orig_dim_size = dims[dim] / times;\n if orig_dim_size > 1 {\n dims[dim] = orig_dim_size;\n let orig_dims = dims.clone();\n dims.insert(dim + 1, times); // shape [..., orig_dim_size, times, ...]\n let grad = B::float_reshape(grad, Shape::from(dims));\n let grad = B::float_sum_dim(grad, dim + 1); // sum over repeat times\n B::float_reshape(grad, Shape::from(orig_dims))\n } else {\n B::float_sum_dim(grad, dim)\n }\n });\n }\n }\n\n match Repeat\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroRepeat::<B>::new(tensor.node.id, dim, times))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (dim, times),\n B::float_repeat_dim(tensor.primitive, dim, times),\n ),\n OpsKind::UnTracked(prep) => {\n 
prep.finish(B::float_repeat_dim(tensor.primitive, dim, times))\n }\n }\n }\n\n fn float_cast(tensor: FloatTensor<Self>, dtype: burn_tensor::FloatDType) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_cast(tensor.primitive, dtype))\n }\n\n // TODO: Implement float_prod and float_sum\n // https://github.com/tracel-ai/burn/issues/1458\n}"
],
"name": "lhs",
"type": "FloatTensor<Self>"
},
{
"definitions": [
"impl<B: Backend, C: CheckpointStrategy> FloatTensorOps<Self> for Autodiff<B, C> {\n fn float_from_data(data: TensorData, device: &Device<Self>) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_from_data(data, device))\n }\n\n fn float_random(\n shape: Shape,\n distribution: burn_tensor::Distribution,\n device: &Device<Self>,\n ) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_random(shape, distribution, device))\n }\n\n fn float_zeros(shape: Shape, device: &Device<Self>) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_zeros(shape, device))\n }\n\n fn float_ones(shape: Shape, device: &Device<Self>) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_ones(shape, device))\n }\n\n async fn float_into_data(tensor: FloatTensor<Self>) -> TensorData {\n B::float_into_data(tensor.primitive).await\n }\n\n fn float_device(tensor: &FloatTensor<Self>) -> Device<Self> {\n B::float_device(&tensor.primitive)\n }\n\n fn float_to_device(tensor: FloatTensor<Self>, device: &Device<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct ToDevice;\n\n impl<B: Backend> Backward<B, 1> for ToDevice {\n type State = B::Device;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_to_device(grad, &ops.state)\n });\n }\n }\n\n match ToDevice\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let device_old = B::float_device(&tensor.primitive);\n prep.finish(device_old, B::float_to_device(tensor.primitive, device))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_to_device(tensor.primitive, device)),\n }\n }\n\n fn float_empty(shape: Shape, device: &Device<Self>) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_empty(shape, device))\n }\n\n fn float_add(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Add;\n\n retro_binary!(RetroAdd, 
B::float_add);\n\n impl<B: Backend> Backward<B, 2> for Add {\n type State = (Shape, Shape);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape_lhs, shape_rhs) = ops.state;\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| broadcast_shape::<B>(grad, &shape_lhs),\n |grad| broadcast_shape::<B>(grad, &shape_rhs),\n );\n }\n }\n\n match Add\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroAdd::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (lhs.primitive.shape(), rhs.primitive.shape()),\n B::float_add(lhs.primitive, rhs.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_add(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_add_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct AddScalar;\n\n retro_unary_scalar!(RetroAddScalar, B::float_add_scalar);\n\n impl<B: Backend> Backward<B, 1> for AddScalar {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| grad);\n }\n }\n\n AddScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroAddScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateless(B::float_add_scalar(lhs.primitive, rhs))\n }\n\n fn float_sub(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sub;\n\n retro_binary!(RetroSub, B::float_sub);\n\n impl<B: Backend> Backward<B, 2> for Sub {\n type State = (Shape, Shape);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape_lhs, shape_rhs) = ops.state;\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| 
broadcast_shape::<B>(grad, &shape_lhs),\n |grad| broadcast_shape::<B>(B::float_neg(grad), &shape_rhs),\n );\n }\n }\n\n match Sub\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroSub::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (lhs.primitive.shape(), rhs.primitive.shape()),\n B::float_sub(lhs.primitive, rhs.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_sub(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_sub_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct SubScalar;\n\n retro_unary_scalar!(RetroSubScalar, B::float_sub_scalar);\n\n impl<B: Backend> Backward<B, 1> for SubScalar {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| grad);\n }\n }\n\n SubScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroSubScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateless(B::float_sub_scalar(lhs.primitive, rhs))\n }\n\n fn float_mul(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Mul;\n\n retro_binary!(RetroMul, B::float_mul);\n\n impl<B: Backend> Backward<B, 2> for Mul {\n type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (lhs, rhs, broadcast) = ops.state;\n let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));\n let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let grad = B::float_mul(grad, rhs.unwrap());\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n let grad = B::float_mul(grad, lhs.unwrap());\n 
broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let lhs_tracked = lhs.is_tracked();\n let rhs_tracked = rhs.is_tracked();\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match Mul\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroMul::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));\n let rhs_state = lhs_tracked.then(|| prep.checkpoint(&rhs));\n\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n B::float_mul(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_mul(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_mul_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct MulScalar;\n\n retro_unary_scalar!(RetroMulScalar, B::float_mul_scalar);\n\n impl<B: Backend> Backward<B, 1> for MulScalar {\n type State = FloatElem<B>;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_mul_scalar(grad, ops.state)\n });\n }\n }\n\n match MulScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroMulScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(rhs, B::float_mul_scalar(lhs.primitive, rhs)),\n OpsKind::UnTracked(prep) => prep.finish(B::float_mul_scalar(lhs.primitive, rhs)),\n }\n }\n\n fn float_div(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Div;\n\n retro_binary!(RetroDiv, B::float_div);\n\n impl<B: Backend> Backward<B, 2> for Div {\n type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut 
Checkpointer,\n ) {\n let (lhs, rhs, broadcast) = ops.state;\n let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));\n let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));\n let [rhs_4lhs, rhs_4rhs] = duplicate(&ops.parents, rhs);\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let rhs = rhs_4lhs.unwrap();\n let value = B::float_powf_scalar(rhs, -1.0);\n let grad = B::float_mul(grad, value);\n\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n let rhs = rhs_4rhs.unwrap();\n let lhs = lhs.unwrap();\n let value = B::float_div(B::float_neg(lhs), B::float_powf_scalar(rhs, 2.0));\n let grad = B::float_mul(grad, value);\n\n broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let lhs_tracked = lhs.is_tracked();\n let rhs_tracked = rhs.is_tracked();\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match Div\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroDiv::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));\n let rhs_state = (lhs_tracked || rhs_tracked).then(|| prep.checkpoint(&rhs));\n\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n B::float_div(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_div(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_div_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct DivScalar;\n\n retro_unary_scalar!(RetroDivScalar, B::float_div_scalar);\n\n impl<B: Backend> Backward<B, 1> for DivScalar {\n type State = FloatElem<B>;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let tmp = 1.0 / ops.state.elem::<f32>();\n B::float_mul_scalar(grad, tmp.elem())\n });\n }\n 
}\n\n match DivScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroDivScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(rhs, B::float_div_scalar(lhs.primitive, rhs)),\n OpsKind::UnTracked(prep) => prep.finish(B::float_div_scalar(lhs.primitive, rhs)),\n }\n }\n\n fn float_remainder(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Rem;\n\n retro_binary!(RetroRem, B::float_remainder);\n\n impl<B: Backend> Backward<B, 2> for Rem {\n type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (lhs, rhs, broadcast) = ops.state;\n let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));\n let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n // remainder(x, y) = x - floor(x / y) * y\n // partial(x - floor(x / y) * y, x) = 1\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n // partial(x - floor(x / y) * y, y) = - floor(x / y)\n let rhs = rhs.unwrap();\n let lhs = lhs.unwrap();\n let value = B::float_neg(B::float_floor(B::float_div(lhs, rhs)));\n let grad = B::float_mul(grad, value);\n broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let lhs_tracked = lhs.is_tracked();\n let rhs_tracked = rhs.is_tracked();\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match Rem\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroRem::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));\n let rhs_state = (lhs_tracked || rhs_tracked).then(|| prep.checkpoint(&rhs));\n\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n 
B::float_remainder(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_remainder(lhs.primitive, rhs.primitive))\n }\n }\n }\n\n fn float_remainder_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct RemainderScalar;\n\n retro_unary_scalar!(RetroRemainderScalar, B::float_remainder_scalar);\n\n impl<B: Backend> Backward<B, 1> for RemainderScalar {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| grad);\n }\n }\n\n RemainderScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroRemainderScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateless(B::float_remainder_scalar(lhs.primitive, rhs))\n }\n\n fn float_matmul(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Matmul;\n\n impl<B: Backend> Backward<B, 2> for Matmul {\n type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (lhs, rhs, broadcast) = ops.state;\n let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));\n let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let rhs = B::float_transpose(rhs.unwrap());\n let grad = B::float_matmul(grad, rhs);\n\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n let lhs = B::float_transpose(lhs.unwrap());\n let grad = B::float_matmul(lhs, grad);\n\n broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let lhs_tracked = lhs.is_tracked();\n let rhs_tracked = rhs.is_tracked();\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match Matmul\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n 
.compute_bound()\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));\n let rhs_state = lhs_tracked.then(|| prep.checkpoint(&rhs));\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n B::float_matmul(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_matmul(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_neg(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Neg;\n\n retro_unary!(RetroNeg, B::float_neg);\n\n impl<B: Backend> Backward<B, 1> for Neg {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| B::float_neg(grad));\n }\n }\n\n Neg.prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroNeg::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateless(B::float_neg(tensor.primitive))\n }\n\n fn float_recip(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Recip;\n\n retro_unary!(RetroRecip, B::float_recip);\n\n impl<B: Backend> Backward<B, 1> for Recip {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let tensor = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let tmp = B::float_powf_scalar(tensor, -2.0);\n let value = B::float_neg(tmp);\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Recip\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroRecip::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_recip(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_recip(tensor.primitive)),\n }\n }\n\n fn float_swap_dims(tensor: 
FloatTensor<Self>, dim1: usize, dim2: usize) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct SwapDim;\n\n #[derive(new, Debug)]\n struct RetroSwapDims<B: Backend> {\n input_id: NodeID,\n dim1: usize,\n dim2: usize,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroSwapDims<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_swap_dims(input, self.dim1, self.dim2);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for SwapDim {\n type State = (usize, usize);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim1, dim2) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_swap_dims(grad, dim2, dim1)\n });\n }\n }\n\n match SwapDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSwapDims::<B>::new(tensor.node.id, dim1, dim2))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (dim1, dim2),\n B::float_swap_dims(tensor.primitive, dim1, dim2),\n ),\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_swap_dims(tensor.primitive, dim1, dim2))\n }\n }\n }\n\n fn float_permute(tensor: FloatTensor<Self>, axes: &[usize]) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct PermuteDim;\n\n #[derive(new, Debug)]\n struct RetroPermuteDims<B: Backend> {\n input_id: NodeID,\n axes: Vec<usize>,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroPermuteDims<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_permute(input, &self.axes);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for PermuteDim {\n type State = Vec<usize>;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: 
&mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let axes = ops.state;\n\n let mut inverse = vec![0usize; axes.len()];\n axes.iter()\n .enumerate()\n .for_each(|(i, &axis)| inverse[axis] = i);\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_permute(grad, &inverse)\n });\n }\n }\n\n match PermuteDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroPermuteDims::<B>::new(tensor.node.id, axes.to_vec()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n prep.finish(axes.to_vec(), B::float_permute(tensor.primitive, axes))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_permute(tensor.primitive, axes)),\n }\n }\n\n fn float_flip(tensor: FloatTensor<Self>, axes: &[usize]) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct FlipDim;\n\n #[derive(new, Debug)]\n struct RetroFlipDims<B: Backend> {\n input_id: NodeID,\n axes: Vec<usize>,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroFlipDims<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_flip(input, &self.axes);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for FlipDim {\n type State = Vec<usize>;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let axes = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_flip(grad, &axes)\n });\n }\n }\n\n match FlipDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroFlipDims::<B>::new(tensor.node.id, axes.to_vec()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n prep.finish(axes.to_vec(), B::float_flip(tensor.primitive, axes))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_flip(tensor.primitive, axes)),\n }\n }\n\n fn float_reshape(tensor: FloatTensor<Self>, shape: 
Shape) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct ReshapeDim;\n\n #[derive(new, Debug)]\n struct RetroReshape<B: Backend> {\n input_id: NodeID,\n shape: Shape,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroReshape<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_reshape(input, self.shape.clone());\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for ReshapeDim {\n type State = (Shape, Shape);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape_original, shape) = ops.state;\n let ndims_out = shape.num_dims();\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let shape_grad = grad.shape();\n let mut grad = grad;\n\n for i in 0..ndims_out {\n if shape.dims[i] == 1 && shape_grad.dims[i] != 1 {\n grad = B::float_sum_dim(grad, i);\n }\n }\n\n B::float_reshape(grad, shape_original)\n });\n }\n }\n\n match ReshapeDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroReshape::<B>::new(tensor.node.id, shape.clone()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (tensor.primitive.shape(), shape.clone()),\n B::float_reshape(tensor.primitive, shape),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_reshape(tensor.primitive, shape)),\n }\n }\n\n fn float_gather(\n dim: usize,\n tensor: FloatTensor<Self>,\n indices: IntTensor<B>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Gather;\n\n impl<B: Backend> Backward<B, 1> for Gather {\n type State = (usize, IntTensor<B>, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, indices, shape, device) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let zeros = 
B::float_zeros(shape, &device);\n B::float_scatter(dim, zeros, indices, grad)\n });\n }\n }\n\n match Gather\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n dim,\n indices.clone(),\n tensor.primitive.shape(),\n B::float_device(&tensor.primitive),\n ),\n B::float_gather(dim, tensor.primitive, indices),\n ),\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_gather(dim, tensor.primitive, indices))\n }\n }\n }\n\n fn float_scatter(\n dim: usize,\n tensor: FloatTensor<Self>,\n indices: IntTensor<B>,\n value: FloatTensor<Self>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Scatter;\n\n impl<B: Backend> Backward<B, 2> for Scatter {\n type State = (usize, IntTensor<B>, Shape, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, indices, shape_lhs, shape_rhs, device) = ops.state;\n let [indices_4lhs, indices_4rhs] = duplicate(&ops.parents, Some(indices));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let zeros = B::float_zeros(shape_lhs, &device);\n B::float_scatter(dim, grad, indices_4lhs.unwrap(), zeros)\n },\n |grad| {\n let zeros = B::float_zeros(shape_rhs, &device);\n B::float_scatter(dim, zeros, indices_4rhs.unwrap(), grad)\n },\n );\n }\n }\n\n match Scatter\n .prepare::<C>([tensor.node, value.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n dim,\n indices.clone(),\n tensor.primitive.shape(),\n value.primitive.shape(),\n B::float_device(&value.primitive),\n ),\n B::float_scatter(dim, tensor.primitive, indices, value.primitive),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_scatter(\n dim,\n tensor.primitive,\n indices,\n value.primitive,\n )),\n }\n }\n\n fn float_select(\n tensor: FloatTensor<Self>,\n dim: usize,\n indices: IntTensor<B>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Select;\n\n 
#[derive(new, Debug)]\n struct RetroSelect<B: Backend> {\n input_id: NodeID,\n dim: usize,\n indices: IntTensor<B>,\n }\n\n impl<B: Backend> RetroForward for RetroSelect<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_select(input, self.dim, self.indices.clone());\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for Select {\n type State = (usize, IntTensor<B>, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, indices, shape, device) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let zeros = B::float_zeros(shape, &device);\n B::float_select_assign(zeros, dim, indices, grad)\n });\n }\n }\n\n match Select\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSelect::<B>::new(tensor.node.id, dim, indices.clone()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n dim,\n indices.clone(),\n tensor.primitive.shape(),\n B::float_device(&tensor.primitive),\n ),\n B::float_select(tensor.primitive, dim, indices),\n ),\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_select(tensor.primitive, dim, indices))\n }\n }\n }\n\n fn float_select_assign(\n tensor: FloatTensor<Self>,\n dim: usize,\n indices: IntTensor<B>,\n value: FloatTensor<Self>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct IndexSelectDimAssign;\n\n #[derive(new, Debug)]\n struct RetroSelectAssign<B: Backend> {\n tensor_id: NodeID,\n dim: usize,\n indices: IntTensor<B>,\n value_id: NodeID,\n }\n\n impl<B: Backend> RetroForward for RetroSelectAssign<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);\n let value = states.get_state::<B::FloatTensorPrimitive>(&self.value_id);\n 
let out = B::float_select_assign(tensor, self.dim, self.indices.clone(), value);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 2> for IndexSelectDimAssign {\n type State = (usize, IntTensor<B>);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, indices) = ops.state;\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| grad,\n |grad| B::float_select(grad, dim, indices),\n );\n }\n }\n\n match IndexSelectDimAssign\n .prepare::<C>([tensor.node.clone(), value.node.clone()])\n .memory_bound()\n .retro_forward(RetroSelectAssign::<B>::new(\n tensor.node.id,\n dim,\n indices.clone(),\n value.node.id,\n ))\n .parents([&tensor, &value])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (dim, indices.clone()),\n B::float_select_assign(tensor.primitive, dim, indices, value.primitive),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_select_assign(\n tensor.primitive,\n dim,\n indices,\n value.primitive,\n )),\n }\n }\n\n fn float_slice(\n tensor: FloatTensor<Self>,\n ranges: &[core::ops::Range<usize>],\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Index;\n\n #[derive(new, Debug)]\n struct RetroSlice<B: Backend> {\n tensor_id: NodeID,\n ranges: Vec<core::ops::Range<usize>>,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroSlice<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);\n let out = B::float_slice(tensor, &self.ranges);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for Index {\n type State = (Vec<core::ops::Range<usize>>, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (ranges, shape, device) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let zeros = 
B::float_zeros(shape, &device);\n B::float_slice_assign(zeros, &ranges, grad)\n });\n }\n }\n\n match Index\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSlice::<B>::new(tensor.node.id, ranges.to_vec()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n ranges.to_vec(),\n tensor.primitive.shape(),\n B::float_device(&tensor.primitive),\n ),\n B::float_slice(tensor.primitive, ranges),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_slice(tensor.primitive, ranges)),\n }\n }\n\n fn float_slice_assign(\n tensor: FloatTensor<Self>,\n ranges: &[core::ops::Range<usize>],\n value: FloatTensor<Self>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct SliceAssign;\n\n #[derive(new, Debug)]\n struct RetroSliceAssign<B: Backend> {\n tensor_id: NodeID,\n ranges: Vec<core::ops::Range<usize>>,\n value_id: NodeID,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroSliceAssign<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);\n let value = states.get_state::<B::FloatTensorPrimitive>(&self.value_id);\n let out = B::float_slice_assign(tensor, &self.ranges, value);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 2> for SliceAssign {\n type State = (Vec<core::ops::Range<usize>>, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (ranges, shape_rhs, device) = ops.state;\n let [ranges_4lhs, ranges_4rhs] = duplicate(&ops.parents, Some(ranges));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let zeros = B::float_zeros(shape_rhs, &device);\n B::float_slice_assign(grad, &ranges_4lhs.unwrap(), zeros)\n },\n |grad| B::float_slice(grad, &ranges_4rhs.unwrap()),\n );\n }\n }\n\n match SliceAssign\n .prepare::<C>([tensor.node.clone(), value.node.clone()])\n 
.memory_bound()\n .retro_forward(RetroSliceAssign::<B>::new(\n tensor.node.id,\n ranges.to_vec(),\n value.node.id,\n ))\n .parents([&tensor, &value])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n ranges.to_vec(),\n value.primitive.shape(),\n B::float_device(&value.primitive),\n ),\n B::float_slice_assign(tensor.primitive, ranges, value.primitive),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_slice_assign(\n tensor.primitive,\n ranges,\n value.primitive,\n )),\n }\n }\n\n fn float_mask_where(\n tensor: FloatTensor<Self>,\n mask: BoolTensor<Self>,\n source: FloatTensor<Self>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct MaskWhere;\n\n impl<B: Backend> Backward<B, 2> for MaskWhere {\n type State = (BoolTensor<B>, Shape, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (mask, shape_lhs, shape_rhs, device) = ops.state;\n let [mask_4lhs, mask_4rhs] = duplicate(&ops.parents, Some(mask));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let zeros = B::float_zeros(shape_lhs.clone(), &device);\n let grad = B::float_mask_where(grad, mask_4lhs.unwrap(), zeros);\n\n broadcast_shape::<B>(grad, &shape_lhs)\n },\n |grad| {\n let zeros = B::float_zeros(shape_rhs.clone(), &device);\n let grad = B::float_mask_where(zeros, mask_4rhs.unwrap(), grad);\n\n broadcast_shape::<B>(grad, &shape_rhs)\n },\n );\n }\n }\n\n match MaskWhere\n .prepare::<C>([tensor.node, source.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n mask.clone(),\n tensor.primitive.shape(),\n source.primitive.shape(),\n B::float_device(&source.primitive),\n ),\n B::float_mask_where(tensor.primitive, mask, source.primitive),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_mask_where(\n tensor.primitive,\n mask,\n source.primitive,\n )),\n }\n }\n\n fn float_mask_fill(\n tensor: FloatTensor<Self>,\n mask: 
        BoolTensor<B>,
        value: FloatElem<B>,
    ) -> FloatTensor<Self> {
        // Backward op for mask_fill: positions that were overwritten by `value`
        // receive no gradient (they are filled with 0), all other positions pass
        // the gradient through unchanged.
        #[derive(Debug)]
        struct MaskFill;

        impl<B: Backend> Backward<B, 1> for MaskFill {
            // State is the boolean mask used in the forward pass.
            type State = BoolTensor<B>;

            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    // Zero out the gradient where the mask replaced the input.
                    B::float_mask_fill(grad, ops.state, 0.elem())
                });
            }
        }

        match MaskFill
            .prepare::<C>([tensor.node])
            .compute_bound()
            .stateful()
        {
            // Tracked: save the mask so backward can zero the filled positions.
            OpsKind::Tracked(prep) => prep.finish(
                mask.clone(),
                B::float_mask_fill(tensor.primitive, mask, value),
            ),
            OpsKind::UnTracked(prep) => {
                prep.finish(B::float_mask_fill(tensor.primitive, mask, value))
            }
        }
    }

    // Comparison ops below produce boolean tensors and are not differentiable,
    // so they are forwarded directly to the inner backend without registering
    // anything on the autodiff graph.

    fn float_equal(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {
        B::float_equal(lhs.primitive, rhs.primitive)
    }

    fn float_equal_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {
        B::float_equal_elem(lhs.primitive, rhs)
    }

    fn float_greater(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {
        B::float_greater(lhs.primitive, rhs.primitive)
    }

    fn float_greater_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {
        B::float_greater_elem(lhs.primitive, rhs)
    }

    fn float_greater_equal(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {
        B::float_greater_equal(lhs.primitive, rhs.primitive)
    }

    fn float_greater_equal_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {
        B::float_greater_equal_elem(lhs.primitive, rhs)
    }

    fn float_lower(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {
        B::float_lower(lhs.primitive, rhs.primitive)
    }

    fn float_lower_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {
        B::float_lower_elem(lhs.primitive, rhs)
    }

    fn float_lower_equal(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {
        B::float_lower_equal(lhs.primitive, rhs.primitive)
}\n\n fn float_lower_equal_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {\n B::float_lower_equal_elem(lhs.primitive, rhs)\n }\n\n fn float_detach(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n // When we detach a tensor, we remove it from the graph, but we still want to keep the\n // `require_grad` setting.\n let is_require_grad = Self::float_is_require_grad(&tensor);\n let tensor = AutodiffTensor::new(tensor.primitive);\n\n match is_require_grad {\n true => tensor.require_grad(),\n false => tensor,\n }\n }\n\n fn float_set_require_grad(tensor: FloatTensor<Self>, require_grad: bool) -> FloatTensor<Self> {\n if require_grad {\n return tensor.require_grad();\n }\n\n AutodiffTensor::new(tensor.primitive)\n }\n\n fn float_is_require_grad(tensor: &FloatTensor<Self>) -> bool {\n matches!(tensor.node.requirement, Requirement::Grad)\n }\n\n fn float_mean(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Mean;\n\n impl<B: Backend> Backward<B, 1> for Mean {\n type State = Shape;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let shape = ops.state;\n let val = 1_f64 / shape.num_elements() as f64;\n let ones = B::float_ones(shape, &B::float_device(&grad));\n let val = B::float_mul_scalar(ones, val.elem());\n\n let grad = unsqueeze_like::<B>(grad, val.shape());\n B::float_mul(val, grad)\n });\n }\n }\n\n match Mean.prepare::<C>([tensor.node]).compute_bound().stateful() {\n OpsKind::Tracked(prep) => {\n prep.finish(tensor.primitive.shape(), B::float_mean(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_mean(tensor.primitive)),\n }\n }\n\n fn float_sum(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sum;\n\n impl<B: Backend> Backward<B, 1> for Sum {\n type State = Shape;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut 
Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let val = B::float_ones(ops.state, &B::float_device(&grad));\n\n let grad = unsqueeze_like::<B>(grad, val.shape());\n B::float_mul(val, grad)\n });\n }\n }\n\n match Sum.prepare::<C>([tensor.node]).compute_bound().stateful() {\n OpsKind::Tracked(prep) => {\n prep.finish(tensor.primitive.shape(), B::float_sum(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_sum(tensor.primitive)),\n }\n }\n\n fn float_mean_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct MeanDim;\n\n impl<B: Backend> Backward<B, 1> for MeanDim {\n type State = (Shape, usize);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape, dim) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let val = 1_f64 / shape.dims[dim] as f64;\n let ones = B::float_ones(shape, &B::float_device(&grad));\n let val = B::float_mul_scalar(ones, B::FloatElem::from_elem(val));\n\n let grad = B::float_sum_dim(grad, dim);\n B::float_mul(val, grad)\n });\n }\n }\n\n match MeanDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (tensor.primitive.shape(), dim),\n B::float_mean_dim(tensor.primitive, dim),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_mean_dim(tensor.primitive, dim)),\n }\n }\n\n fn float_sum_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct SumDim;\n\n impl<B: Backend> Backward<B, 1> for SumDim {\n type State = (Shape, usize);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape, dim) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let ones = B::float_ones(shape, &B::float_device(&grad));\n let grad = 
B::float_sum_dim(grad, dim);\n\n B::float_mul(ones, grad)\n });\n }\n }\n\n match SumDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (tensor.primitive.shape(), dim),\n B::float_sum_dim(tensor.primitive, dim),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_sum_dim(tensor.primitive, dim)),\n }\n }\n\n fn float_argmax(tensor: FloatTensor<Self>, dim: usize) -> IntTensor<B> {\n B::float_argmax(tensor.primitive, dim)\n }\n\n fn float_argmin(tensor: FloatTensor<Self>, dim: usize) -> IntTensor<B> {\n B::float_argmin(tensor.primitive, dim)\n }\n\n fn float_exp(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Exp;\n\n retro_unary!(RetroExp, B::float_exp);\n\n impl<B: Backend> Backward<B, 1> for Exp {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n let output = B::float_exp(input);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_mul(grad, output)\n });\n }\n }\n\n match Exp\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroExp::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_exp(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_exp(tensor.primitive)),\n }\n }\n\n fn float_log(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Log;\n\n retro_unary!(RetroLog, B::float_log);\n\n impl<B: Backend> Backward<B, 1> for Log {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = 
B::float_powf_scalar(input, -1.0);\n B::float_mul(grad, value)\n });\n }\n }\n\n match Log\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroLog::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_log(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_log(tensor.primitive)),\n }\n }\n\n fn float_log1p(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Log1P;\n\n retro_unary!(RetroLog1P, B::float_log1p);\n\n impl<B: Backend> Backward<B, 1> for Log1P {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_add_scalar(input, 1.elem());\n let value = B::float_powf_scalar(value, -1.0);\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Log1P\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroLog1P::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_log1p(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_log1p(tensor.primitive)),\n }\n }\n\n fn float_powf_scalar(tensor: FloatTensor<Self>, value: f32) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct PowfScalar;\n\n #[derive(new, Debug)]\n struct RetroPowfScalar<B: Backend> {\n lhs_id: NodeID,\n rhs: f32,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroPowfScalar<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let lhs = states.get_state::<B::FloatTensorPrimitive>(&self.lhs_id);\n let out = B::float_powf_scalar(lhs, self.rhs);\n states.save(out_node, out)\n }\n }\n\n impl<B: 
Backend> Backward<B, 1> for PowfScalar {\n type State = (NodeID, f32);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (tensor_id, value) = ops.state;\n let tensor = checkpointer.retrieve_node_output(tensor_id);\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let tmp = B::float_powf_scalar(tensor, value - 1.0);\n let value = B::float_mul_scalar(tmp, value.elem());\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match PowfScalar\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroPowfScalar::<B>::new(tensor.node.id, value))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = (prep.checkpoint(&tensor), value);\n prep.finish(state, B::float_powf_scalar(tensor.primitive, value))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_powf_scalar(tensor.primitive, value)),\n }\n }\n\n fn float_sqrt(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sqrt;\n\n retro_unary!(RetroSqrt, B::float_sqrt);\n\n impl<B: Backend> Backward<B, 1> for Sqrt {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_div_scalar(B::float_powf_scalar(input, -0.5), 2.elem());\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Sqrt\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSqrt::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_sqrt(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_sqrt(tensor.primitive)),\n }\n }\n\n fn float_abs(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Abs;\n\n 
retro_unary!(RetroAbs, B::float_abs);\n\n impl<B: Backend> Backward<B, 1> for Abs {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let tensor: B::FloatTensorPrimitive = checkpointer.retrieve_node_output(ops.state);\n let state = B::float_sign(tensor);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_mul(grad, state)\n });\n }\n }\n\n match Abs\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroAbs::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_abs(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_abs(tensor.primitive)),\n }\n }\n\n fn float_cos(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Cos;\n\n retro_unary!(RetroCos, B::float_cos);\n\n impl<B: Backend> Backward<B, 1> for Cos {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_neg(B::float_sin(input));\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Cos\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroCos::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_cos(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_cos(tensor.primitive)),\n }\n }\n\n fn float_sin(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sin;\n\n retro_unary!(RetroSin, B::float_sin);\n\n impl<B: Backend> Backward<B, 1> for Sin {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 
1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let state = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_cos(state);\n B::float_mul(grad, value)\n });\n }\n }\n\n match Sin\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSin::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_sin(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_sin(tensor.primitive)),\n }\n }\n\n fn float_tanh(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Tanh;\n\n retro_unary!(RetroTanh, B::float_tanh);\n\n impl<B: Backend> Backward<B, 1> for Tanh {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n let state = B::float_tanh(input);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_add_scalar(\n B::float_neg(B::float_powf_scalar(state, 2.0)),\n 1.elem(),\n );\n B::float_mul(grad, value)\n });\n }\n }\n\n match Tanh\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroTanh::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_tanh(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_tanh(tensor.primitive)),\n }\n }\n\n fn float_round(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Round;\n retro_unary!(RetroRound, B::float_round);\n\n impl<B: Backend> Backward<B, 1> for Round {\n type State = (Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n 
) {\n let (shape, device) = ops.state;\n unary::<B, _>(ops.parents, ops.node, grads, |_grad| {\n B::float_zeros(shape, &device)\n })\n }\n }\n\n match Round\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroRound::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (tensor.primitive.shape(), B::float_device(&tensor.primitive)),\n B::float_round(tensor.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_round(tensor.primitive)),\n }\n }\n\n fn float_floor(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Floor;\n retro_unary!(RetroFloor, B::float_floor);\n\n impl<B: Backend> Backward<B, 1> for Floor {\n type State = (Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape, device) = ops.state;\n unary::<B, _>(ops.parents, ops.node, grads, |_grad| {\n B::float_zeros(shape, &device)\n })\n }\n }\n\n match Floor\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroFloor::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (tensor.primitive.shape(), B::float_device(&tensor.primitive)),\n B::float_floor(tensor.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_floor(tensor.primitive)),\n }\n }\n\n fn float_ceil(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Ceil;\n retro_unary!(RetroCeil, B::float_ceil);\n\n impl<B: Backend> Backward<B, 1> for Ceil {\n type State = (Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape, device) = ops.state;\n unary::<B, _>(ops.parents, ops.node, grads, |_grad| {\n B::float_zeros(shape, &device)\n })\n }\n }\n\n match Ceil\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n 
.retro_forward(RetroCeil::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (tensor.primitive.shape(), B::float_device(&tensor.primitive)),\n B::float_floor(tensor.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_floor(tensor.primitive)),\n }\n }\n\n fn float_erf(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Erf;\n\n retro_unary!(RetroErf, B::float_erf);\n\n impl<B: Backend> Backward<B, 1> for Erf {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let ops = checkpointer.retrieve_node_output(ops.state);\n let exponent = B::float_neg(B::float_powf_scalar(ops, 2.0));\n let numerator = B::float_mul_scalar(B::float_exp(exponent), 2.0.elem());\n let denominator = core::f64::consts::PI.sqrt().elem();\n let value = B::float_div_scalar(numerator, denominator);\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Erf\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroErf::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_erf(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_erf(tensor.primitive)),\n }\n }\n\n fn float_cat(tensors: Vec<FloatTensor<Self>>, dim: usize) -> FloatTensor<Self> {\n #[derive(new, Debug)]\n struct CatStep<B: Backend> {\n nodes: Vec<Option<NodeRef>>,\n // The dimension of each tensor along the dim dimension.\n // This indicates the number of dimension concatenated for each tensor.\n dim_sizes: Vec<usize>,\n output: NodeRef,\n phantom: PhantomData<B>,\n dim: usize,\n }\n\n impl<B: Backend> Step for CatStep<B> {\n fn step(self: Box<Self>, grads: &mut Gradients, _checkpointer: &mut Checkpointer) {\n let grad = 
grads.consume::<B>(&self.output);\n let ranges: Vec<_> = grad.shape().dims.iter().map(|v| 0..*v).collect();\n\n let mut current_index = 0;\n\n self.nodes\n .into_iter()\n .zip(self.dim_sizes)\n .filter_map(|(node, dim_size)| node.map(|node| (node, dim_size)))\n .for_each(|(node, dim_size)| {\n let mut ranges = ranges.clone();\n ranges[self.dim] = current_index..dim_size + current_index;\n current_index += dim_size;\n grads.register::<B>(node.id, B::float_slice(grad.clone(), &ranges));\n });\n }\n\n fn node(&self) -> NodeID {\n self.output.id\n }\n\n fn parents(&self) -> Vec<NodeID> {\n self.nodes\n .iter()\n .filter_map(|node| node.clone())\n .map(|node| node.id)\n .collect()\n }\n fn depth(&self) -> usize {\n self.output.order\n }\n }\n\n let mut nodes = Vec::with_capacity(tensors.len());\n let mut primitives = Vec::with_capacity(tensors.len());\n let mut dim_sizes = Vec::with_capacity(tensors.len());\n\n tensors.into_iter().for_each(|tensor| {\n dim_sizes.push(tensor.primitive.shape().dims[dim]);\n nodes.push(tensor.node);\n primitives.push(tensor.primitive);\n });\n\n let requirement = Requirement::from_nodes(&nodes);\n\n // For simplicity, this operation does not checkpoint anything\n let cat_computing_property = ComputingProperty::Ambiguous;\n let checkpointer_builder = CheckpointerBuilder::default();\n\n let output = B::float_cat(primitives, dim);\n if requirement.is_none() {\n return AutodiffTensor::from_parents(\n output,\n &nodes,\n requirement,\n cat_computing_property,\n );\n }\n\n let output =\n AutodiffTensor::from_parents(output, &nodes, requirement, cat_computing_property);\n let nodes = nodes\n .into_iter()\n .map(|node| node.clone_if_require_grad())\n .collect::<Vec<_>>();\n\n let ops = CatStep::<B>::new(nodes, dim_sizes, output.node.clone(), dim);\n output.register_step(ops, checkpointer_builder)\n }\n\n fn float_max_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {\n match MaxMinDim\n .prepare::<C>([tensor.node])\n 
            .compute_bound()
            .stateful()
        {
            // Tracked: the gradient must be scattered back to the argmax
            // positions, so the indices and the original shape are saved for
            // the shared MaxMinDim backward op.
            OpsKind::Tracked(prep) => {
                let shape = tensor.primitive.shape();
                let (tensor, index) = B::float_max_dim_with_indices(tensor.primitive, dim);
                prep.finish((index, shape), tensor)
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_max_dim(tensor.primitive, dim)),
        }
    }
    /// Max along `dim`, also returning the argmax indices. Only the value
    /// tensor participates in the autodiff graph; the index tensor is
    /// returned as-is from the inner backend.
    fn float_max_dim_with_indices(
        tensor: FloatTensor<Self>,
        dim: usize,
    ) -> (FloatTensor<Self>, IntTensor<B>) {
        match MaxMinDim
            .prepare::<C>([tensor.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => {
                let shape = tensor.primitive.shape();
                let (tensor, index) = B::float_max_dim_with_indices(tensor.primitive, dim);
                // The indices are cloned: one copy feeds the backward state,
                // the other is handed back to the caller.
                let tensor = prep.finish((index.clone(), shape), tensor);

                (tensor, index)
            }
            OpsKind::UnTracked(prep) => {
                let (tensor, index) = B::float_max_dim_with_indices(tensor.primitive, dim);
                let tensor = prep.finish(tensor);

                (tensor, index)
            }
        }
    }
    /// Min along `dim`; mirrors `float_max_dim` and reuses the same
    /// MaxMinDim backward op (the gradient routing only depends on the
    /// selected indices, not on whether max or min picked them).
    fn float_min_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {
        match MaxMinDim
            .prepare::<C>([tensor.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => {
                let shape = tensor.primitive.shape();
                let (tensor, index) = B::float_min_dim_with_indices(tensor.primitive, dim);
                prep.finish((index, shape), tensor)
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_min_dim(tensor.primitive, dim)),
        }
    }
    /// Min along `dim` with argmin indices; mirrors
    /// `float_max_dim_with_indices`.
    fn float_min_dim_with_indices(
        tensor: FloatTensor<Self>,
        dim: usize,
    ) -> (FloatTensor<Self>, IntTensor<B>) {
        match MaxMinDim
            .prepare::<C>([tensor.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => {
                let shape = tensor.primitive.shape();
                let (tensor, index) = B::float_min_dim_with_indices(tensor.primitive, dim);
                let tensor = prep.finish((index.clone(), shape), tensor);

                (tensor, index)
            }
            OpsKind::UnTracked(prep) => {
                let (tensor, index) = B::float_min_dim_with_indices(tensor.primitive, dim);
                let tensor = prep.finish(tensor);

                (tensor, index)
            }
        }
    }

fn float_into_int(tensor: FloatTensor<Self>) -> <Autodiff<B> as Backend>::IntTensorPrimitive {\n B::float_into_int(tensor.primitive)\n }\n\n fn float_powf(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct PowF;\n\n retro_binary!(RetroPowf, B::float_powf);\n\n impl<B: Backend> Backward<B, 2> for PowF {\n type State = (NodeID, NodeID, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (lhs_id, rhs_id, broadcast) = ops.state;\n let lhs: B::FloatTensorPrimitive = checkpointer.retrieve_node_output(lhs_id);\n let rhs: B::FloatTensorPrimitive = checkpointer.retrieve_node_output(rhs_id);\n\n // Both lhs and rhs are needed for both lhs and rhs gradients, but we clone them\n // the number of times required by the parents specification.\n let [rhs_4lhs, rhs_4rhs] = duplicate(&ops.parents, Some(rhs));\n let [lhs_4lhs, lhs_4rhs] = duplicate(&ops.parents, Some(lhs));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n //rhs*(lhs.val**(rhs-1))*grad\n let rhs1 = rhs_4lhs.unwrap();\n let rhs2 = rhs1.clone();\n let lhs = lhs_4lhs.unwrap();\n\n let tmp = B::float_powf(\n lhs,\n B::float_sub_scalar(rhs1, B::FloatElem::from_elem(1.0)),\n );\n let value = B::float_mul(tmp, rhs2);\n let grad = B::float_mul(grad, value);\n\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n //lhs**rhs * ln(lhs) * grad\n let rhs = rhs_4rhs.unwrap();\n let lhs1 = lhs_4rhs.unwrap();\n let lhs2 = lhs1.clone();\n let tmp = B::float_powf(lhs1, rhs);\n let value = B::float_mul(tmp, B::float_log(lhs2));\n let grad = B::float_mul(grad, value);\n\n broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match PowF\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroPowf::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, 
&rhs])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = prep.checkpoint(&lhs);\n let rhs_state = prep.checkpoint(&rhs);\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n B::float_powf(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_powf(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_sign(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sign;\n\n retro_unary!(RetroSign, B::float_sign);\n\n impl<B: Backend> Backward<B, 1> for Sign {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad|\n // Always return 0 because the derivative of the sign function\n // does not contribute to gradient updates in a meaningful way.\n B::float_mul_scalar(grad, 0.elem()));\n }\n }\n\n Sign.prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSign::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateless(B::float_sign(tensor.primitive))\n }\n\n fn float_expand(tensor: FloatTensor<Self>, shape: Shape) -> FloatTensor<Self> {\n // D1: tensor, D2: shape\n #[derive(Debug)]\n struct ExpandDim;\n\n #[derive(new, Debug)]\n struct RetroExpand<B: Backend> {\n input_id: NodeID,\n shape: Shape,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroExpand<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_expand(input, self.shape.clone());\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for ExpandDim {\n type State = (Shape, Shape);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape_in, shape_out) = ops.state;\n let ndims_in = shape_in.num_dims();\n let ndims_out = shape_out.num_dims();\n\n let 
mut shape_expanded = vec![1; ndims_out];\n\n debug_assert!(ndims_out >= ndims_in);\n\n for i in 0..ndims_in {\n shape_expanded[i + (ndims_out - ndims_in)] = shape_in.dims[i];\n }\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let shape_grad = grad.shape();\n let mut grad = grad;\n\n #[allow(clippy::needless_range_loop)]\n for i in 0..ndims_out {\n if shape_expanded[i] == 1 && shape_grad.dims[i] != 1 {\n grad = B::float_sum_dim(grad, i);\n }\n }\n\n B::float_reshape(grad, shape_in)\n });\n }\n }\n\n match ExpandDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroExpand::<B>::new(tensor.node.id, shape.clone()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (tensor.primitive.shape(), shape.clone()),\n B::float_expand(tensor.primitive, shape),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_expand(tensor.primitive, shape)),\n }\n }\n\n fn float_sort(tensor: FloatTensor<Self>, dim: usize, descending: bool) -> FloatTensor<Self> {\n match super::sort::SortDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let shape = tensor.primitive.shape();\n let (tensor, indices) =\n B::float_sort_with_indices(tensor.primitive, dim, descending);\n prep.finish((indices, shape), tensor)\n }\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_sort(tensor.primitive, dim, descending))\n }\n }\n }\n\n fn float_sort_with_indices(\n tensor: FloatTensor<Self>,\n dim: usize,\n descending: bool,\n ) -> (FloatTensor<Self>, IntTensor<B>) {\n match super::sort::SortDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let shape = tensor.primitive.shape();\n let (tensor, indices) =\n B::float_sort_with_indices(tensor.primitive, dim, descending);\n let tensor = prep.finish((indices.clone(), shape), tensor);\n\n (tensor, indices)\n }\n OpsKind::UnTracked(prep) => {\n let (tensor, indices) =\n 
B::float_sort_with_indices(tensor.primitive, dim, descending);\n let tensor = prep.finish(tensor);\n\n (tensor, indices)\n }\n }\n }\n\n fn float_argsort(tensor: FloatTensor<Self>, dim: usize, descending: bool) -> IntTensor<B> {\n B::float_argsort(tensor.primitive, dim, descending)\n }\n\n fn float_repeat_dim(tensor: FloatTensor<Self>, dim: usize, times: usize) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Repeat;\n\n #[derive(new, Debug)]\n struct RetroRepeat<B: Backend> {\n tensor_id: NodeID,\n dim: usize,\n times: usize,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroRepeat<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);\n let out = B::float_repeat_dim(tensor, self.dim, self.times);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for Repeat {\n type State = (usize, usize);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, times) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let mut dims = grad.shape().dims;\n let orig_dim_size = dims[dim] / times;\n if orig_dim_size > 1 {\n dims[dim] = orig_dim_size;\n let orig_dims = dims.clone();\n dims.insert(dim + 1, times); // shape [..., orig_dim_size, times, ...]\n let grad = B::float_reshape(grad, Shape::from(dims));\n let grad = B::float_sum_dim(grad, dim + 1); // sum over repeat times\n B::float_reshape(grad, Shape::from(orig_dims))\n } else {\n B::float_sum_dim(grad, dim)\n }\n });\n }\n }\n\n match Repeat\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroRepeat::<B>::new(tensor.node.id, dim, times))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (dim, times),\n B::float_repeat_dim(tensor.primitive, dim, times),\n ),\n OpsKind::UnTracked(prep) => {\n 
prep.finish(B::float_repeat_dim(tensor.primitive, dim, times))\n }\n }\n }\n\n fn float_cast(tensor: FloatTensor<Self>, dtype: burn_tensor::FloatDType) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_cast(tensor.primitive, dtype))\n }\n\n // TODO: Implement float_prod and float_sum\n // https://github.com/tracel-ai/burn/issues/1458\n}"
],
"name": "rhs",
"type": "FloatTensor<Self>"
}
],
"end_line": 517,
"name": "float_remainder",
"signature": "fn float_remainder(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self>",
"start_line": 453
} | {
"class_name": "impl<B: Backend, C: CheckpointStrategy> FloatTensorOps<Self> for Autodiff<B, C> {\n fn float_from_data(data: TensorData, device: &Device<Self>) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_from_data(data, device))\n }\n\n fn float_random(\n shape: Shape,\n distribution: burn_tensor::Distribution,\n device: &Device<Self>,\n ) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_random(shape, distribution, device))\n }\n\n fn float_zeros(shape: Shape, device: &Device<Self>) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_zeros(shape, device))\n }\n\n fn float_ones(shape: Shape, device: &Device<Self>) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_ones(shape, device))\n }\n\n async fn float_into_data(tensor: FloatTensor<Self>) -> TensorData {\n B::float_into_data(tensor.primitive).await\n }\n\n fn float_device(tensor: &FloatTensor<Self>) -> Device<Self> {\n B::float_device(&tensor.primitive)\n }\n\n fn float_to_device(tensor: FloatTensor<Self>, device: &Device<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct ToDevice;\n\n impl<B: Backend> Backward<B, 1> for ToDevice {\n type State = B::Device;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_to_device(grad, &ops.state)\n });\n }\n }\n\n match ToDevice\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let device_old = B::float_device(&tensor.primitive);\n prep.finish(device_old, B::float_to_device(tensor.primitive, device))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_to_device(tensor.primitive, device)),\n }\n }\n\n fn float_empty(shape: Shape, device: &Device<Self>) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_empty(shape, device))\n }\n\n fn float_add(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Add;\n\n 
retro_binary!(RetroAdd, B::float_add);\n\n impl<B: Backend> Backward<B, 2> for Add {\n type State = (Shape, Shape);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape_lhs, shape_rhs) = ops.state;\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| broadcast_shape::<B>(grad, &shape_lhs),\n |grad| broadcast_shape::<B>(grad, &shape_rhs),\n );\n }\n }\n\n match Add\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroAdd::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (lhs.primitive.shape(), rhs.primitive.shape()),\n B::float_add(lhs.primitive, rhs.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_add(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_add_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct AddScalar;\n\n retro_unary_scalar!(RetroAddScalar, B::float_add_scalar);\n\n impl<B: Backend> Backward<B, 1> for AddScalar {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| grad);\n }\n }\n\n AddScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroAddScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateless(B::float_add_scalar(lhs.primitive, rhs))\n }\n\n fn float_sub(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sub;\n\n retro_binary!(RetroSub, B::float_sub);\n\n impl<B: Backend> Backward<B, 2> for Sub {\n type State = (Shape, Shape);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape_lhs, shape_rhs) = ops.state;\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n 
grads,\n |grad| broadcast_shape::<B>(grad, &shape_lhs),\n |grad| broadcast_shape::<B>(B::float_neg(grad), &shape_rhs),\n );\n }\n }\n\n match Sub\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroSub::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (lhs.primitive.shape(), rhs.primitive.shape()),\n B::float_sub(lhs.primitive, rhs.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_sub(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_sub_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct SubScalar;\n\n retro_unary_scalar!(RetroSubScalar, B::float_sub_scalar);\n\n impl<B: Backend> Backward<B, 1> for SubScalar {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| grad);\n }\n }\n\n SubScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroSubScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateless(B::float_sub_scalar(lhs.primitive, rhs))\n }\n\n fn float_mul(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Mul;\n\n retro_binary!(RetroMul, B::float_mul);\n\n impl<B: Backend> Backward<B, 2> for Mul {\n type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (lhs, rhs, broadcast) = ops.state;\n let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));\n let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let grad = B::float_mul(grad, rhs.unwrap());\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n let grad = B::float_mul(grad, 
lhs.unwrap());\n broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let lhs_tracked = lhs.is_tracked();\n let rhs_tracked = rhs.is_tracked();\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match Mul\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroMul::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));\n let rhs_state = lhs_tracked.then(|| prep.checkpoint(&rhs));\n\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n B::float_mul(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_mul(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_mul_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct MulScalar;\n\n retro_unary_scalar!(RetroMulScalar, B::float_mul_scalar);\n\n impl<B: Backend> Backward<B, 1> for MulScalar {\n type State = FloatElem<B>;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_mul_scalar(grad, ops.state)\n });\n }\n }\n\n match MulScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroMulScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(rhs, B::float_mul_scalar(lhs.primitive, rhs)),\n OpsKind::UnTracked(prep) => prep.finish(B::float_mul_scalar(lhs.primitive, rhs)),\n }\n }\n\n fn float_div(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Div;\n\n retro_binary!(RetroDiv, B::float_div);\n\n impl<B: Backend> Backward<B, 2> for Div {\n type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n 
checkpointer: &mut Checkpointer,\n ) {\n let (lhs, rhs, broadcast) = ops.state;\n let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));\n let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));\n let [rhs_4lhs, rhs_4rhs] = duplicate(&ops.parents, rhs);\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let rhs = rhs_4lhs.unwrap();\n let value = B::float_powf_scalar(rhs, -1.0);\n let grad = B::float_mul(grad, value);\n\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n let rhs = rhs_4rhs.unwrap();\n let lhs = lhs.unwrap();\n let value = B::float_div(B::float_neg(lhs), B::float_powf_scalar(rhs, 2.0));\n let grad = B::float_mul(grad, value);\n\n broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let lhs_tracked = lhs.is_tracked();\n let rhs_tracked = rhs.is_tracked();\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match Div\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroDiv::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));\n let rhs_state = (lhs_tracked || rhs_tracked).then(|| prep.checkpoint(&rhs));\n\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n B::float_div(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_div(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_div_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct DivScalar;\n\n retro_unary_scalar!(RetroDivScalar, B::float_div_scalar);\n\n impl<B: Backend> Backward<B, 1> for DivScalar {\n type State = FloatElem<B>;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let tmp = 1.0 / ops.state.elem::<f32>();\n B::float_mul_scalar(grad, 
tmp.elem())\n });\n }\n }\n\n match DivScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroDivScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(rhs, B::float_div_scalar(lhs.primitive, rhs)),\n OpsKind::UnTracked(prep) => prep.finish(B::float_div_scalar(lhs.primitive, rhs)),\n }\n }\n\n fn float_remainder(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Rem;\n\n retro_binary!(RetroRem, B::float_remainder);\n\n impl<B: Backend> Backward<B, 2> for Rem {\n type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (lhs, rhs, broadcast) = ops.state;\n let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));\n let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n // remainder(x, y) = x - floor(x / y) * y\n // partial(x - floor(x / y) * y, x) = 1\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n // partial(x - floor(x / y) * y, y) = - floor(x / y)\n let rhs = rhs.unwrap();\n let lhs = lhs.unwrap();\n let value = B::float_neg(B::float_floor(B::float_div(lhs, rhs)));\n let grad = B::float_mul(grad, value);\n broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let lhs_tracked = lhs.is_tracked();\n let rhs_tracked = rhs.is_tracked();\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match Rem\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroRem::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));\n let rhs_state = (lhs_tracked || rhs_tracked).then(|| prep.checkpoint(&rhs));\n\n prep.finish(\n (lhs_state, 
rhs_state, broadcast),\n B::float_remainder(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_remainder(lhs.primitive, rhs.primitive))\n }\n }\n }\n\n fn float_remainder_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct RemainderScalar;\n\n retro_unary_scalar!(RetroRemainderScalar, B::float_remainder_scalar);\n\n impl<B: Backend> Backward<B, 1> for RemainderScalar {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| grad);\n }\n }\n\n RemainderScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroRemainderScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateless(B::float_remainder_scalar(lhs.primitive, rhs))\n }\n\n fn float_matmul(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Matmul;\n\n impl<B: Backend> Backward<B, 2> for Matmul {\n type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (lhs, rhs, broadcast) = ops.state;\n let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));\n let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let rhs = B::float_transpose(rhs.unwrap());\n let grad = B::float_matmul(grad, rhs);\n\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n let lhs = B::float_transpose(lhs.unwrap());\n let grad = B::float_matmul(lhs, grad);\n\n broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let lhs_tracked = lhs.is_tracked();\n let rhs_tracked = rhs.is_tracked();\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match Matmul\n .prepare::<C>([lhs.node.clone(), 
rhs.node.clone()])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));\n let rhs_state = lhs_tracked.then(|| prep.checkpoint(&rhs));\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n B::float_matmul(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_matmul(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_neg(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Neg;\n\n retro_unary!(RetroNeg, B::float_neg);\n\n impl<B: Backend> Backward<B, 1> for Neg {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| B::float_neg(grad));\n }\n }\n\n Neg.prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroNeg::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateless(B::float_neg(tensor.primitive))\n }\n\n fn float_recip(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Recip;\n\n retro_unary!(RetroRecip, B::float_recip);\n\n impl<B: Backend> Backward<B, 1> for Recip {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let tensor = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let tmp = B::float_powf_scalar(tensor, -2.0);\n let value = B::float_neg(tmp);\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Recip\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroRecip::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_recip(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_recip(tensor.primitive)),\n }\n }\n\n fn 
float_swap_dims(tensor: FloatTensor<Self>, dim1: usize, dim2: usize) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct SwapDim;\n\n #[derive(new, Debug)]\n struct RetroSwapDims<B: Backend> {\n input_id: NodeID,\n dim1: usize,\n dim2: usize,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroSwapDims<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_swap_dims(input, self.dim1, self.dim2);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for SwapDim {\n type State = (usize, usize);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim1, dim2) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_swap_dims(grad, dim2, dim1)\n });\n }\n }\n\n match SwapDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSwapDims::<B>::new(tensor.node.id, dim1, dim2))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (dim1, dim2),\n B::float_swap_dims(tensor.primitive, dim1, dim2),\n ),\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_swap_dims(tensor.primitive, dim1, dim2))\n }\n }\n }\n\n fn float_permute(tensor: FloatTensor<Self>, axes: &[usize]) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct PermuteDim;\n\n #[derive(new, Debug)]\n struct RetroPermuteDims<B: Backend> {\n input_id: NodeID,\n axes: Vec<usize>,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroPermuteDims<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_permute(input, &self.axes);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for PermuteDim {\n type State = Vec<usize>;\n\n fn backward(\n self,\n ops: 
Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let axes = ops.state;\n\n let mut inverse = vec![0usize; axes.len()];\n axes.iter()\n .enumerate()\n .for_each(|(i, &axis)| inverse[axis] = i);\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_permute(grad, &inverse)\n });\n }\n }\n\n match PermuteDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroPermuteDims::<B>::new(tensor.node.id, axes.to_vec()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n prep.finish(axes.to_vec(), B::float_permute(tensor.primitive, axes))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_permute(tensor.primitive, axes)),\n }\n }\n\n fn float_flip(tensor: FloatTensor<Self>, axes: &[usize]) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct FlipDim;\n\n #[derive(new, Debug)]\n struct RetroFlipDims<B: Backend> {\n input_id: NodeID,\n axes: Vec<usize>,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroFlipDims<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_flip(input, &self.axes);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for FlipDim {\n type State = Vec<usize>;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let axes = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_flip(grad, &axes)\n });\n }\n }\n\n match FlipDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroFlipDims::<B>::new(tensor.node.id, axes.to_vec()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n prep.finish(axes.to_vec(), B::float_flip(tensor.primitive, axes))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_flip(tensor.primitive, axes)),\n }\n }\n\n fn 
float_reshape(tensor: FloatTensor<Self>, shape: Shape) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct ReshapeDim;\n\n #[derive(new, Debug)]\n struct RetroReshape<B: Backend> {\n input_id: NodeID,\n shape: Shape,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroReshape<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_reshape(input, self.shape.clone());\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for ReshapeDim {\n type State = (Shape, Shape);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape_original, shape) = ops.state;\n let ndims_out = shape.num_dims();\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let shape_grad = grad.shape();\n let mut grad = grad;\n\n for i in 0..ndims_out {\n if shape.dims[i] == 1 && shape_grad.dims[i] != 1 {\n grad = B::float_sum_dim(grad, i);\n }\n }\n\n B::float_reshape(grad, shape_original)\n });\n }\n }\n\n match ReshapeDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroReshape::<B>::new(tensor.node.id, shape.clone()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (tensor.primitive.shape(), shape.clone()),\n B::float_reshape(tensor.primitive, shape),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_reshape(tensor.primitive, shape)),\n }\n }\n\n fn float_gather(\n dim: usize,\n tensor: FloatTensor<Self>,\n indices: IntTensor<B>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Gather;\n\n impl<B: Backend> Backward<B, 1> for Gather {\n type State = (usize, IntTensor<B>, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, indices, shape, device) = ops.state;\n\n unary::<B, _>(ops.parents, 
ops.node, grads, |grad| {\n let zeros = B::float_zeros(shape, &device);\n B::float_scatter(dim, zeros, indices, grad)\n });\n }\n }\n\n match Gather\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n dim,\n indices.clone(),\n tensor.primitive.shape(),\n B::float_device(&tensor.primitive),\n ),\n B::float_gather(dim, tensor.primitive, indices),\n ),\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_gather(dim, tensor.primitive, indices))\n }\n }\n }\n\n fn float_scatter(\n dim: usize,\n tensor: FloatTensor<Self>,\n indices: IntTensor<B>,\n value: FloatTensor<Self>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Scatter;\n\n impl<B: Backend> Backward<B, 2> for Scatter {\n type State = (usize, IntTensor<B>, Shape, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, indices, shape_lhs, shape_rhs, device) = ops.state;\n let [indices_4lhs, indices_4rhs] = duplicate(&ops.parents, Some(indices));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let zeros = B::float_zeros(shape_lhs, &device);\n B::float_scatter(dim, grad, indices_4lhs.unwrap(), zeros)\n },\n |grad| {\n let zeros = B::float_zeros(shape_rhs, &device);\n B::float_scatter(dim, zeros, indices_4rhs.unwrap(), grad)\n },\n );\n }\n }\n\n match Scatter\n .prepare::<C>([tensor.node, value.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n dim,\n indices.clone(),\n tensor.primitive.shape(),\n value.primitive.shape(),\n B::float_device(&value.primitive),\n ),\n B::float_scatter(dim, tensor.primitive, indices, value.primitive),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_scatter(\n dim,\n tensor.primitive,\n indices,\n value.primitive,\n )),\n }\n }\n\n fn float_select(\n tensor: FloatTensor<Self>,\n dim: usize,\n indices: IntTensor<B>,\n ) -> FloatTensor<Self> {\n 
#[derive(Debug)]\n struct Select;\n\n #[derive(new, Debug)]\n struct RetroSelect<B: Backend> {\n input_id: NodeID,\n dim: usize,\n indices: IntTensor<B>,\n }\n\n impl<B: Backend> RetroForward for RetroSelect<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_select(input, self.dim, self.indices.clone());\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for Select {\n type State = (usize, IntTensor<B>, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, indices, shape, device) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let zeros = B::float_zeros(shape, &device);\n B::float_select_assign(zeros, dim, indices, grad)\n });\n }\n }\n\n match Select\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSelect::<B>::new(tensor.node.id, dim, indices.clone()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n dim,\n indices.clone(),\n tensor.primitive.shape(),\n B::float_device(&tensor.primitive),\n ),\n B::float_select(tensor.primitive, dim, indices),\n ),\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_select(tensor.primitive, dim, indices))\n }\n }\n }\n\n fn float_select_assign(\n tensor: FloatTensor<Self>,\n dim: usize,\n indices: IntTensor<B>,\n value: FloatTensor<Self>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct IndexSelectDimAssign;\n\n #[derive(new, Debug)]\n struct RetroSelectAssign<B: Backend> {\n tensor_id: NodeID,\n dim: usize,\n indices: IntTensor<B>,\n value_id: NodeID,\n }\n\n impl<B: Backend> RetroForward for RetroSelectAssign<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);\n let value = 
states.get_state::<B::FloatTensorPrimitive>(&self.value_id);\n let out = B::float_select_assign(tensor, self.dim, self.indices.clone(), value);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 2> for IndexSelectDimAssign {\n type State = (usize, IntTensor<B>);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, indices) = ops.state;\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| grad,\n |grad| B::float_select(grad, dim, indices),\n );\n }\n }\n\n match IndexSelectDimAssign\n .prepare::<C>([tensor.node.clone(), value.node.clone()])\n .memory_bound()\n .retro_forward(RetroSelectAssign::<B>::new(\n tensor.node.id,\n dim,\n indices.clone(),\n value.node.id,\n ))\n .parents([&tensor, &value])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (dim, indices.clone()),\n B::float_select_assign(tensor.primitive, dim, indices, value.primitive),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_select_assign(\n tensor.primitive,\n dim,\n indices,\n value.primitive,\n )),\n }\n }\n\n fn float_slice(\n tensor: FloatTensor<Self>,\n ranges: &[core::ops::Range<usize>],\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Index;\n\n #[derive(new, Debug)]\n struct RetroSlice<B: Backend> {\n tensor_id: NodeID,\n ranges: Vec<core::ops::Range<usize>>,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroSlice<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);\n let out = B::float_slice(tensor, &self.ranges);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for Index {\n type State = (Vec<core::ops::Range<usize>>, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (ranges, shape, device) = ops.state;\n\n 
unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let zeros = B::float_zeros(shape, &device);\n B::float_slice_assign(zeros, &ranges, grad)\n });\n }\n }\n\n match Index\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSlice::<B>::new(tensor.node.id, ranges.to_vec()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n ranges.to_vec(),\n tensor.primitive.shape(),\n B::float_device(&tensor.primitive),\n ),\n B::float_slice(tensor.primitive, ranges),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_slice(tensor.primitive, ranges)),\n }\n }\n\n fn float_slice_assign(\n tensor: FloatTensor<Self>,\n ranges: &[core::ops::Range<usize>],\n value: FloatTensor<Self>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct SliceAssign;\n\n #[derive(new, Debug)]\n struct RetroSliceAssign<B: Backend> {\n tensor_id: NodeID,\n ranges: Vec<core::ops::Range<usize>>,\n value_id: NodeID,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroSliceAssign<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);\n let value = states.get_state::<B::FloatTensorPrimitive>(&self.value_id);\n let out = B::float_slice_assign(tensor, &self.ranges, value);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 2> for SliceAssign {\n type State = (Vec<core::ops::Range<usize>>, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (ranges, shape_rhs, device) = ops.state;\n let [ranges_4lhs, ranges_4rhs] = duplicate(&ops.parents, Some(ranges));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let zeros = B::float_zeros(shape_rhs, &device);\n B::float_slice_assign(grad, &ranges_4lhs.unwrap(), zeros)\n },\n |grad| B::float_slice(grad, &ranges_4rhs.unwrap()),\n );\n }\n }\n\n match 
SliceAssign\n .prepare::<C>([tensor.node.clone(), value.node.clone()])\n .memory_bound()\n .retro_forward(RetroSliceAssign::<B>::new(\n tensor.node.id,\n ranges.to_vec(),\n value.node.id,\n ))\n .parents([&tensor, &value])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n ranges.to_vec(),\n value.primitive.shape(),\n B::float_device(&value.primitive),\n ),\n B::float_slice_assign(tensor.primitive, ranges, value.primitive),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_slice_assign(\n tensor.primitive,\n ranges,\n value.primitive,\n )),\n }\n }\n\n fn float_mask_where(\n tensor: FloatTensor<Self>,\n mask: BoolTensor<Self>,\n source: FloatTensor<Self>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct MaskWhere;\n\n impl<B: Backend> Backward<B, 2> for MaskWhere {\n type State = (BoolTensor<B>, Shape, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (mask, shape_lhs, shape_rhs, device) = ops.state;\n let [mask_4lhs, mask_4rhs] = duplicate(&ops.parents, Some(mask));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let zeros = B::float_zeros(shape_lhs.clone(), &device);\n let grad = B::float_mask_where(grad, mask_4lhs.unwrap(), zeros);\n\n broadcast_shape::<B>(grad, &shape_lhs)\n },\n |grad| {\n let zeros = B::float_zeros(shape_rhs.clone(), &device);\n let grad = B::float_mask_where(zeros, mask_4rhs.unwrap(), grad);\n\n broadcast_shape::<B>(grad, &shape_rhs)\n },\n );\n }\n }\n\n match MaskWhere\n .prepare::<C>([tensor.node, source.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n mask.clone(),\n tensor.primitive.shape(),\n source.primitive.shape(),\n B::float_device(&source.primitive),\n ),\n B::float_mask_where(tensor.primitive, mask, source.primitive),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_mask_where(\n tensor.primitive,\n mask,\n source.primitive,\n )),\n }\n 
}\n\n fn float_mask_fill(\n tensor: FloatTensor<Self>,\n mask: BoolTensor<B>,\n value: FloatElem<B>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct MaskFill;\n\n impl<B: Backend> Backward<B, 1> for MaskFill {\n type State = BoolTensor<B>;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_mask_fill(grad, ops.state, 0.elem())\n });\n }\n }\n\n match MaskFill\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n mask.clone(),\n B::float_mask_fill(tensor.primitive, mask, value),\n ),\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_mask_fill(tensor.primitive, mask, value))\n }\n }\n }\n\n fn float_equal(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {\n B::float_equal(lhs.primitive, rhs.primitive)\n }\n\n fn float_equal_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {\n B::float_equal_elem(lhs.primitive, rhs)\n }\n\n fn float_greater(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {\n B::float_greater(lhs.primitive, rhs.primitive)\n }\n\n fn float_greater_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {\n B::float_greater_elem(lhs.primitive, rhs)\n }\n\n fn float_greater_equal(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {\n B::float_greater_equal(lhs.primitive, rhs.primitive)\n }\n\n fn float_greater_equal_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {\n B::float_greater_equal_elem(lhs.primitive, rhs)\n }\n\n fn float_lower(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {\n B::float_lower(lhs.primitive, rhs.primitive)\n }\n\n fn float_lower_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {\n B::float_lower_elem(lhs.primitive, rhs)\n }\n\n fn float_lower_equal(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> 
BoolTensor<B> {\n B::float_lower_equal(lhs.primitive, rhs.primitive)\n }\n\n fn float_lower_equal_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {\n B::float_lower_equal_elem(lhs.primitive, rhs)\n }\n\n fn float_detach(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n // When we detach a tensor, we remove it from the graph, but we still want to keep the\n // `require_grad` setting.\n let is_require_grad = Self::float_is_require_grad(&tensor);\n let tensor = AutodiffTensor::new(tensor.primitive);\n\n match is_require_grad {\n true => tensor.require_grad(),\n false => tensor,\n }\n }\n\n fn float_set_require_grad(tensor: FloatTensor<Self>, require_grad: bool) -> FloatTensor<Self> {\n if require_grad {\n return tensor.require_grad();\n }\n\n AutodiffTensor::new(tensor.primitive)\n }\n\n fn float_is_require_grad(tensor: &FloatTensor<Self>) -> bool {\n matches!(tensor.node.requirement, Requirement::Grad)\n }\n\n fn float_mean(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Mean;\n\n impl<B: Backend> Backward<B, 1> for Mean {\n type State = Shape;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let shape = ops.state;\n let val = 1_f64 / shape.num_elements() as f64;\n let ones = B::float_ones(shape, &B::float_device(&grad));\n let val = B::float_mul_scalar(ones, val.elem());\n\n let grad = unsqueeze_like::<B>(grad, val.shape());\n B::float_mul(val, grad)\n });\n }\n }\n\n match Mean.prepare::<C>([tensor.node]).compute_bound().stateful() {\n OpsKind::Tracked(prep) => {\n prep.finish(tensor.primitive.shape(), B::float_mean(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_mean(tensor.primitive)),\n }\n }\n\n fn float_sum(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sum;\n\n impl<B: Backend> Backward<B, 1> for Sum {\n type State = Shape;\n\n 
fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let val = B::float_ones(ops.state, &B::float_device(&grad));\n\n let grad = unsqueeze_like::<B>(grad, val.shape());\n B::float_mul(val, grad)\n });\n }\n }\n\n match Sum.prepare::<C>([tensor.node]).compute_bound().stateful() {\n OpsKind::Tracked(prep) => {\n prep.finish(tensor.primitive.shape(), B::float_sum(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_sum(tensor.primitive)),\n }\n }\n\n fn float_mean_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct MeanDim;\n\n impl<B: Backend> Backward<B, 1> for MeanDim {\n type State = (Shape, usize);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape, dim) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let val = 1_f64 / shape.dims[dim] as f64;\n let ones = B::float_ones(shape, &B::float_device(&grad));\n let val = B::float_mul_scalar(ones, B::FloatElem::from_elem(val));\n\n let grad = B::float_sum_dim(grad, dim);\n B::float_mul(val, grad)\n });\n }\n }\n\n match MeanDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (tensor.primitive.shape(), dim),\n B::float_mean_dim(tensor.primitive, dim),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_mean_dim(tensor.primitive, dim)),\n }\n }\n\n fn float_sum_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct SumDim;\n\n impl<B: Backend> Backward<B, 1> for SumDim {\n type State = (Shape, usize);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape, dim) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let ones = 
B::float_ones(shape, &B::float_device(&grad));\n let grad = B::float_sum_dim(grad, dim);\n\n B::float_mul(ones, grad)\n });\n }\n }\n\n match SumDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (tensor.primitive.shape(), dim),\n B::float_sum_dim(tensor.primitive, dim),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_sum_dim(tensor.primitive, dim)),\n }\n }\n\n fn float_argmax(tensor: FloatTensor<Self>, dim: usize) -> IntTensor<B> {\n B::float_argmax(tensor.primitive, dim)\n }\n\n fn float_argmin(tensor: FloatTensor<Self>, dim: usize) -> IntTensor<B> {\n B::float_argmin(tensor.primitive, dim)\n }\n\n fn float_exp(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Exp;\n\n retro_unary!(RetroExp, B::float_exp);\n\n impl<B: Backend> Backward<B, 1> for Exp {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n let output = B::float_exp(input);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_mul(grad, output)\n });\n }\n }\n\n match Exp\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroExp::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_exp(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_exp(tensor.primitive)),\n }\n }\n\n fn float_log(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Log;\n\n retro_unary!(RetroLog, B::float_log);\n\n impl<B: Backend> Backward<B, 1> for Log {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, 
ops.node, grads, |grad| {\n let value = B::float_powf_scalar(input, -1.0);\n B::float_mul(grad, value)\n });\n }\n }\n\n match Log\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroLog::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_log(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_log(tensor.primitive)),\n }\n }\n\n fn float_log1p(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Log1P;\n\n retro_unary!(RetroLog1P, B::float_log1p);\n\n impl<B: Backend> Backward<B, 1> for Log1P {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_add_scalar(input, 1.elem());\n let value = B::float_powf_scalar(value, -1.0);\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Log1P\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroLog1P::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_log1p(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_log1p(tensor.primitive)),\n }\n }\n\n fn float_powf_scalar(tensor: FloatTensor<Self>, value: f32) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct PowfScalar;\n\n #[derive(new, Debug)]\n struct RetroPowfScalar<B: Backend> {\n lhs_id: NodeID,\n rhs: f32,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroPowfScalar<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let lhs = states.get_state::<B::FloatTensorPrimitive>(&self.lhs_id);\n let out = B::float_powf_scalar(lhs, self.rhs);\n 
states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for PowfScalar {\n type State = (NodeID, f32);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (tensor_id, value) = ops.state;\n let tensor = checkpointer.retrieve_node_output(tensor_id);\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let tmp = B::float_powf_scalar(tensor, value - 1.0);\n let value = B::float_mul_scalar(tmp, value.elem());\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match PowfScalar\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroPowfScalar::<B>::new(tensor.node.id, value))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = (prep.checkpoint(&tensor), value);\n prep.finish(state, B::float_powf_scalar(tensor.primitive, value))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_powf_scalar(tensor.primitive, value)),\n }\n }\n\n fn float_sqrt(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sqrt;\n\n retro_unary!(RetroSqrt, B::float_sqrt);\n\n impl<B: Backend> Backward<B, 1> for Sqrt {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_div_scalar(B::float_powf_scalar(input, -0.5), 2.elem());\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Sqrt\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSqrt::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_sqrt(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_sqrt(tensor.primitive)),\n }\n }\n\n fn float_abs(tensor: FloatTensor<Self>) -> 
FloatTensor<Self> {\n #[derive(Debug)]\n struct Abs;\n\n retro_unary!(RetroAbs, B::float_abs);\n\n impl<B: Backend> Backward<B, 1> for Abs {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let tensor: B::FloatTensorPrimitive = checkpointer.retrieve_node_output(ops.state);\n let state = B::float_sign(tensor);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_mul(grad, state)\n });\n }\n }\n\n match Abs\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroAbs::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_abs(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_abs(tensor.primitive)),\n }\n }\n\n fn float_cos(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Cos;\n\n retro_unary!(RetroCos, B::float_cos);\n\n impl<B: Backend> Backward<B, 1> for Cos {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_neg(B::float_sin(input));\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Cos\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroCos::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_cos(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_cos(tensor.primitive)),\n }\n }\n\n fn float_sin(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sin;\n\n retro_unary!(RetroSin, B::float_sin);\n\n impl<B: Backend> Backward<B, 1> for Sin {\n type State = 
NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let state = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_cos(state);\n B::float_mul(grad, value)\n });\n }\n }\n\n match Sin\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSin::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_sin(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_sin(tensor.primitive)),\n }\n }\n\n fn float_tanh(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Tanh;\n\n retro_unary!(RetroTanh, B::float_tanh);\n\n impl<B: Backend> Backward<B, 1> for Tanh {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n let state = B::float_tanh(input);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_add_scalar(\n B::float_neg(B::float_powf_scalar(state, 2.0)),\n 1.elem(),\n );\n B::float_mul(grad, value)\n });\n }\n }\n\n match Tanh\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroTanh::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_tanh(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_tanh(tensor.primitive)),\n }\n }\n\n fn float_round(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Round;\n retro_unary!(RetroRound, B::float_round);\n\n impl<B: Backend> Backward<B, 1> for Round {\n type State = (Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n 
grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape, device) = ops.state;\n unary::<B, _>(ops.parents, ops.node, grads, |_grad| {\n B::float_zeros(shape, &device)\n })\n }\n }\n\n match Round\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroRound::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (tensor.primitive.shape(), B::float_device(&tensor.primitive)),\n B::float_round(tensor.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_round(tensor.primitive)),\n }\n }\n\n fn float_floor(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Floor;\n retro_unary!(RetroFloor, B::float_floor);\n\n impl<B: Backend> Backward<B, 1> for Floor {\n type State = (Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape, device) = ops.state;\n unary::<B, _>(ops.parents, ops.node, grads, |_grad| {\n B::float_zeros(shape, &device)\n })\n }\n }\n\n match Floor\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroFloor::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (tensor.primitive.shape(), B::float_device(&tensor.primitive)),\n B::float_floor(tensor.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_floor(tensor.primitive)),\n }\n }\n\n fn float_ceil(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Ceil;\n retro_unary!(RetroCeil, B::float_ceil);\n\n impl<B: Backend> Backward<B, 1> for Ceil {\n type State = (Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape, device) = ops.state;\n unary::<B, _>(ops.parents, ops.node, grads, |_grad| {\n B::float_zeros(shape, &device)\n })\n }\n }\n\n match Ceil\n 
.prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroCeil::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (tensor.primitive.shape(), B::float_device(&tensor.primitive)),\n B::float_floor(tensor.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_floor(tensor.primitive)),\n }\n }\n\n fn float_erf(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Erf;\n\n retro_unary!(RetroErf, B::float_erf);\n\n impl<B: Backend> Backward<B, 1> for Erf {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let ops = checkpointer.retrieve_node_output(ops.state);\n let exponent = B::float_neg(B::float_powf_scalar(ops, 2.0));\n let numerator = B::float_mul_scalar(B::float_exp(exponent), 2.0.elem());\n let denominator = core::f64::consts::PI.sqrt().elem();\n let value = B::float_div_scalar(numerator, denominator);\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Erf\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroErf::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_erf(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_erf(tensor.primitive)),\n }\n }\n\n fn float_cat(tensors: Vec<FloatTensor<Self>>, dim: usize) -> FloatTensor<Self> {\n #[derive(new, Debug)]\n struct CatStep<B: Backend> {\n nodes: Vec<Option<NodeRef>>,\n // The dimension of each tensor along the dim dimension.\n // This indicates the number of dimension concatenated for each tensor.\n dim_sizes: Vec<usize>,\n output: NodeRef,\n phantom: PhantomData<B>,\n dim: usize,\n }\n\n impl<B: Backend> Step for CatStep<B> {\n fn step(self: Box<Self>, grads: &mut Gradients, 
_checkpointer: &mut Checkpointer) {\n let grad = grads.consume::<B>(&self.output);\n let ranges: Vec<_> = grad.shape().dims.iter().map(|v| 0..*v).collect();\n\n let mut current_index = 0;\n\n self.nodes\n .into_iter()\n .zip(self.dim_sizes)\n .filter_map(|(node, dim_size)| node.map(|node| (node, dim_size)))\n .for_each(|(node, dim_size)| {\n let mut ranges = ranges.clone();\n ranges[self.dim] = current_index..dim_size + current_index;\n current_index += dim_size;\n grads.register::<B>(node.id, B::float_slice(grad.clone(), &ranges));\n });\n }\n\n fn node(&self) -> NodeID {\n self.output.id\n }\n\n fn parents(&self) -> Vec<NodeID> {\n self.nodes\n .iter()\n .filter_map(|node| node.clone())\n .map(|node| node.id)\n .collect()\n }\n fn depth(&self) -> usize {\n self.output.order\n }\n }\n\n let mut nodes = Vec::with_capacity(tensors.len());\n let mut primitives = Vec::with_capacity(tensors.len());\n let mut dim_sizes = Vec::with_capacity(tensors.len());\n\n tensors.into_iter().for_each(|tensor| {\n dim_sizes.push(tensor.primitive.shape().dims[dim]);\n nodes.push(tensor.node);\n primitives.push(tensor.primitive);\n });\n\n let requirement = Requirement::from_nodes(&nodes);\n\n // For simplicity, this operation does not checkpoint anything\n let cat_computing_property = ComputingProperty::Ambiguous;\n let checkpointer_builder = CheckpointerBuilder::default();\n\n let output = B::float_cat(primitives, dim);\n if requirement.is_none() {\n return AutodiffTensor::from_parents(\n output,\n &nodes,\n requirement,\n cat_computing_property,\n );\n }\n\n let output =\n AutodiffTensor::from_parents(output, &nodes, requirement, cat_computing_property);\n let nodes = nodes\n .into_iter()\n .map(|node| node.clone_if_require_grad())\n .collect::<Vec<_>>();\n\n let ops = CatStep::<B>::new(nodes, dim_sizes, output.node.clone(), dim);\n output.register_step(ops, checkpointer_builder)\n }\n\n fn float_max_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {\n match 
MaxMinDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let shape = tensor.primitive.shape();\n let (tensor, index) = B::float_max_dim_with_indices(tensor.primitive, dim);\n prep.finish((index, shape), tensor)\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_max_dim(tensor.primitive, dim)),\n }\n }\n fn float_max_dim_with_indices(\n tensor: FloatTensor<Self>,\n dim: usize,\n ) -> (FloatTensor<Self>, IntTensor<B>) {\n match MaxMinDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let shape = tensor.primitive.shape();\n let (tensor, index) = B::float_max_dim_with_indices(tensor.primitive, dim);\n let tensor = prep.finish((index.clone(), shape), tensor);\n\n (tensor, index)\n }\n OpsKind::UnTracked(prep) => {\n let (tensor, index) = B::float_max_dim_with_indices(tensor.primitive, dim);\n let tensor = prep.finish(tensor);\n\n (tensor, index)\n }\n }\n }\n fn float_min_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {\n match MaxMinDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let shape = tensor.primitive.shape();\n let (tensor, index) = B::float_min_dim_with_indices(tensor.primitive, dim);\n prep.finish((index, shape), tensor)\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_min_dim(tensor.primitive, dim)),\n }\n }\n fn float_min_dim_with_indices(\n tensor: FloatTensor<Self>,\n dim: usize,\n ) -> (FloatTensor<Self>, IntTensor<B>) {\n match MaxMinDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let shape = tensor.primitive.shape();\n let (tensor, index) = B::float_min_dim_with_indices(tensor.primitive, dim);\n let tensor = prep.finish((index.clone(), shape), tensor);\n\n (tensor, index)\n }\n OpsKind::UnTracked(prep) => {\n let (tensor, index) = B::float_min_dim_with_indices(tensor.primitive, dim);\n let tensor = 
prep.finish(tensor);\n\n (tensor, index)\n }\n }\n }\n\n fn float_into_int(tensor: FloatTensor<Self>) -> <Autodiff<B> as Backend>::IntTensorPrimitive {\n B::float_into_int(tensor.primitive)\n }\n\n fn float_powf(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct PowF;\n\n retro_binary!(RetroPowf, B::float_powf);\n\n impl<B: Backend> Backward<B, 2> for PowF {\n type State = (NodeID, NodeID, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (lhs_id, rhs_id, broadcast) = ops.state;\n let lhs: B::FloatTensorPrimitive = checkpointer.retrieve_node_output(lhs_id);\n let rhs: B::FloatTensorPrimitive = checkpointer.retrieve_node_output(rhs_id);\n\n // Both lhs and rhs are needed for both lhs and rhs gradients, but we clone them\n // the number of times required by the parents specification.\n let [rhs_4lhs, rhs_4rhs] = duplicate(&ops.parents, Some(rhs));\n let [lhs_4lhs, lhs_4rhs] = duplicate(&ops.parents, Some(lhs));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n //rhs*(lhs.val**(rhs-1))*grad\n let rhs1 = rhs_4lhs.unwrap();\n let rhs2 = rhs1.clone();\n let lhs = lhs_4lhs.unwrap();\n\n let tmp = B::float_powf(\n lhs,\n B::float_sub_scalar(rhs1, B::FloatElem::from_elem(1.0)),\n );\n let value = B::float_mul(tmp, rhs2);\n let grad = B::float_mul(grad, value);\n\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n //lhs**rhs * ln(lhs) * grad\n let rhs = rhs_4rhs.unwrap();\n let lhs1 = lhs_4rhs.unwrap();\n let lhs2 = lhs1.clone();\n let tmp = B::float_powf(lhs1, rhs);\n let value = B::float_mul(tmp, B::float_log(lhs2));\n let grad = B::float_mul(grad, value);\n\n broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match PowF\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n 
.retro_forward(RetroPowf::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = prep.checkpoint(&lhs);\n let rhs_state = prep.checkpoint(&rhs);\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n B::float_powf(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_powf(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_sign(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sign;\n\n retro_unary!(RetroSign, B::float_sign);\n\n impl<B: Backend> Backward<B, 1> for Sign {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad|\n // Always return 0 because the derivative of the sign function\n // does not contribute to gradient updates in a meaningful way.\n B::float_mul_scalar(grad, 0.elem()));\n }\n }\n\n Sign.prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSign::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateless(B::float_sign(tensor.primitive))\n }\n\n fn float_expand(tensor: FloatTensor<Self>, shape: Shape) -> FloatTensor<Self> {\n // D1: tensor, D2: shape\n #[derive(Debug)]\n struct ExpandDim;\n\n #[derive(new, Debug)]\n struct RetroExpand<B: Backend> {\n input_id: NodeID,\n shape: Shape,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroExpand<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_expand(input, self.shape.clone());\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for ExpandDim {\n type State = (Shape, Shape);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape_in, shape_out) = ops.state;\n let 
ndims_in = shape_in.num_dims();\n let ndims_out = shape_out.num_dims();\n\n let mut shape_expanded = vec![1; ndims_out];\n\n debug_assert!(ndims_out >= ndims_in);\n\n for i in 0..ndims_in {\n shape_expanded[i + (ndims_out - ndims_in)] = shape_in.dims[i];\n }\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let shape_grad = grad.shape();\n let mut grad = grad;\n\n #[allow(clippy::needless_range_loop)]\n for i in 0..ndims_out {\n if shape_expanded[i] == 1 && shape_grad.dims[i] != 1 {\n grad = B::float_sum_dim(grad, i);\n }\n }\n\n B::float_reshape(grad, shape_in)\n });\n }\n }\n\n match ExpandDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroExpand::<B>::new(tensor.node.id, shape.clone()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (tensor.primitive.shape(), shape.clone()),\n B::float_expand(tensor.primitive, shape),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_expand(tensor.primitive, shape)),\n }\n }\n\n fn float_sort(tensor: FloatTensor<Self>, dim: usize, descending: bool) -> FloatTensor<Self> {\n match super::sort::SortDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let shape = tensor.primitive.shape();\n let (tensor, indices) =\n B::float_sort_with_indices(tensor.primitive, dim, descending);\n prep.finish((indices, shape), tensor)\n }\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_sort(tensor.primitive, dim, descending))\n }\n }\n }\n\n fn float_sort_with_indices(\n tensor: FloatTensor<Self>,\n dim: usize,\n descending: bool,\n ) -> (FloatTensor<Self>, IntTensor<B>) {\n match super::sort::SortDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let shape = tensor.primitive.shape();\n let (tensor, indices) =\n B::float_sort_with_indices(tensor.primitive, dim, descending);\n let tensor = prep.finish((indices.clone(), shape), tensor);\n\n (tensor, indices)\n 
}\n OpsKind::UnTracked(prep) => {\n let (tensor, indices) =\n B::float_sort_with_indices(tensor.primitive, dim, descending);\n let tensor = prep.finish(tensor);\n\n (tensor, indices)\n }\n }\n }\n\n fn float_argsort(tensor: FloatTensor<Self>, dim: usize, descending: bool) -> IntTensor<B> {\n B::float_argsort(tensor.primitive, dim, descending)\n }\n\n fn float_repeat_dim(tensor: FloatTensor<Self>, dim: usize, times: usize) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Repeat;\n\n #[derive(new, Debug)]\n struct RetroRepeat<B: Backend> {\n tensor_id: NodeID,\n dim: usize,\n times: usize,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroRepeat<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);\n let out = B::float_repeat_dim(tensor, self.dim, self.times);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for Repeat {\n type State = (usize, usize);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, times) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let mut dims = grad.shape().dims;\n let orig_dim_size = dims[dim] / times;\n if orig_dim_size > 1 {\n dims[dim] = orig_dim_size;\n let orig_dims = dims.clone();\n dims.insert(dim + 1, times); // shape [..., orig_dim_size, times, ...]\n let grad = B::float_reshape(grad, Shape::from(dims));\n let grad = B::float_sum_dim(grad, dim + 1); // sum over repeat times\n B::float_reshape(grad, Shape::from(orig_dims))\n } else {\n B::float_sum_dim(grad, dim)\n }\n });\n }\n }\n\n match Repeat\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroRepeat::<B>::new(tensor.node.id, dim, times))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (dim, times),\n B::float_repeat_dim(tensor.primitive, dim, times),\n ),\n 
OpsKind::UnTracked(prep) => {\n prep.finish(B::float_repeat_dim(tensor.primitive, dim, times))\n }\n }\n }\n\n fn float_cast(tensor: FloatTensor<Self>, dtype: burn_tensor::FloatDType) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_cast(tensor.primitive, dtype))\n }\n\n // TODO: Implement float_prod and float_sum\n // https://github.com/tracel-ai/burn/issues/1458\n}",
"class_signature": "impl<B: Backend, C: CheckpointStrategy> FloatTensorOps<Self> for Autodiff<B, C>"
} |
float_remainder_scalar | burn-main/crates/burn-autodiff/src/ops/tensor.rs | fn float_remainder_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {
#[derive(Debug)]
struct RemainderScalar;
retro_unary_scalar!(RetroRemainderScalar, B::float_remainder_scalar);
impl<B: Backend> Backward<B, 1> for RemainderScalar {
type State = ();
fn backward(
self,
ops: Ops<Self::State, 1>,
grads: &mut Gradients,
_checkpointer: &mut Checkpointer,
) {
unary::<B, _>(ops.parents, ops.node, grads, |grad| grad);
}
}
RemainderScalar
.prepare::<C>([lhs.node.clone()])
.memory_bound()
.retro_forward(RetroRemainderScalar::<B>::new(lhs.node.id, rhs))
.parents([&lhs])
.stateless(B::float_remainder_scalar(lhs.primitive, rhs))
} | use alloc::{boxed::Box, vec, vec::Vec};
use core::marker::PhantomData;
#[cfg(not(feature = "std"))]
#[allow(unused_imports, reason = "required on aarch64, unused on x86_64")]
use num_traits::float::Float;
use crate::{
Autodiff,
checkpoint::{
base::Checkpointer, builder::CheckpointerBuilder, retro_forward::RetroForward,
state::BackwardStates, strategy::CheckpointStrategy,
},
grads::Gradients,
graph::{ComputingProperty, NodeID, NodeRef, Requirement, Step},
ops::{Backward, Ops, OpsKind, binary, broadcast_shape, unary},
retro_binary, retro_unary, retro_unary_scalar,
tensor::AutodiffTensor,
utils::duplicate,
};
use burn_tensor::{
Device, ElementConversion, Shape, TensorData, TensorMetadata,
backend::Backend,
ops::{BoolTensor, FloatElem, FloatTensor, FloatTensorOps, IntTensor},
};
use super::maxmin::MaxMinDim;
// Unsqueeze op on primitive.
/// Reshape `tensor` so that it has the same rank as `shape` by prepending
/// singleton dimensions, i.e. an automatic left-side `unsqueeze`.
///
/// The trailing axes keep the tensor's original sizes; only `1`-sized axes
/// are inserted in front. Requires `shape.num_dims() >= tensor rank`.
fn unsqueeze_like<B: Backend>(
    tensor: B::FloatTensorPrimitive,
    shape: Shape,
) -> B::FloatTensorPrimitive {
    let target_rank = shape.num_dims();
    let current = tensor.shape();
    let current_rank = current.num_dims();

    // Fill the leading axes with 1s, then copy the existing sizes into the
    // trailing positions.
    let leading_ones = target_rank - current_rank;
    let mut dims = vec![1; target_rank];
    dims[leading_ones..].copy_from_slice(&current.dims[..current_rank]);

    B::float_reshape(tensor, Shape::from(dims))
}
impl<B: Backend, C: CheckpointStrategy> FloatTensorOps<Self> for Autodiff<B, C> {
/// Build a tensor from raw data on the given device.
///
/// Creation ops are graph leaves: no backward step is registered.
fn float_from_data(data: TensorData, device: &Device<Self>) -> FloatTensor<Self> {
    let inner = B::float_from_data(data, device);
    AutodiffTensor::new(inner)
}
/// Sample a tensor from `distribution` on the given device.
///
/// Random creation is a graph leaf: no backward step is registered.
fn float_random(
    shape: Shape,
    distribution: burn_tensor::Distribution,
    device: &Device<Self>,
) -> FloatTensor<Self> {
    let inner = B::float_random(shape, distribution, device);
    AutodiffTensor::new(inner)
}
/// Create an all-zeros tensor; a graph leaf with no backward step.
fn float_zeros(shape: Shape, device: &Device<Self>) -> FloatTensor<Self> {
    let inner = B::float_zeros(shape, device);
    AutodiffTensor::new(inner)
}
/// Create an all-ones tensor; a graph leaf with no backward step.
fn float_ones(shape: Shape, device: &Device<Self>) -> FloatTensor<Self> {
    let inner = B::float_ones(shape, device);
    AutodiffTensor::new(inner)
}
/// Read the tensor back as raw data, delegating to the inner backend.
async fn float_into_data(tensor: FloatTensor<Self>) -> TensorData {
    let read = B::float_into_data(tensor.primitive);
    read.await
}
/// Return the device holding the tensor's primitive.
fn float_device(tensor: &FloatTensor<Self>) -> Device<Self> {
    let primitive = &tensor.primitive;
    B::float_device(primitive)
}
// Move a tensor to `device`. The backward step moves the incoming gradient
// back to the tensor's original device.
fn float_to_device(tensor: FloatTensor<Self>, device: &Device<Self>) -> FloatTensor<Self> {
    #[derive(Debug)]
    struct ToDevice;
    impl<B: Backend> Backward<B, 1> for ToDevice {
        // State = the device the input lived on before the move.
        type State = B::Device;
        fn backward(
            self,
            ops: Ops<Self::State, 1>,
            grads: &mut Gradients,
            _checkpointer: &mut Checkpointer,
        ) {
            // Transport the gradient back to the original device.
            unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                B::float_to_device(grad, &ops.state)
            });
        }
    }
    match ToDevice
        .prepare::<C>([tensor.node])
        .compute_bound()
        .stateful()
    {
        OpsKind::Tracked(prep) => {
            // Record the source device before consuming the primitive.
            let device_old = B::float_device(&tensor.primitive);
            prep.finish(device_old, B::float_to_device(tensor.primitive, device))
        }
        OpsKind::UnTracked(prep) => prep.finish(B::float_to_device(tensor.primitive, device)),
    }
}
/// Allocate an uninitialized tensor; a graph leaf with no backward step.
fn float_empty(shape: Shape, device: &Device<Self>) -> FloatTensor<Self> {
    let inner = B::float_empty(shape, device);
    AutodiffTensor::new(inner)
}
// Elementwise addition with broadcasting.
// d(lhs + rhs)/dlhs = 1 and d/drhs = 1, so each gradient is the incoming
// gradient reduced back to the corresponding operand's shape.
fn float_add(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {
    #[derive(Debug)]
    struct Add;
    retro_binary!(RetroAdd, B::float_add);
    impl<B: Backend> Backward<B, 2> for Add {
        // Original operand shapes, needed to undo broadcasting.
        type State = (Shape, Shape);
        fn backward(
            self,
            ops: Ops<Self::State, 2>,
            grads: &mut Gradients,
            _checkpointer: &mut Checkpointer,
        ) {
            let (shape_lhs, shape_rhs) = ops.state;
            binary::<B, _, _>(
                ops.parents,
                ops.node,
                grads,
                |grad| broadcast_shape::<B>(grad, &shape_lhs),
                |grad| broadcast_shape::<B>(grad, &shape_rhs),
            );
        }
    }
    match Add
        .prepare::<C>([lhs.node.clone(), rhs.node.clone()])
        .memory_bound()
        .retro_forward(RetroAdd::<B>::new(lhs.node.id, rhs.node.id))
        .parents([&lhs, &rhs])
        .stateful()
    {
        OpsKind::Tracked(preps) => preps.finish(
            (lhs.primitive.shape(), rhs.primitive.shape()),
            B::float_add(lhs.primitive, rhs.primitive),
        ),
        OpsKind::UnTracked(preps) => preps.finish(B::float_add(lhs.primitive, rhs.primitive)),
    }
}
// Add a scalar to every element. d(x + c)/dx = 1, so the gradient passes
// through unchanged; no state is needed (stateless op).
fn float_add_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {
    #[derive(Debug)]
    struct AddScalar;
    retro_unary_scalar!(RetroAddScalar, B::float_add_scalar);
    impl<B: Backend> Backward<B, 1> for AddScalar {
        type State = ();
        fn backward(
            self,
            ops: Ops<Self::State, 1>,
            grads: &mut Gradients,
            _checkpointer: &mut Checkpointer,
        ) {
            // Identity gradient.
            unary::<B, _>(ops.parents, ops.node, grads, |grad| grad);
        }
    }
    AddScalar
        .prepare::<C>([lhs.node.clone()])
        .memory_bound()
        .retro_forward(RetroAddScalar::<B>::new(lhs.node.id, rhs))
        .parents([&lhs])
        .stateless(B::float_add_scalar(lhs.primitive, rhs))
}
// Elementwise subtraction with broadcasting.
// d(lhs - rhs)/dlhs = 1, d/drhs = -1; gradients are reduced back to the
// original operand shapes, with the rhs gradient negated.
fn float_sub(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {
    #[derive(Debug)]
    struct Sub;
    retro_binary!(RetroSub, B::float_sub);
    impl<B: Backend> Backward<B, 2> for Sub {
        // Original operand shapes, needed to undo broadcasting.
        type State = (Shape, Shape);
        fn backward(
            self,
            ops: Ops<Self::State, 2>,
            grads: &mut Gradients,
            _checkpointer: &mut Checkpointer,
        ) {
            let (shape_lhs, shape_rhs) = ops.state;
            binary::<B, _, _>(
                ops.parents,
                ops.node,
                grads,
                |grad| broadcast_shape::<B>(grad, &shape_lhs),
                |grad| broadcast_shape::<B>(B::float_neg(grad), &shape_rhs),
            );
        }
    }
    match Sub
        .prepare::<C>([lhs.node.clone(), rhs.node.clone()])
        .memory_bound()
        .retro_forward(RetroSub::<B>::new(lhs.node.id, rhs.node.id))
        .parents([&lhs, &rhs])
        .stateful()
    {
        OpsKind::Tracked(preps) => preps.finish(
            (lhs.primitive.shape(), rhs.primitive.shape()),
            B::float_sub(lhs.primitive, rhs.primitive),
        ),
        OpsKind::UnTracked(preps) => preps.finish(B::float_sub(lhs.primitive, rhs.primitive)),
    }
}
// Subtract a scalar from every element. d(x - c)/dx = 1, so the gradient
// passes through unchanged; no state is needed (stateless op).
fn float_sub_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {
    #[derive(Debug)]
    struct SubScalar;
    retro_unary_scalar!(RetroSubScalar, B::float_sub_scalar);
    impl<B: Backend> Backward<B, 1> for SubScalar {
        type State = ();
        fn backward(
            self,
            ops: Ops<Self::State, 1>,
            grads: &mut Gradients,
            _checkpointer: &mut Checkpointer,
        ) {
            // Identity gradient.
            unary::<B, _>(ops.parents, ops.node, grads, |grad| grad);
        }
    }
    SubScalar
        .prepare::<C>([lhs.node.clone()])
        .memory_bound()
        .retro_forward(RetroSubScalar::<B>::new(lhs.node.id, rhs))
        .parents([&lhs])
        .stateless(B::float_sub_scalar(lhs.primitive, rhs))
}
// Elementwise multiplication with broadcasting.
// d(lhs * rhs)/dlhs = rhs and d/drhs = lhs, so each side's gradient needs
// the OTHER operand's value — hence the cross-wise checkpointing below.
fn float_mul(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {
    #[derive(Debug)]
    struct Mul;
    retro_binary!(RetroMul, B::float_mul);
    impl<B: Backend> Backward<B, 2> for Mul {
        // Checkpointed operand ids (None when that value is not needed)
        // plus broadcast info to reduce gradients to original shapes.
        type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);
        fn backward(
            self,
            ops: Ops<Self::State, 2>,
            grads: &mut Gradients,
            checkpointer: &mut Checkpointer,
        ) {
            let (lhs, rhs, broadcast) = ops.state;
            let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));
            let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));
            binary::<B, _, _>(
                ops.parents,
                ops.node,
                grads,
                |grad| {
                    let grad = B::float_mul(grad, rhs.unwrap());
                    broadcast.backward_lhs::<B>(grad)
                },
                |grad| {
                    let grad = B::float_mul(grad, lhs.unwrap());
                    broadcast.backward_rhs::<B>(grad)
                },
            );
        }
    }
    let lhs_tracked = lhs.is_tracked();
    let rhs_tracked = rhs.is_tracked();
    let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);
    match Mul
        .prepare::<C>([lhs.node.clone(), rhs.node.clone()])
        .memory_bound()
        .retro_forward(RetroMul::<B>::new(lhs.node.id, rhs.node.id))
        .parents([&lhs, &rhs])
        .stateful()
    {
        OpsKind::Tracked(mut prep) => {
            // lhs's value is only needed to compute rhs's gradient, and
            // vice versa — checkpoint each only if the other is tracked.
            let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));
            let rhs_state = lhs_tracked.then(|| prep.checkpoint(&rhs));
            prep.finish(
                (lhs_state, rhs_state, broadcast),
                B::float_mul(lhs.primitive, rhs.primitive),
            )
        }
        OpsKind::UnTracked(prep) => prep.finish(B::float_mul(lhs.primitive, rhs.primitive)),
    }
}
// Multiply every element by a scalar. d(c * x)/dx = c, so the backward
// step scales the gradient by the same scalar (kept as state).
fn float_mul_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {
    #[derive(Debug)]
    struct MulScalar;
    retro_unary_scalar!(RetroMulScalar, B::float_mul_scalar);
    impl<B: Backend> Backward<B, 1> for MulScalar {
        // The scalar factor.
        type State = FloatElem<B>;
        fn backward(
            self,
            ops: Ops<Self::State, 1>,
            grads: &mut Gradients,
            _checkpointer: &mut Checkpointer,
        ) {
            unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                B::float_mul_scalar(grad, ops.state)
            });
        }
    }
    match MulScalar
        .prepare::<C>([lhs.node.clone()])
        .memory_bound()
        .retro_forward(RetroMulScalar::<B>::new(lhs.node.id, rhs))
        .parents([&lhs])
        .stateful()
    {
        OpsKind::Tracked(prep) => prep.finish(rhs, B::float_mul_scalar(lhs.primitive, rhs)),
        OpsKind::UnTracked(prep) => prep.finish(B::float_mul_scalar(lhs.primitive, rhs)),
    }
}
// Elementwise division with broadcasting.
// d(lhs / rhs)/dlhs = 1/rhs and d/drhs = -lhs / rhs^2.
fn float_div(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {
    #[derive(Debug)]
    struct Div;
    retro_binary!(RetroDiv, B::float_div);
    impl<B: Backend> Backward<B, 2> for Div {
        // Checkpointed operand ids (None when not needed) plus broadcast
        // info to reduce gradients to original shapes.
        type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);
        fn backward(
            self,
            ops: Ops<Self::State, 2>,
            grads: &mut Gradients,
            checkpointer: &mut Checkpointer,
        ) {
            let (lhs, rhs, broadcast) = ops.state;
            let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));
            let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));
            // rhs is used by both closures; clone it only as many times as
            // the tracked parents require.
            let [rhs_4lhs, rhs_4rhs] = duplicate(&ops.parents, rhs);
            binary::<B, _, _>(
                ops.parents,
                ops.node,
                grads,
                |grad| {
                    // grad / rhs
                    let rhs = rhs_4lhs.unwrap();
                    let value = B::float_powf_scalar(rhs, -1.0);
                    let grad = B::float_mul(grad, value);
                    broadcast.backward_lhs::<B>(grad)
                },
                |grad| {
                    // grad * (-lhs / rhs^2)
                    let rhs = rhs_4rhs.unwrap();
                    let lhs = lhs.unwrap();
                    let value = B::float_div(B::float_neg(lhs), B::float_powf_scalar(rhs, 2.0));
                    let grad = B::float_mul(grad, value);
                    broadcast.backward_rhs::<B>(grad)
                },
            );
        }
    }
    let lhs_tracked = lhs.is_tracked();
    let rhs_tracked = rhs.is_tracked();
    let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);
    match Div
        .prepare::<C>([lhs.node.clone(), rhs.node.clone()])
        .memory_bound()
        .retro_forward(RetroDiv::<B>::new(lhs.node.id, rhs.node.id))
        .parents([&lhs, &rhs])
        .stateful()
    {
        OpsKind::Tracked(mut prep) => {
            // lhs's value is needed only for rhs's gradient; rhs's value is
            // needed for both gradients.
            let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));
            let rhs_state = (lhs_tracked || rhs_tracked).then(|| prep.checkpoint(&rhs));
            prep.finish(
                (lhs_state, rhs_state, broadcast),
                B::float_div(lhs.primitive, rhs.primitive),
            )
        }
        OpsKind::UnTracked(prep) => prep.finish(B::float_div(lhs.primitive, rhs.primitive)),
    }
}
// Divide every element by a scalar. d(x / c)/dx = 1/c, so the backward
// step multiplies the gradient by the reciprocal of the stored scalar.
fn float_div_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {
    #[derive(Debug)]
    struct DivScalar;
    retro_unary_scalar!(RetroDivScalar, B::float_div_scalar);
    impl<B: Backend> Backward<B, 1> for DivScalar {
        // The scalar divisor.
        type State = FloatElem<B>;
        fn backward(
            self,
            ops: Ops<Self::State, 1>,
            grads: &mut Gradients,
            _checkpointer: &mut Checkpointer,
        ) {
            unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                // Reciprocal computed in f32 then converted back to the
                // backend element type.
                let tmp = 1.0 / ops.state.elem::<f32>();
                B::float_mul_scalar(grad, tmp.elem())
            });
        }
    }
    match DivScalar
        .prepare::<C>([lhs.node.clone()])
        .memory_bound()
        .retro_forward(RetroDivScalar::<B>::new(lhs.node.id, rhs))
        .parents([&lhs])
        .stateful()
    {
        OpsKind::Tracked(prep) => prep.finish(rhs, B::float_div_scalar(lhs.primitive, rhs)),
        OpsKind::UnTracked(prep) => prep.finish(B::float_div_scalar(lhs.primitive, rhs)),
    }
}
// Elementwise remainder with broadcasting, where
// remainder(x, y) = x - floor(x / y) * y.
fn float_remainder(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {
    #[derive(Debug)]
    struct Rem;
    retro_binary!(RetroRem, B::float_remainder);
    impl<B: Backend> Backward<B, 2> for Rem {
        // Checkpointed operand ids (None when not needed) plus broadcast
        // info to reduce gradients to original shapes.
        type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);
        fn backward(
            self,
            ops: Ops<Self::State, 2>,
            grads: &mut Gradients,
            checkpointer: &mut Checkpointer,
        ) {
            let (lhs, rhs, broadcast) = ops.state;
            let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));
            let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));
            binary::<B, _, _>(
                ops.parents,
                ops.node,
                grads,
                |grad| {
                    // remainder(x, y) = x - floor(x / y) * y
                    // partial(x - floor(x / y) * y, x) = 1
                    broadcast.backward_lhs::<B>(grad)
                },
                |grad| {
                    // partial(x - floor(x / y) * y, y) = - floor(x / y)
                    let rhs = rhs.unwrap();
                    let lhs = lhs.unwrap();
                    let value = B::float_neg(B::float_floor(B::float_div(lhs, rhs)));
                    let grad = B::float_mul(grad, value);
                    broadcast.backward_rhs::<B>(grad)
                },
            );
        }
    }
    let lhs_tracked = lhs.is_tracked();
    let rhs_tracked = rhs.is_tracked();
    let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);
    match Rem
        .prepare::<C>([lhs.node.clone(), rhs.node.clone()])
        .memory_bound()
        .retro_forward(RetroRem::<B>::new(lhs.node.id, rhs.node.id))
        .parents([&lhs, &rhs])
        .stateful()
    {
        OpsKind::Tracked(mut prep) => {
            // Both operand values are required only for rhs's gradient; the
            // lhs gradient is a pass-through.
            let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));
            let rhs_state = (lhs_tracked || rhs_tracked).then(|| prep.checkpoint(&rhs));
            prep.finish(
                (lhs_state, rhs_state, broadcast),
                B::float_remainder(lhs.primitive, rhs.primitive),
            )
        }
        OpsKind::UnTracked(prep) => {
            prep.finish(B::float_remainder(lhs.primitive, rhs.primitive))
        }
    }
}
// Remainder with a scalar divisor: remainder(x, c) = x - floor(x / c) * c.
// Since c is constant, d/dx = 1 (almost everywhere), so the gradient
// passes through unchanged; no state is needed (stateless op).
fn float_remainder_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {
    #[derive(Debug)]
    struct RemainderScalar;
    retro_unary_scalar!(RetroRemainderScalar, B::float_remainder_scalar);
    impl<B: Backend> Backward<B, 1> for RemainderScalar {
        type State = ();
        fn backward(
            self,
            ops: Ops<Self::State, 1>,
            grads: &mut Gradients,
            _checkpointer: &mut Checkpointer,
        ) {
            // Identity gradient.
            unary::<B, _>(ops.parents, ops.node, grads, |grad| grad);
        }
    }
    RemainderScalar
        .prepare::<C>([lhs.node.clone()])
        .memory_bound()
        .retro_forward(RetroRemainderScalar::<B>::new(lhs.node.id, rhs))
        .parents([&lhs])
        .stateless(B::float_remainder_scalar(lhs.primitive, rhs))
}
// Matrix multiplication.
// dL/dlhs = grad @ rhs^T and dL/drhs = lhs^T @ grad, so each side's
// gradient needs the OTHER operand's value — hence the cross-wise
// checkpointing below. Marked compute-bound: recomputing matmul in the
// backward pass would be more expensive than storing the operands.
fn float_matmul(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {
    #[derive(Debug)]
    struct Matmul;
    impl<B: Backend> Backward<B, 2> for Matmul {
        // Checkpointed operand ids (None when not needed) plus broadcast
        // info for batched-dimension reduction.
        type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);
        fn backward(
            self,
            ops: Ops<Self::State, 2>,
            grads: &mut Gradients,
            checkpointer: &mut Checkpointer,
        ) {
            let (lhs, rhs, broadcast) = ops.state;
            let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));
            let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));
            binary::<B, _, _>(
                ops.parents,
                ops.node,
                grads,
                |grad| {
                    // grad @ rhs^T
                    let rhs = B::float_transpose(rhs.unwrap());
                    let grad = B::float_matmul(grad, rhs);
                    broadcast.backward_lhs::<B>(grad)
                },
                |grad| {
                    // lhs^T @ grad
                    let lhs = B::float_transpose(lhs.unwrap());
                    let grad = B::float_matmul(lhs, grad);
                    broadcast.backward_rhs::<B>(grad)
                },
            );
        }
    }
    let lhs_tracked = lhs.is_tracked();
    let rhs_tracked = rhs.is_tracked();
    let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);
    match Matmul
        .prepare::<C>([lhs.node.clone(), rhs.node.clone()])
        .compute_bound()
        .stateful()
    {
        OpsKind::Tracked(mut prep) => {
            // Each operand is checkpointed only if the other is tracked.
            let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));
            let rhs_state = lhs_tracked.then(|| prep.checkpoint(&rhs));
            prep.finish(
                (lhs_state, rhs_state, broadcast),
                B::float_matmul(lhs.primitive, rhs.primitive),
            )
        }
        OpsKind::UnTracked(prep) => prep.finish(B::float_matmul(lhs.primitive, rhs.primitive)),
    }
}
// Elementwise negation. d(-x)/dx = -1, so the backward step simply negates
// the incoming gradient; no state is needed (stateless op).
fn float_neg(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
    #[derive(Debug)]
    struct Neg;
    retro_unary!(RetroNeg, B::float_neg);
    impl<B: Backend> Backward<B, 1> for Neg {
        type State = ();
        fn backward(
            self,
            ops: Ops<Self::State, 1>,
            grads: &mut Gradients,
            _checkpointer: &mut Checkpointer,
        ) {
            unary::<B, _>(ops.parents, ops.node, grads, |grad| B::float_neg(grad));
        }
    }
    Neg.prepare::<C>([tensor.node.clone()])
        .memory_bound()
        .retro_forward(RetroNeg::<B>::new(tensor.node.id))
        .parents([&tensor])
        .stateless(B::float_neg(tensor.primitive))
}
// Elementwise reciprocal. d(1/x)/dx = -1/x^2, so the backward step needs
// the input value, which is checkpointed by node id.
fn float_recip(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
    #[derive(Debug)]
    struct Recip;
    retro_unary!(RetroRecip, B::float_recip);
    impl<B: Backend> Backward<B, 1> for Recip {
        // Checkpointed id of the input tensor.
        type State = NodeID;
        fn backward(
            self,
            ops: Ops<Self::State, 1>,
            grads: &mut Gradients,
            checkpointer: &mut Checkpointer,
        ) {
            let tensor = checkpointer.retrieve_node_output(ops.state);
            unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                // grad * (-x^-2)
                let tmp = B::float_powf_scalar(tensor, -2.0);
                let value = B::float_neg(tmp);
                B::float_mul(grad, value)
            });
        }
    }
    match Recip
        .prepare::<C>([tensor.node.clone()])
        .memory_bound()
        .retro_forward(RetroRecip::<B>::new(tensor.node.id))
        .parents([&tensor])
        .stateful()
    {
        OpsKind::Tracked(mut prep) => {
            let state = prep.checkpoint(&tensor);
            prep.finish(state, B::float_recip(tensor.primitive))
        }
        OpsKind::UnTracked(prep) => prep.finish(B::float_recip(tensor.primitive)),
    }
}
// Swap two dimensions. The operation is its own inverse, so the backward
// step swaps the same pair of dimensions on the gradient.
fn float_swap_dims(tensor: FloatTensor<Self>, dim1: usize, dim2: usize) -> FloatTensor<Self> {
    #[derive(Debug)]
    struct SwapDim;
    // Re-runs the forward swap from a checkpointed input (memory-bound
    // recompute strategy).
    #[derive(new, Debug)]
    struct RetroSwapDims<B: Backend> {
        input_id: NodeID,
        dim1: usize,
        dim2: usize,
        _backend: PhantomData<B>,
    }
    impl<B: Backend> RetroForward for RetroSwapDims<B> {
        fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
            let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);
            let out = B::float_swap_dims(input, self.dim1, self.dim2);
            states.save(out_node, out)
        }
    }
    impl<B: Backend> Backward<B, 1> for SwapDim {
        // The swapped dimension pair.
        type State = (usize, usize);
        fn backward(
            self,
            ops: Ops<Self::State, 1>,
            grads: &mut Gradients,
            _checkpointer: &mut Checkpointer,
        ) {
            let (dim1, dim2) = ops.state;
            unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                B::float_swap_dims(grad, dim2, dim1)
            });
        }
    }
    match SwapDim
        .prepare::<C>([tensor.node.clone()])
        .memory_bound()
        .retro_forward(RetroSwapDims::<B>::new(tensor.node.id, dim1, dim2))
        .parents([&tensor])
        .stateful()
    {
        OpsKind::Tracked(prep) => prep.finish(
            (dim1, dim2),
            B::float_swap_dims(tensor.primitive, dim1, dim2),
        ),
        OpsKind::UnTracked(prep) => {
            prep.finish(B::float_swap_dims(tensor.primitive, dim1, dim2))
        }
    }
}
// Permute dimensions according to `axes`. The backward step applies the
// inverse permutation to the gradient.
fn float_permute(tensor: FloatTensor<Self>, axes: &[usize]) -> FloatTensor<Self> {
    #[derive(Debug)]
    struct PermuteDim;
    // Re-runs the forward permute from a checkpointed input.
    #[derive(new, Debug)]
    struct RetroPermuteDims<B: Backend> {
        input_id: NodeID,
        axes: Vec<usize>,
        _backend: PhantomData<B>,
    }
    impl<B: Backend> RetroForward for RetroPermuteDims<B> {
        fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
            let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);
            let out = B::float_permute(input, &self.axes);
            states.save(out_node, out)
        }
    }
    impl<B: Backend> Backward<B, 1> for PermuteDim {
        // The forward permutation.
        type State = Vec<usize>;
        fn backward(
            self,
            ops: Ops<Self::State, 1>,
            grads: &mut Gradients,
            _checkpointer: &mut Checkpointer,
        ) {
            let axes = ops.state;
            // Build the inverse permutation: if axis `axis` moved to
            // position `i`, the inverse sends position `i` back to `axis`.
            let mut inverse = vec![0usize; axes.len()];
            axes.iter()
                .enumerate()
                .for_each(|(i, &axis)| inverse[axis] = i);
            unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                B::float_permute(grad, &inverse)
            });
        }
    }
    match PermuteDim
        .prepare::<C>([tensor.node.clone()])
        .memory_bound()
        .retro_forward(RetroPermuteDims::<B>::new(tensor.node.id, axes.to_vec()))
        .parents([&tensor])
        .stateful()
    {
        OpsKind::Tracked(prep) => {
            prep.finish(axes.to_vec(), B::float_permute(tensor.primitive, axes))
        }
        OpsKind::UnTracked(prep) => prep.finish(B::float_permute(tensor.primitive, axes)),
    }
}
// Flip the given axes. Flipping is its own inverse, so the backward step
// flips the gradient along the same axes.
fn float_flip(tensor: FloatTensor<Self>, axes: &[usize]) -> FloatTensor<Self> {
    #[derive(Debug)]
    struct FlipDim;
    // Re-runs the forward flip from a checkpointed input.
    #[derive(new, Debug)]
    struct RetroFlipDims<B: Backend> {
        input_id: NodeID,
        axes: Vec<usize>,
        _backend: PhantomData<B>,
    }
    impl<B: Backend> RetroForward for RetroFlipDims<B> {
        fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
            let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);
            let out = B::float_flip(input, &self.axes);
            states.save(out_node, out)
        }
    }
    impl<B: Backend> Backward<B, 1> for FlipDim {
        // The flipped axes.
        type State = Vec<usize>;
        fn backward(
            self,
            ops: Ops<Self::State, 1>,
            grads: &mut Gradients,
            _checkpointer: &mut Checkpointer,
        ) {
            let axes = ops.state;
            unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                B::float_flip(grad, &axes)
            });
        }
    }
    match FlipDim
        .prepare::<C>([tensor.node.clone()])
        .memory_bound()
        .retro_forward(RetroFlipDims::<B>::new(tensor.node.id, axes.to_vec()))
        .parents([&tensor])
        .stateful()
    {
        OpsKind::Tracked(prep) => {
            prep.finish(axes.to_vec(), B::float_flip(tensor.primitive, axes))
        }
        OpsKind::UnTracked(prep) => prep.finish(B::float_flip(tensor.primitive, axes)),
    }
}
// Reshape to `shape`. The backward step reshapes the gradient back to the
// original shape, first summing over any axis the forward call broadcast
// (requested size 1 but gradient size > 1).
fn float_reshape(tensor: FloatTensor<Self>, shape: Shape) -> FloatTensor<Self> {
    #[derive(Debug)]
    struct ReshapeDim;
    // Re-runs the forward reshape from a checkpointed input.
    #[derive(new, Debug)]
    struct RetroReshape<B: Backend> {
        input_id: NodeID,
        shape: Shape,
        _backend: PhantomData<B>,
    }
    impl<B: Backend> RetroForward for RetroReshape<B> {
        fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
            let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);
            let out = B::float_reshape(input, self.shape.clone());
            states.save(out_node, out)
        }
    }
    impl<B: Backend> Backward<B, 1> for ReshapeDim {
        // (original input shape, requested output shape)
        type State = (Shape, Shape);
        fn backward(
            self,
            ops: Ops<Self::State, 1>,
            grads: &mut Gradients,
            _checkpointer: &mut Checkpointer,
        ) {
            let (shape_original, shape) = ops.state;
            let ndims_out = shape.num_dims();
            unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                let shape_grad = grad.shape();
                let mut grad = grad;
                // Collapse axes that were broadcast during the forward pass.
                for i in 0..ndims_out {
                    if shape.dims[i] == 1 && shape_grad.dims[i] != 1 {
                        grad = B::float_sum_dim(grad, i);
                    }
                }
                B::float_reshape(grad, shape_original)
            });
        }
    }
    match ReshapeDim
        .prepare::<C>([tensor.node.clone()])
        .memory_bound()
        .retro_forward(RetroReshape::<B>::new(tensor.node.id, shape.clone()))
        .parents([&tensor])
        .stateful()
    {
        OpsKind::Tracked(prep) => prep.finish(
            (tensor.primitive.shape(), shape.clone()),
            B::float_reshape(tensor.primitive, shape),
        ),
        OpsKind::UnTracked(prep) => prep.finish(B::float_reshape(tensor.primitive, shape)),
    }
}
// Gather values along `dim` at `indices`. The backward step scatters the
// gradient back into a zero tensor of the input's shape at the same
// indices (scatter is the adjoint of gather).
fn float_gather(
    dim: usize,
    tensor: FloatTensor<Self>,
    indices: IntTensor<B>,
) -> FloatTensor<Self> {
    #[derive(Debug)]
    struct Gather;
    impl<B: Backend> Backward<B, 1> for Gather {
        // (dim, indices, input shape, input device)
        type State = (usize, IntTensor<B>, Shape, B::Device);
        fn backward(
            self,
            ops: Ops<Self::State, 1>,
            grads: &mut Gradients,
            _checkpointer: &mut Checkpointer,
        ) {
            let (dim, indices, shape, device) = ops.state;
            unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                let zeros = B::float_zeros(shape, &device);
                B::float_scatter(dim, zeros, indices, grad)
            });
        }
    }
    match Gather
        .prepare::<C>([tensor.node])
        .compute_bound()
        .stateful()
    {
        OpsKind::Tracked(prep) => prep.finish(
            (
                dim,
                indices.clone(),
                tensor.primitive.shape(),
                B::float_device(&tensor.primitive),
            ),
            B::float_gather(dim, tensor.primitive, indices),
        ),
        OpsKind::UnTracked(prep) => {
            prep.finish(B::float_gather(dim, tensor.primitive, indices))
        }
    }
}
// Scatter-add `value` into `tensor` along `dim` at `indices`.
// Backward: the tensor's gradient is the incoming gradient with the
// scattered positions zeroed out; the value's gradient is gathered from a
// zero base at the same indices.
fn float_scatter(
    dim: usize,
    tensor: FloatTensor<Self>,
    indices: IntTensor<B>,
    value: FloatTensor<Self>,
) -> FloatTensor<Self> {
    #[derive(Debug)]
    struct Scatter;
    impl<B: Backend> Backward<B, 2> for Scatter {
        // (dim, indices, lhs shape, rhs shape, device)
        type State = (usize, IntTensor<B>, Shape, Shape, B::Device);
        fn backward(
            self,
            ops: Ops<Self::State, 2>,
            grads: &mut Gradients,
            _checkpointer: &mut Checkpointer,
        ) {
            let (dim, indices, shape_lhs, shape_rhs, device) = ops.state;
            // Indices are needed by both closures; clone only as many
            // times as the tracked parents require.
            let [indices_4lhs, indices_4rhs] = duplicate(&ops.parents, Some(indices));
            binary::<B, _, _>(
                ops.parents,
                ops.node,
                grads,
                |grad| {
                    // Zero out the positions overwritten by the scatter.
                    let zeros = B::float_zeros(shape_lhs, &device);
                    B::float_scatter(dim, grad, indices_4lhs.unwrap(), zeros)
                },
                |grad| {
                    // Collect the gradient contributions for `value`.
                    let zeros = B::float_zeros(shape_rhs, &device);
                    B::float_scatter(dim, zeros, indices_4rhs.unwrap(), grad)
                },
            );
        }
    }
    match Scatter
        .prepare::<C>([tensor.node, value.node])
        .compute_bound()
        .stateful()
    {
        OpsKind::Tracked(prep) => prep.finish(
            (
                dim,
                indices.clone(),
                tensor.primitive.shape(),
                value.primitive.shape(),
                B::float_device(&value.primitive),
            ),
            B::float_scatter(dim, tensor.primitive, indices, value.primitive),
        ),
        OpsKind::UnTracked(prep) => prep.finish(B::float_scatter(
            dim,
            tensor.primitive,
            indices,
            value.primitive,
        )),
    }
}
// Select slices along `dim` at `indices`. The backward step assigns the
// gradient back into a zero tensor of the input's shape at the same
// indices (select_assign is the adjoint of select).
fn float_select(
    tensor: FloatTensor<Self>,
    dim: usize,
    indices: IntTensor<B>,
) -> FloatTensor<Self> {
    #[derive(Debug)]
    struct Select;
    // Re-runs the forward select from a checkpointed input.
    #[derive(new, Debug)]
    struct RetroSelect<B: Backend> {
        input_id: NodeID,
        dim: usize,
        indices: IntTensor<B>,
    }
    impl<B: Backend> RetroForward for RetroSelect<B> {
        fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
            let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);
            let out = B::float_select(input, self.dim, self.indices.clone());
            states.save(out_node, out)
        }
    }
    impl<B: Backend> Backward<B, 1> for Select {
        // (dim, indices, input shape, input device)
        type State = (usize, IntTensor<B>, Shape, B::Device);
        fn backward(
            self,
            ops: Ops<Self::State, 1>,
            grads: &mut Gradients,
            _checkpointer: &mut Checkpointer,
        ) {
            let (dim, indices, shape, device) = ops.state;
            unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                let zeros = B::float_zeros(shape, &device);
                B::float_select_assign(zeros, dim, indices, grad)
            });
        }
    }
    match Select
        .prepare::<C>([tensor.node.clone()])
        .memory_bound()
        .retro_forward(RetroSelect::<B>::new(tensor.node.id, dim, indices.clone()))
        .parents([&tensor])
        .stateful()
    {
        OpsKind::Tracked(prep) => prep.finish(
            (
                dim,
                indices.clone(),
                tensor.primitive.shape(),
                B::float_device(&tensor.primitive),
            ),
            B::float_select(tensor.primitive, dim, indices),
        ),
        OpsKind::UnTracked(prep) => {
            prep.finish(B::float_select(tensor.primitive, dim, indices))
        }
    }
}
// Add `value` into `tensor` along `dim` at `indices` (index-add).
// Backward: the tensor's gradient passes through unchanged (the assign is
// additive), and the value's gradient is the selection of the gradient at
// the same indices.
fn float_select_assign(
    tensor: FloatTensor<Self>,
    dim: usize,
    indices: IntTensor<B>,
    value: FloatTensor<Self>,
) -> FloatTensor<Self> {
    #[derive(Debug)]
    struct IndexSelectDimAssign;
    // Re-runs the forward select_assign from checkpointed inputs.
    #[derive(new, Debug)]
    struct RetroSelectAssign<B: Backend> {
        tensor_id: NodeID,
        dim: usize,
        indices: IntTensor<B>,
        value_id: NodeID,
    }
    impl<B: Backend> RetroForward for RetroSelectAssign<B> {
        fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
            let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);
            let value = states.get_state::<B::FloatTensorPrimitive>(&self.value_id);
            let out = B::float_select_assign(tensor, self.dim, self.indices.clone(), value);
            states.save(out_node, out)
        }
    }
    impl<B: Backend> Backward<B, 2> for IndexSelectDimAssign {
        // (dim, indices)
        type State = (usize, IntTensor<B>);
        fn backward(
            self,
            ops: Ops<Self::State, 2>,
            grads: &mut Gradients,
            _checkpointer: &mut Checkpointer,
        ) {
            let (dim, indices) = ops.state;
            binary::<B, _, _>(
                ops.parents,
                ops.node,
                grads,
                |grad| grad,
                |grad| B::float_select(grad, dim, indices),
            );
        }
    }
    match IndexSelectDimAssign
        .prepare::<C>([tensor.node.clone(), value.node.clone()])
        .memory_bound()
        .retro_forward(RetroSelectAssign::<B>::new(
            tensor.node.id,
            dim,
            indices.clone(),
            value.node.id,
        ))
        .parents([&tensor, &value])
        .stateful()
    {
        OpsKind::Tracked(prep) => prep.finish(
            (dim, indices.clone()),
            B::float_select_assign(tensor.primitive, dim, indices, value.primitive),
        ),
        OpsKind::UnTracked(prep) => prep.finish(B::float_select_assign(
            tensor.primitive,
            dim,
            indices,
            value.primitive,
        )),
    }
}
    // Slices `tensor` with one range per dimension, registering the op on the
    // autodiff graph.
    fn float_slice(
        tensor: FloatTensor<Self>,
        ranges: &[core::ops::Range<usize>],
    ) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Index;
        // Rebuilds the slice from the checkpointed input during backprop.
        #[derive(new, Debug)]
        struct RetroSlice<B: Backend> {
            tensor_id: NodeID,
            ranges: Vec<core::ops::Range<usize>>,
            _backend: PhantomData<B>,
        }
        impl<B: Backend> RetroForward for RetroSlice<B> {
            fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
                let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);
                let out = B::float_slice(tensor, &self.ranges);
                states.save(out_node, out)
            }
        }
        impl<B: Backend> Backward<B, 1> for Index {
            // (ranges, input shape, device) captured at forward time.
            type State = (Vec<core::ops::Range<usize>>, Shape, B::Device);
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (ranges, shape, device) = ops.state;
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    // Embed the slice gradient back into a zero tensor with the
                    // original input shape.
                    let zeros = B::float_zeros(shape, &device);
                    B::float_slice_assign(zeros, &ranges, grad)
                });
            }
        }
        match Index
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroSlice::<B>::new(tensor.node.id, ranges.to_vec()))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(prep) => prep.finish(
                (
                    ranges.to_vec(),
                    tensor.primitive.shape(),
                    B::float_device(&tensor.primitive),
                ),
                B::float_slice(tensor.primitive, ranges),
            ),
            OpsKind::UnTracked(prep) => prep.finish(B::float_slice(tensor.primitive, ranges)),
        }
    }
    // Overwrites the region of `tensor` described by `ranges` with `value`,
    // registering the op on the autodiff graph.
    fn float_slice_assign(
        tensor: FloatTensor<Self>,
        ranges: &[core::ops::Range<usize>],
        value: FloatTensor<Self>,
    ) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct SliceAssign;
        // Rebuilds the forward result from the checkpointed tensor and value.
        #[derive(new, Debug)]
        struct RetroSliceAssign<B: Backend> {
            tensor_id: NodeID,
            ranges: Vec<core::ops::Range<usize>>,
            value_id: NodeID,
            _backend: PhantomData<B>,
        }
        impl<B: Backend> RetroForward for RetroSliceAssign<B> {
            fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
                let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);
                let value = states.get_state::<B::FloatTensorPrimitive>(&self.value_id);
                let out = B::float_slice_assign(tensor, &self.ranges, value);
                states.save(out_node, out)
            }
        }
        impl<B: Backend> Backward<B, 2> for SliceAssign {
            // (ranges, value shape, device) captured at forward time.
            type State = (Vec<core::ops::Range<usize>>, Shape, B::Device);
            fn backward(
                self,
                ops: Ops<Self::State, 2>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (ranges, shape_rhs, device) = ops.state;
                // Clone the ranges only as many times as there are tracked parents.
                let [ranges_4lhs, ranges_4rhs] = duplicate(&ops.parents, Some(ranges));
                binary::<B, _, _>(
                    ops.parents,
                    ops.node,
                    grads,
                    // Gradient w.r.t. `tensor`: the assigned region was overwritten,
                    // so zero it out in the incoming gradient.
                    |grad| {
                        let zeros = B::float_zeros(shape_rhs, &device);
                        B::float_slice_assign(grad, &ranges_4lhs.unwrap(), zeros)
                    },
                    // Gradient w.r.t. `value`: slice out the assigned region.
                    |grad| B::float_slice(grad, &ranges_4rhs.unwrap()),
                );
            }
        }
        match SliceAssign
            .prepare::<C>([tensor.node.clone(), value.node.clone()])
            .memory_bound()
            .retro_forward(RetroSliceAssign::<B>::new(
                tensor.node.id,
                ranges.to_vec(),
                value.node.id,
            ))
            .parents([&tensor, &value])
            .stateful()
        {
            OpsKind::Tracked(prep) => prep.finish(
                (
                    ranges.to_vec(),
                    value.primitive.shape(),
                    B::float_device(&value.primitive),
                ),
                B::float_slice_assign(tensor.primitive, ranges, value.primitive),
            ),
            OpsKind::UnTracked(prep) => prep.finish(B::float_slice_assign(
                tensor.primitive,
                ranges,
                value.primitive,
            )),
        }
    }
    // Element-wise select: takes `source` where `mask` is true, `tensor`
    // otherwise, registering the op on the autodiff graph.
    fn float_mask_where(
        tensor: FloatTensor<Self>,
        mask: BoolTensor<Self>,
        source: FloatTensor<Self>,
    ) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct MaskWhere;
        impl<B: Backend> Backward<B, 2> for MaskWhere {
            // (mask, lhs shape, rhs shape, device) captured at forward time;
            // shapes are needed to undo broadcasting in the gradients.
            type State = (BoolTensor<B>, Shape, Shape, B::Device);
            fn backward(
                self,
                ops: Ops<Self::State, 2>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (mask, shape_lhs, shape_rhs, device) = ops.state;
                // Clone the mask only as many times as there are tracked parents.
                let [mask_4lhs, mask_4rhs] = duplicate(&ops.parents, Some(mask));
                binary::<B, _, _>(
                    ops.parents,
                    ops.node,
                    grads,
                    // Gradient w.r.t. `tensor`: zero where the mask picked `source`.
                    |grad| {
                        let zeros = B::float_zeros(shape_lhs.clone(), &device);
                        let grad = B::float_mask_where(grad, mask_4lhs.unwrap(), zeros);
                        broadcast_shape::<B>(grad, &shape_lhs)
                    },
                    // Gradient w.r.t. `source`: zero where the mask kept `tensor`.
                    |grad| {
                        let zeros = B::float_zeros(shape_rhs.clone(), &device);
                        let grad = B::float_mask_where(zeros, mask_4rhs.unwrap(), grad);
                        broadcast_shape::<B>(grad, &shape_rhs)
                    },
                );
            }
        }
        match MaskWhere
            .prepare::<C>([tensor.node, source.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => prep.finish(
                (
                    mask.clone(),
                    tensor.primitive.shape(),
                    source.primitive.shape(),
                    B::float_device(&source.primitive),
                ),
                B::float_mask_where(tensor.primitive, mask, source.primitive),
            ),
            OpsKind::UnTracked(prep) => prep.finish(B::float_mask_where(
                tensor.primitive,
                mask,
                source.primitive,
            )),
        }
    }
    // Fills masked positions of `tensor` with the scalar `value`, registering
    // the op on the autodiff graph.
    fn float_mask_fill(
        tensor: FloatTensor<Self>,
        mask: BoolTensor<B>,
        value: FloatElem<B>,
    ) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct MaskFill;
        impl<B: Backend> Backward<B, 1> for MaskFill {
            // Only the mask is needed to zero out the filled positions.
            type State = BoolTensor<B>;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    // Filled positions are constants, so their gradient is zero.
                    B::float_mask_fill(grad, ops.state, 0.elem())
                });
            }
        }
        match MaskFill
            .prepare::<C>([tensor.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => prep.finish(
                mask.clone(),
                B::float_mask_fill(tensor.primitive, mask, value),
            ),
            OpsKind::UnTracked(prep) => {
                prep.finish(B::float_mask_fill(tensor.primitive, mask, value))
            }
        }
    }
fn float_equal(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {
B::float_equal(lhs.primitive, rhs.primitive)
}
fn float_equal_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {
B::float_equal_elem(lhs.primitive, rhs)
}
fn float_greater(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {
B::float_greater(lhs.primitive, rhs.primitive)
}
fn float_greater_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {
B::float_greater_elem(lhs.primitive, rhs)
}
fn float_greater_equal(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {
B::float_greater_equal(lhs.primitive, rhs.primitive)
}
fn float_greater_equal_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {
B::float_greater_equal_elem(lhs.primitive, rhs)
}
fn float_lower(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {
B::float_lower(lhs.primitive, rhs.primitive)
}
fn float_lower_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {
B::float_lower_elem(lhs.primitive, rhs)
}
fn float_lower_equal(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {
B::float_lower_equal(lhs.primitive, rhs.primitive)
}
fn float_lower_equal_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {
B::float_lower_equal_elem(lhs.primitive, rhs)
}
fn float_detach(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
// When we detach a tensor, we remove it from the graph, but we still want to keep the
// `require_grad` setting.
let is_require_grad = Self::float_is_require_grad(&tensor);
let tensor = AutodiffTensor::new(tensor.primitive);
match is_require_grad {
true => tensor.require_grad(),
false => tensor,
}
}
fn float_set_require_grad(tensor: FloatTensor<Self>, require_grad: bool) -> FloatTensor<Self> {
if require_grad {
return tensor.require_grad();
}
AutodiffTensor::new(tensor.primitive)
}
fn float_is_require_grad(tensor: &FloatTensor<Self>) -> bool {
matches!(tensor.node.requirement, Requirement::Grad)
}
    // Mean over all elements, registering the op on the autodiff graph.
    fn float_mean(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Mean;
        impl<B: Backend> Backward<B, 1> for Mean {
            // Input shape, needed to broadcast the scalar gradient back.
            type State = Shape;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    let shape = ops.state;
                    // d(mean)/dx_i = 1 / N for every element.
                    let val = 1_f64 / shape.num_elements() as f64;
                    let ones = B::float_ones(shape, &B::float_device(&grad));
                    let val = B::float_mul_scalar(ones, val.elem());
                    // Broadcast the (scalar) output grad to the input shape.
                    let grad = unsqueeze_like::<B>(grad, val.shape());
                    B::float_mul(val, grad)
                });
            }
        }
        match Mean.prepare::<C>([tensor.node]).compute_bound().stateful() {
            OpsKind::Tracked(prep) => {
                prep.finish(tensor.primitive.shape(), B::float_mean(tensor.primitive))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_mean(tensor.primitive)),
        }
    }
    // Sum over all elements, registering the op on the autodiff graph.
    fn float_sum(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Sum;
        impl<B: Backend> Backward<B, 1> for Sum {
            // Input shape, needed to broadcast the scalar gradient back.
            type State = Shape;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    // d(sum)/dx_i = 1: broadcast the output grad over the input.
                    let val = B::float_ones(ops.state, &B::float_device(&grad));
                    let grad = unsqueeze_like::<B>(grad, val.shape());
                    B::float_mul(val, grad)
                });
            }
        }
        match Sum.prepare::<C>([tensor.node]).compute_bound().stateful() {
            OpsKind::Tracked(prep) => {
                prep.finish(tensor.primitive.shape(), B::float_sum(tensor.primitive))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_sum(tensor.primitive)),
        }
    }
    // Mean along `dim`, registering the op on the autodiff graph.
    fn float_mean_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct MeanDim;
        // (input shape, reduced dim) captured at forward time.
        impl<B: Backend> Backward<B, 1> for MeanDim {
            type State = (Shape, usize);
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (shape, dim) = ops.state;
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    // Each input element contributes 1 / dim_size to the mean.
                    let val = 1_f64 / shape.dims[dim] as f64;
                    let ones = B::float_ones(shape, &B::float_device(&grad));
                    let val = B::float_mul_scalar(ones, B::FloatElem::from_elem(val));
                    // Collapse the incoming grad along `dim`, then broadcast
                    // it back over the input via the scaled ones tensor.
                    let grad = B::float_sum_dim(grad, dim);
                    B::float_mul(val, grad)
                });
            }
        }
        match MeanDim
            .prepare::<C>([tensor.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => prep.finish(
                (tensor.primitive.shape(), dim),
                B::float_mean_dim(tensor.primitive, dim),
            ),
            OpsKind::UnTracked(prep) => prep.finish(B::float_mean_dim(tensor.primitive, dim)),
        }
    }
    // Sum along `dim`, registering the op on the autodiff graph.
    fn float_sum_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct SumDim;
        // (input shape, reduced dim) captured at forward time.
        impl<B: Backend> Backward<B, 1> for SumDim {
            type State = (Shape, usize);
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (shape, dim) = ops.state;
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    // Broadcast the per-slice gradient uniformly over `dim`.
                    let ones = B::float_ones(shape, &B::float_device(&grad));
                    let grad = B::float_sum_dim(grad, dim);
                    B::float_mul(ones, grad)
                });
            }
        }
        match SumDim
            .prepare::<C>([tensor.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => prep.finish(
                (tensor.primitive.shape(), dim),
                B::float_sum_dim(tensor.primitive, dim),
            ),
            OpsKind::UnTracked(prep) => prep.finish(B::float_sum_dim(tensor.primitive, dim)),
        }
    }
fn float_argmax(tensor: FloatTensor<Self>, dim: usize) -> IntTensor<B> {
B::float_argmax(tensor.primitive, dim)
}
fn float_argmin(tensor: FloatTensor<Self>, dim: usize) -> IntTensor<B> {
B::float_argmin(tensor.primitive, dim)
}
    // Element-wise exponential, registering the op on the autodiff graph.
    fn float_exp(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Exp;
        retro_unary!(RetroExp, B::float_exp);
        impl<B: Backend> Backward<B, 1> for Exp {
            // NodeID of the checkpointed input.
            type State = NodeID;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                let input = checkpointer.retrieve_node_output(ops.state);
                // d/dx exp(x) = exp(x); recompute the output from the input.
                let output = B::float_exp(input);
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    B::float_mul(grad, output)
                });
            }
        }
        match Exp
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroExp::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                let state = prep.checkpoint(&tensor);
                prep.finish(state, B::float_exp(tensor.primitive))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_exp(tensor.primitive)),
        }
    }
    // Element-wise natural logarithm, registering the op on the autodiff graph.
    fn float_log(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Log;
        retro_unary!(RetroLog, B::float_log);
        impl<B: Backend> Backward<B, 1> for Log {
            // NodeID of the checkpointed input.
            type State = NodeID;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                let input = checkpointer.retrieve_node_output(ops.state);
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    // d/dx ln(x) = 1 / x.
                    let value = B::float_powf_scalar(input, -1.0);
                    B::float_mul(grad, value)
                });
            }
        }
        match Log
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroLog::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                let state = prep.checkpoint(&tensor);
                prep.finish(state, B::float_log(tensor.primitive))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_log(tensor.primitive)),
        }
    }
    // Element-wise ln(1 + x), registering the op on the autodiff graph.
    fn float_log1p(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Log1P;
        retro_unary!(RetroLog1P, B::float_log1p);
        impl<B: Backend> Backward<B, 1> for Log1P {
            // NodeID of the checkpointed input.
            type State = NodeID;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                let input = checkpointer.retrieve_node_output(ops.state);
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    // d/dx ln(1 + x) = 1 / (1 + x).
                    let value = B::float_add_scalar(input, 1.elem());
                    let value = B::float_powf_scalar(value, -1.0);
                    B::float_mul(grad, value)
                });
            }
        }
        match Log1P
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroLog1P::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                let state = prep.checkpoint(&tensor);
                prep.finish(state, B::float_log1p(tensor.primitive))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_log1p(tensor.primitive)),
        }
    }
    // Element-wise power with a scalar exponent, registering the op on the
    // autodiff graph.
    fn float_powf_scalar(tensor: FloatTensor<Self>, value: f32) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct PowfScalar;
        // Rebuilds the forward pow from the checkpointed base and exponent.
        #[derive(new, Debug)]
        struct RetroPowfScalar<B: Backend> {
            lhs_id: NodeID,
            rhs: f32,
            _backend: PhantomData<B>,
        }
        impl<B: Backend> RetroForward for RetroPowfScalar<B> {
            fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
                let lhs = states.get_state::<B::FloatTensorPrimitive>(&self.lhs_id);
                let out = B::float_powf_scalar(lhs, self.rhs);
                states.save(out_node, out)
            }
        }
        impl<B: Backend> Backward<B, 1> for PowfScalar {
            // (checkpointed input id, exponent).
            type State = (NodeID, f32);
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                let (tensor_id, value) = ops.state;
                let tensor = checkpointer.retrieve_node_output(tensor_id);
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    // d/dx x^n = n * x^(n-1).
                    let tmp = B::float_powf_scalar(tensor, value - 1.0);
                    let value = B::float_mul_scalar(tmp, value.elem());
                    B::float_mul(grad, value)
                });
            }
        }
        match PowfScalar
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroPowfScalar::<B>::new(tensor.node.id, value))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                let state = (prep.checkpoint(&tensor), value);
                prep.finish(state, B::float_powf_scalar(tensor.primitive, value))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_powf_scalar(tensor.primitive, value)),
        }
    }
    // Element-wise square root, registering the op on the autodiff graph.
    fn float_sqrt(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Sqrt;
        retro_unary!(RetroSqrt, B::float_sqrt);
        impl<B: Backend> Backward<B, 1> for Sqrt {
            // NodeID of the checkpointed input.
            type State = NodeID;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                let input = checkpointer.retrieve_node_output(ops.state);
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    // d/dx sqrt(x) = x^(-1/2) / 2.
                    let value = B::float_div_scalar(B::float_powf_scalar(input, -0.5), 2.elem());
                    B::float_mul(grad, value)
                });
            }
        }
        match Sqrt
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroSqrt::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                let state = prep.checkpoint(&tensor);
                prep.finish(state, B::float_sqrt(tensor.primitive))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_sqrt(tensor.primitive)),
        }
    }
    // Element-wise absolute value, registering the op on the autodiff graph.
    fn float_abs(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Abs;
        retro_unary!(RetroAbs, B::float_abs);
        impl<B: Backend> Backward<B, 1> for Abs {
            // NodeID of the checkpointed input.
            type State = NodeID;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                let tensor: B::FloatTensorPrimitive = checkpointer.retrieve_node_output(ops.state);
                // d/dx |x| = sign(x) (taken as the subgradient at 0).
                let state = B::float_sign(tensor);
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    B::float_mul(grad, state)
                });
            }
        }
        match Abs
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroAbs::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                let state = prep.checkpoint(&tensor);
                prep.finish(state, B::float_abs(tensor.primitive))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_abs(tensor.primitive)),
        }
    }
    // Element-wise cosine, registering the op on the autodiff graph.
    fn float_cos(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Cos;
        retro_unary!(RetroCos, B::float_cos);
        impl<B: Backend> Backward<B, 1> for Cos {
            // NodeID of the checkpointed input.
            type State = NodeID;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                let input = checkpointer.retrieve_node_output(ops.state);
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    // d/dx cos(x) = -sin(x).
                    let value = B::float_neg(B::float_sin(input));
                    B::float_mul(grad, value)
                });
            }
        }
        match Cos
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroCos::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                let state = prep.checkpoint(&tensor);
                prep.finish(state, B::float_cos(tensor.primitive))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_cos(tensor.primitive)),
        }
    }
    // Element-wise sine, registering the op on the autodiff graph.
    fn float_sin(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Sin;
        retro_unary!(RetroSin, B::float_sin);
        impl<B: Backend> Backward<B, 1> for Sin {
            // NodeID of the checkpointed input.
            type State = NodeID;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                let state = checkpointer.retrieve_node_output(ops.state);
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    // d/dx sin(x) = cos(x).
                    let value = B::float_cos(state);
                    B::float_mul(grad, value)
                });
            }
        }
        match Sin
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroSin::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                let state = prep.checkpoint(&tensor);
                prep.finish(state, B::float_sin(tensor.primitive))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_sin(tensor.primitive)),
        }
    }
    // Element-wise hyperbolic tangent, registering the op on the autodiff graph.
    fn float_tanh(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Tanh;
        retro_unary!(RetroTanh, B::float_tanh);
        impl<B: Backend> Backward<B, 1> for Tanh {
            // NodeID of the checkpointed input.
            type State = NodeID;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                let input = checkpointer.retrieve_node_output(ops.state);
                // Recompute the forward output from the checkpointed input.
                let state = B::float_tanh(input);
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    // d/dx tanh(x) = 1 - tanh(x)^2.
                    let value = B::float_add_scalar(
                        B::float_neg(B::float_powf_scalar(state, 2.0)),
                        1.elem(),
                    );
                    B::float_mul(grad, value)
                });
            }
        }
        match Tanh
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroTanh::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                let state = prep.checkpoint(&tensor);
                prep.finish(state, B::float_tanh(tensor.primitive))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_tanh(tensor.primitive)),
        }
    }
    // Element-wise rounding, registering the op on the autodiff graph.
    fn float_round(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Round;
        retro_unary!(RetroRound, B::float_round);
        impl<B: Backend> Backward<B, 1> for Round {
            // (shape, device) of the input, to build the zero gradient.
            type State = (Shape, B::Device);
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (shape, device) = ops.state;
                // round is piecewise constant: its derivative is zero
                // almost everywhere, so the incoming gradient is discarded.
                unary::<B, _>(ops.parents, ops.node, grads, |_grad| {
                    B::float_zeros(shape, &device)
                })
            }
        }
        match Round
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroRound::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(preps) => preps.finish(
                (tensor.primitive.shape(), B::float_device(&tensor.primitive)),
                B::float_round(tensor.primitive),
            ),
            OpsKind::UnTracked(preps) => preps.finish(B::float_round(tensor.primitive)),
        }
    }
    // Element-wise floor, registering the op on the autodiff graph.
    fn float_floor(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Floor;
        retro_unary!(RetroFloor, B::float_floor);
        impl<B: Backend> Backward<B, 1> for Floor {
            // (shape, device) of the input, to build the zero gradient.
            type State = (Shape, B::Device);
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (shape, device) = ops.state;
                // floor is piecewise constant: its derivative is zero
                // almost everywhere, so the incoming gradient is discarded.
                unary::<B, _>(ops.parents, ops.node, grads, |_grad| {
                    B::float_zeros(shape, &device)
                })
            }
        }
        match Floor
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroFloor::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(preps) => preps.finish(
                (tensor.primitive.shape(), B::float_device(&tensor.primitive)),
                B::float_floor(tensor.primitive),
            ),
            OpsKind::UnTracked(preps) => preps.finish(B::float_floor(tensor.primitive)),
        }
    }
fn float_ceil(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
#[derive(Debug)]
struct Ceil;
retro_unary!(RetroCeil, B::float_ceil);
impl<B: Backend> Backward<B, 1> for Ceil {
type State = (Shape, B::Device);
fn backward(
self,
ops: Ops<Self::State, 1>,
grads: &mut Gradients,
_checkpointer: &mut Checkpointer,
) {
let (shape, device) = ops.state;
unary::<B, _>(ops.parents, ops.node, grads, |_grad| {
B::float_zeros(shape, &device)
})
}
}
match Ceil
.prepare::<C>([tensor.node.clone()])
.memory_bound()
.retro_forward(RetroCeil::<B>::new(tensor.node.id))
.parents([&tensor])
.stateful()
{
OpsKind::Tracked(preps) => preps.finish(
(tensor.primitive.shape(), B::float_device(&tensor.primitive)),
B::float_floor(tensor.primitive),
),
OpsKind::UnTracked(preps) => preps.finish(B::float_floor(tensor.primitive)),
}
}
    // Element-wise error function, registering the op on the autodiff graph.
    fn float_erf(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Erf;
        retro_unary!(RetroErf, B::float_erf);
        impl<B: Backend> Backward<B, 1> for Erf {
            // NodeID of the checkpointed input.
            type State = NodeID;
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    // d/dx erf(x) = (2 / sqrt(pi)) * exp(-x^2).
                    let ops = checkpointer.retrieve_node_output(ops.state);
                    let exponent = B::float_neg(B::float_powf_scalar(ops, 2.0));
                    let numerator = B::float_mul_scalar(B::float_exp(exponent), 2.0.elem());
                    let denominator = core::f64::consts::PI.sqrt().elem();
                    let value = B::float_div_scalar(numerator, denominator);
                    B::float_mul(grad, value)
                });
            }
        }
        match Erf
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroErf::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                let state = prep.checkpoint(&tensor);
                prep.finish(state, B::float_erf(tensor.primitive))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_erf(tensor.primitive)),
        }
    }
    // Concatenates `tensors` along `dim`. Uses a hand-written `Step` (rather
    // than the unary/binary helpers) because the op has a variable number of
    // parents.
    fn float_cat(tensors: Vec<FloatTensor<Self>>, dim: usize) -> FloatTensor<Self> {
        #[derive(new, Debug)]
        struct CatStep<B: Backend> {
            nodes: Vec<Option<NodeRef>>,
            // The dimension of each tensor along the dim dimension.
            // This indicates the number of dimension concatenated for each tensor.
            dim_sizes: Vec<usize>,
            output: NodeRef,
            phantom: PhantomData<B>,
            dim: usize,
        }
        impl<B: Backend> Step for CatStep<B> {
            fn step(self: Box<Self>, grads: &mut Gradients, _checkpointer: &mut Checkpointer) {
                let grad = grads.consume::<B>(&self.output);
                // Full range per dimension; only `self.dim` is narrowed below.
                let ranges: Vec<_> = grad.shape().dims.iter().map(|v| 0..*v).collect();
                let mut current_index = 0;
                // Walk the concatenated gradient, handing each tracked parent
                // (untracked ones were stored as None) its slice along `dim`.
                self.nodes
                    .into_iter()
                    .zip(self.dim_sizes)
                    .filter_map(|(node, dim_size)| node.map(|node| (node, dim_size)))
                    .for_each(|(node, dim_size)| {
                        let mut ranges = ranges.clone();
                        ranges[self.dim] = current_index..dim_size + current_index;
                        current_index += dim_size;
                        grads.register::<B>(node.id, B::float_slice(grad.clone(), &ranges));
                    });
            }
            fn node(&self) -> NodeID {
                self.output.id
            }
            fn parents(&self) -> Vec<NodeID> {
                self.nodes
                    .iter()
                    .filter_map(|node| node.clone())
                    .map(|node| node.id)
                    .collect()
            }
            fn depth(&self) -> usize {
                self.output.order
            }
        }
        // Split each autodiff tensor into its node, primitive, and size along `dim`.
        let mut nodes = Vec::with_capacity(tensors.len());
        let mut primitives = Vec::with_capacity(tensors.len());
        let mut dim_sizes = Vec::with_capacity(tensors.len());
        tensors.into_iter().for_each(|tensor| {
            dim_sizes.push(tensor.primitive.shape().dims[dim]);
            nodes.push(tensor.node);
            primitives.push(tensor.primitive);
        });
        let requirement = Requirement::from_nodes(&nodes);
        // For simplicity, this operation does not checkpoint anything
        let cat_computing_property = ComputingProperty::Ambiguous;
        let checkpointer_builder = CheckpointerBuilder::default();
        let output = B::float_cat(primitives, dim);
        // No tracked parent: skip registering a backward step entirely.
        if requirement.is_none() {
            return AutodiffTensor::from_parents(
                output,
                &nodes,
                requirement,
                cat_computing_property,
            );
        }
        let output =
            AutodiffTensor::from_parents(output, &nodes, requirement, cat_computing_property);
        let nodes = nodes
            .into_iter()
            .map(|node| node.clone_if_require_grad())
            .collect::<Vec<_>>();
        let ops = CatStep::<B>::new(nodes, dim_sizes, output.node.clone(), dim);
        output.register_step(ops, checkpointer_builder)
    }
    // Max along `dim`; when tracked, the argmax indices are stored so the
    // shared MaxMinDim backward can route gradients to the winning elements.
    fn float_max_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {
        match MaxMinDim
            .prepare::<C>([tensor.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => {
                let shape = tensor.primitive.shape();
                let (tensor, index) = B::float_max_dim_with_indices(tensor.primitive, dim);
                prep.finish((index, shape), tensor)
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_max_dim(tensor.primitive, dim)),
        }
    }
    // Max along `dim`, also returning the (non-differentiable) argmax indices.
    fn float_max_dim_with_indices(
        tensor: FloatTensor<Self>,
        dim: usize,
    ) -> (FloatTensor<Self>, IntTensor<B>) {
        match MaxMinDim
            .prepare::<C>([tensor.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => {
                let shape = tensor.primitive.shape();
                let (tensor, index) = B::float_max_dim_with_indices(tensor.primitive, dim);
                // The indices double as backward state and as a return value.
                let tensor = prep.finish((index.clone(), shape), tensor);
                (tensor, index)
            }
            OpsKind::UnTracked(prep) => {
                let (tensor, index) = B::float_max_dim_with_indices(tensor.primitive, dim);
                let tensor = prep.finish(tensor);
                (tensor, index)
            }
        }
    }
    // Min along `dim`; when tracked, the argmin indices are stored so the
    // shared MaxMinDim backward can route gradients to the winning elements.
    fn float_min_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {
        match MaxMinDim
            .prepare::<C>([tensor.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => {
                let shape = tensor.primitive.shape();
                let (tensor, index) = B::float_min_dim_with_indices(tensor.primitive, dim);
                prep.finish((index, shape), tensor)
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_min_dim(tensor.primitive, dim)),
        }
    }
    // Min along `dim`, also returning the (non-differentiable) argmin indices.
    fn float_min_dim_with_indices(
        tensor: FloatTensor<Self>,
        dim: usize,
    ) -> (FloatTensor<Self>, IntTensor<B>) {
        match MaxMinDim
            .prepare::<C>([tensor.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => {
                let shape = tensor.primitive.shape();
                let (tensor, index) = B::float_min_dim_with_indices(tensor.primitive, dim);
                // The indices double as backward state and as a return value.
                let tensor = prep.finish((index.clone(), shape), tensor);
                (tensor, index)
            }
            OpsKind::UnTracked(prep) => {
                let (tensor, index) = B::float_min_dim_with_indices(tensor.primitive, dim);
                let tensor = prep.finish(tensor);
                (tensor, index)
            }
        }
    }
fn float_into_int(tensor: FloatTensor<Self>) -> <Autodiff<B> as Backend>::IntTensorPrimitive {
B::float_into_int(tensor.primitive)
}
    // Element-wise tensor-tensor power lhs^rhs, registering the op on the
    // autodiff graph with broadcasting-aware gradients.
    fn float_powf(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct PowF;
        retro_binary!(RetroPowf, B::float_powf);
        impl<B: Backend> Backward<B, 2> for PowF {
            // (lhs checkpoint id, rhs checkpoint id, broadcast bookkeeping).
            type State = (NodeID, NodeID, BinaryOpsBroadcast);
            fn backward(
                self,
                ops: Ops<Self::State, 2>,
                grads: &mut Gradients,
                checkpointer: &mut Checkpointer,
            ) {
                let (lhs_id, rhs_id, broadcast) = ops.state;
                let lhs: B::FloatTensorPrimitive = checkpointer.retrieve_node_output(lhs_id);
                let rhs: B::FloatTensorPrimitive = checkpointer.retrieve_node_output(rhs_id);
                // Both lhs and rhs are needed for both lhs and rhs gradients, but we clone them
                // the number of times required by the parents specification.
                let [rhs_4lhs, rhs_4rhs] = duplicate(&ops.parents, Some(rhs));
                let [lhs_4lhs, lhs_4rhs] = duplicate(&ops.parents, Some(lhs));
                binary::<B, _, _>(
                    ops.parents,
                    ops.node,
                    grads,
                    |grad| {
                        //rhs*(lhs.val**(rhs-1))*grad
                        let rhs1 = rhs_4lhs.unwrap();
                        let rhs2 = rhs1.clone();
                        let lhs = lhs_4lhs.unwrap();
                        let tmp = B::float_powf(
                            lhs,
                            B::float_sub_scalar(rhs1, B::FloatElem::from_elem(1.0)),
                        );
                        let value = B::float_mul(tmp, rhs2);
                        let grad = B::float_mul(grad, value);
                        // Undo any broadcasting so the grad matches lhs's shape.
                        broadcast.backward_lhs::<B>(grad)
                    },
                    |grad| {
                        //lhs**rhs * ln(lhs) * grad
                        let rhs = rhs_4rhs.unwrap();
                        let lhs1 = lhs_4rhs.unwrap();
                        let lhs2 = lhs1.clone();
                        let tmp = B::float_powf(lhs1, rhs);
                        let value = B::float_mul(tmp, B::float_log(lhs2));
                        let grad = B::float_mul(grad, value);
                        // Undo any broadcasting so the grad matches rhs's shape.
                        broadcast.backward_rhs::<B>(grad)
                    },
                );
            }
        }
        let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);
        match PowF
            .prepare::<C>([lhs.node.clone(), rhs.node.clone()])
            .memory_bound()
            .retro_forward(RetroPowf::<B>::new(lhs.node.id, rhs.node.id))
            .parents([&lhs, &rhs])
            .stateful()
        {
            OpsKind::Tracked(mut prep) => {
                let lhs_state = prep.checkpoint(&lhs);
                let rhs_state = prep.checkpoint(&rhs);
                prep.finish(
                    (lhs_state, rhs_state, broadcast),
                    B::float_powf(lhs.primitive, rhs.primitive),
                )
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_powf(lhs.primitive, rhs.primitive)),
        }
    }
    // Element-wise sign, registering the op on the autodiff graph.
    fn float_sign(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Sign;
        retro_unary!(RetroSign, B::float_sign);
        impl<B: Backend> Backward<B, 1> for Sign {
            // sign needs no forward state: its gradient is identically zero.
            type State = ();
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                unary::<B, _>(ops.parents, ops.node, grads, |grad|
                    // Always return 0 because the derivative of the sign function
                    // does not contribute to gradient updates in a meaningful way.
                    B::float_mul_scalar(grad, 0.elem()));
            }
        }
        // Stateless: the forward result is passed directly, no Tracked/UnTracked split.
        Sign.prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroSign::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateless(B::float_sign(tensor.primitive))
    }
    // Broadcast-expands `tensor` to `shape`, registering the op on the
    // autodiff graph.
    // D1: tensor, D2: shape
    fn float_expand(tensor: FloatTensor<Self>, shape: Shape) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct ExpandDim;
        // Rebuilds the expanded tensor from the checkpointed input.
        #[derive(new, Debug)]
        struct RetroExpand<B: Backend> {
            input_id: NodeID,
            shape: Shape,
            _backend: PhantomData<B>,
        }
        impl<B: Backend> RetroForward for RetroExpand<B> {
            fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
                let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);
                let out = B::float_expand(input, self.shape.clone());
                states.save(out_node, out)
            }
        }
        impl<B: Backend> Backward<B, 1> for ExpandDim {
            // (input shape, output shape) captured at forward time.
            type State = (Shape, Shape);
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (shape_in, shape_out) = ops.state;
                let ndims_in = shape_in.num_dims();
                let ndims_out = shape_out.num_dims();
                // Right-align the input shape with the output rank, padding
                // leading dims with 1 (standard broadcasting alignment).
                let mut shape_expanded = vec![1; ndims_out];
                debug_assert!(ndims_out >= ndims_in);
                for i in 0..ndims_in {
                    shape_expanded[i + (ndims_out - ndims_in)] = shape_in.dims[i];
                }
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    let shape_grad = grad.shape();
                    let mut grad = grad;
                    // Sum the gradient over every dimension that was broadcast.
                    #[allow(clippy::needless_range_loop)]
                    for i in 0..ndims_out {
                        if shape_expanded[i] == 1 && shape_grad.dims[i] != 1 {
                            grad = B::float_sum_dim(grad, i);
                        }
                    }
                    B::float_reshape(grad, shape_in)
                });
            }
        }
        match ExpandDim
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroExpand::<B>::new(tensor.node.id, shape.clone()))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(prep) => prep.finish(
                (tensor.primitive.shape(), shape.clone()),
                B::float_expand(tensor.primitive, shape),
            ),
            OpsKind::UnTracked(prep) => prep.finish(B::float_expand(tensor.primitive, shape)),
        }
    }
fn float_sort(tensor: FloatTensor<Self>, dim: usize, descending: bool) -> FloatTensor<Self> {
match super::sort::SortDim
.prepare::<C>([tensor.node])
.compute_bound()
.stateful()
{
OpsKind::Tracked(prep) => {
let shape = tensor.primitive.shape();
let (tensor, indices) =
B::float_sort_with_indices(tensor.primitive, dim, descending);
prep.finish((indices, shape), tensor)
}
OpsKind::UnTracked(prep) => {
prep.finish(B::float_sort(tensor.primitive, dim, descending))
}
}
}
fn float_sort_with_indices(
tensor: FloatTensor<Self>,
dim: usize,
descending: bool,
) -> (FloatTensor<Self>, IntTensor<B>) {
match super::sort::SortDim
.prepare::<C>([tensor.node])
.compute_bound()
.stateful()
{
OpsKind::Tracked(prep) => {
let shape = tensor.primitive.shape();
let (tensor, indices) =
B::float_sort_with_indices(tensor.primitive, dim, descending);
let tensor = prep.finish((indices.clone(), shape), tensor);
(tensor, indices)
}
OpsKind::UnTracked(prep) => {
let (tensor, indices) =
B::float_sort_with_indices(tensor.primitive, dim, descending);
let tensor = prep.finish(tensor);
(tensor, indices)
}
}
}
    fn float_argsort(tensor: FloatTensor<Self>, dim: usize, descending: bool) -> IntTensor<B> {
        // Argsort produces integer indices, which carry no gradient, so the inner
        // backend is called directly without registering an autodiff node.
        B::float_argsort(tensor.primitive, dim, descending)
    }
    fn float_repeat_dim(tensor: FloatTensor<Self>, dim: usize, times: usize) -> FloatTensor<Self> {
        // Repeats `tensor` `times` times along `dim`; the backward pass sums the
        // gradient contributions of the repeated copies back into the original dim.
        #[derive(Debug)]
        struct Repeat;
        // Re-runs the forward repeat from the checkpointed input (memory-bound op).
        #[derive(new, Debug)]
        struct RetroRepeat<B: Backend> {
            tensor_id: NodeID,
            dim: usize,
            times: usize,
            _backend: PhantomData<B>,
        }
        impl<B: Backend> RetroForward for RetroRepeat<B> {
            fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
                let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);
                let out = B::float_repeat_dim(tensor, self.dim, self.times);
                states.save(out_node, out)
            }
        }
        impl<B: Backend> Backward<B, 1> for Repeat {
            // (dim, times) recorded during the forward pass.
            type State = (usize, usize);
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (dim, times) = ops.state;
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    let mut dims = grad.shape().dims;
                    // The gradient's extent along `dim` is orig_dim_size * times.
                    let orig_dim_size = dims[dim] / times;
                    if orig_dim_size > 1 {
                        // NOTE(review): reshaping to [..., orig_dim_size, times, ...]
                        // assumes each original slice's copies lie consecutively
                        // along `dim` — confirm against the backend's
                        // `float_repeat_dim` layout semantics.
                        dims[dim] = orig_dim_size;
                        let orig_dims = dims.clone();
                        dims.insert(dim + 1, times); // shape [..., orig_dim_size, times, ...]
                        let grad = B::float_reshape(grad, Shape::from(dims));
                        let grad = B::float_sum_dim(grad, dim + 1); // sum over repeat times
                        B::float_reshape(grad, Shape::from(orig_dims))
                    } else {
                        // Original dim had size 1: summing over `dim` collapses
                        // all repeated copies at once.
                        B::float_sum_dim(grad, dim)
                    }
                });
            }
        }
        match Repeat
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroRepeat::<B>::new(tensor.node.id, dim, times))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(prep) => prep.finish(
                (dim, times),
                B::float_repeat_dim(tensor.primitive, dim, times),
            ),
            OpsKind::UnTracked(prep) => {
                prep.finish(B::float_repeat_dim(tensor.primitive, dim, times))
            }
        }
    }
    fn float_cast(tensor: FloatTensor<Self>, dtype: burn_tensor::FloatDType) -> FloatTensor<Self> {
        // NOTE(review): unlike the other ops in this impl, no backward op is
        // prepared here — `AutodiffTensor::new` wraps the cast result as a fresh
        // tensor, so gradients do not flow back through the cast. Confirm this
        // detachment from the autodiff graph is intentional.
        AutodiffTensor::new(B::float_cast(tensor.primitive, dtype))
    }
// TODO: Implement float_prod and float_sum
// https://github.com/tracel-ai/burn/issues/1458
}
/// Records whether a binary op's operands had mismatching (broadcasted) shapes,
/// so the backward pass knows whether gradients must be reduced back.
#[derive(Debug, Clone)]
enum BinaryOpsBroadcast {
    /// Operand shapes differed: (lhs shape, rhs shape) as seen in the forward pass.
    Broadcasted(Shape, Shape),
    /// Operand shapes matched exactly; gradients pass through unchanged.
    None,
}
impl BinaryOpsBroadcast {
    /// Compares the operand shapes and records whether broadcasting occurred.
    fn new<B: Backend>(lhs: &B::FloatTensorPrimitive, rhs: &B::FloatTensorPrimitive) -> Self {
        let shape_lhs = lhs.shape();
        let shape_rhs = rhs.shape();
        let ndims = shape_lhs.num_dims();

        // NOTE: indexes `shape_rhs` by the lhs rank, so both operands are
        // expected to have the same number of dimensions here.
        let mismatch = (0..ndims).any(|i| shape_lhs.dims[i] != shape_rhs.dims[i]);
        if mismatch {
            Self::Broadcasted(shape_lhs, shape_rhs)
        } else {
            Self::None
        }
    }

    /// Reduces `grad` back to the lhs operand's shape when broadcasting occurred.
    fn backward_lhs<B: Backend>(&self, grad: B::FloatTensorPrimitive) -> B::FloatTensorPrimitive {
        match self {
            Self::Broadcasted(lhs, _rhs) => broadcast_shape::<B>(grad, lhs),
            Self::None => grad,
        }
    }

    /// Reduces `grad` back to the rhs operand's shape when broadcasting occurred.
    fn backward_rhs<B: Backend>(&self, grad: B::FloatTensorPrimitive) -> B::FloatTensorPrimitive {
        match self {
            Self::Broadcasted(_lhs, rhs) => broadcast_shape::<B>(grad, rhs),
            Self::None => grad,
        }
    }
}
| rust | {
"argument_definitions": [
{
"definitions": [
"impl<B: Backend, C: CheckpointStrategy> FloatTensorOps<Self> for Autodiff<B, C> {\n fn float_from_data(data: TensorData, device: &Device<Self>) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_from_data(data, device))\n }\n\n fn float_random(\n shape: Shape,\n distribution: burn_tensor::Distribution,\n device: &Device<Self>,\n ) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_random(shape, distribution, device))\n }\n\n fn float_zeros(shape: Shape, device: &Device<Self>) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_zeros(shape, device))\n }\n\n fn float_ones(shape: Shape, device: &Device<Self>) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_ones(shape, device))\n }\n\n async fn float_into_data(tensor: FloatTensor<Self>) -> TensorData {\n B::float_into_data(tensor.primitive).await\n }\n\n fn float_device(tensor: &FloatTensor<Self>) -> Device<Self> {\n B::float_device(&tensor.primitive)\n }\n\n fn float_to_device(tensor: FloatTensor<Self>, device: &Device<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct ToDevice;\n\n impl<B: Backend> Backward<B, 1> for ToDevice {\n type State = B::Device;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_to_device(grad, &ops.state)\n });\n }\n }\n\n match ToDevice\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let device_old = B::float_device(&tensor.primitive);\n prep.finish(device_old, B::float_to_device(tensor.primitive, device))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_to_device(tensor.primitive, device)),\n }\n }\n\n fn float_empty(shape: Shape, device: &Device<Self>) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_empty(shape, device))\n }\n\n fn float_add(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Add;\n\n retro_binary!(RetroAdd, 
B::float_add);\n\n impl<B: Backend> Backward<B, 2> for Add {\n type State = (Shape, Shape);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape_lhs, shape_rhs) = ops.state;\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| broadcast_shape::<B>(grad, &shape_lhs),\n |grad| broadcast_shape::<B>(grad, &shape_rhs),\n );\n }\n }\n\n match Add\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroAdd::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (lhs.primitive.shape(), rhs.primitive.shape()),\n B::float_add(lhs.primitive, rhs.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_add(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_add_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct AddScalar;\n\n retro_unary_scalar!(RetroAddScalar, B::float_add_scalar);\n\n impl<B: Backend> Backward<B, 1> for AddScalar {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| grad);\n }\n }\n\n AddScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroAddScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateless(B::float_add_scalar(lhs.primitive, rhs))\n }\n\n fn float_sub(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sub;\n\n retro_binary!(RetroSub, B::float_sub);\n\n impl<B: Backend> Backward<B, 2> for Sub {\n type State = (Shape, Shape);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape_lhs, shape_rhs) = ops.state;\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| 
broadcast_shape::<B>(grad, &shape_lhs),\n |grad| broadcast_shape::<B>(B::float_neg(grad), &shape_rhs),\n );\n }\n }\n\n match Sub\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroSub::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (lhs.primitive.shape(), rhs.primitive.shape()),\n B::float_sub(lhs.primitive, rhs.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_sub(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_sub_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct SubScalar;\n\n retro_unary_scalar!(RetroSubScalar, B::float_sub_scalar);\n\n impl<B: Backend> Backward<B, 1> for SubScalar {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| grad);\n }\n }\n\n SubScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroSubScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateless(B::float_sub_scalar(lhs.primitive, rhs))\n }\n\n fn float_mul(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Mul;\n\n retro_binary!(RetroMul, B::float_mul);\n\n impl<B: Backend> Backward<B, 2> for Mul {\n type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (lhs, rhs, broadcast) = ops.state;\n let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));\n let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let grad = B::float_mul(grad, rhs.unwrap());\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n let grad = B::float_mul(grad, lhs.unwrap());\n 
broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let lhs_tracked = lhs.is_tracked();\n let rhs_tracked = rhs.is_tracked();\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match Mul\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroMul::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));\n let rhs_state = lhs_tracked.then(|| prep.checkpoint(&rhs));\n\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n B::float_mul(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_mul(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_mul_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct MulScalar;\n\n retro_unary_scalar!(RetroMulScalar, B::float_mul_scalar);\n\n impl<B: Backend> Backward<B, 1> for MulScalar {\n type State = FloatElem<B>;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_mul_scalar(grad, ops.state)\n });\n }\n }\n\n match MulScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroMulScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(rhs, B::float_mul_scalar(lhs.primitive, rhs)),\n OpsKind::UnTracked(prep) => prep.finish(B::float_mul_scalar(lhs.primitive, rhs)),\n }\n }\n\n fn float_div(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Div;\n\n retro_binary!(RetroDiv, B::float_div);\n\n impl<B: Backend> Backward<B, 2> for Div {\n type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut 
Checkpointer,\n ) {\n let (lhs, rhs, broadcast) = ops.state;\n let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));\n let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));\n let [rhs_4lhs, rhs_4rhs] = duplicate(&ops.parents, rhs);\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let rhs = rhs_4lhs.unwrap();\n let value = B::float_powf_scalar(rhs, -1.0);\n let grad = B::float_mul(grad, value);\n\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n let rhs = rhs_4rhs.unwrap();\n let lhs = lhs.unwrap();\n let value = B::float_div(B::float_neg(lhs), B::float_powf_scalar(rhs, 2.0));\n let grad = B::float_mul(grad, value);\n\n broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let lhs_tracked = lhs.is_tracked();\n let rhs_tracked = rhs.is_tracked();\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match Div\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroDiv::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));\n let rhs_state = (lhs_tracked || rhs_tracked).then(|| prep.checkpoint(&rhs));\n\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n B::float_div(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_div(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_div_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct DivScalar;\n\n retro_unary_scalar!(RetroDivScalar, B::float_div_scalar);\n\n impl<B: Backend> Backward<B, 1> for DivScalar {\n type State = FloatElem<B>;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let tmp = 1.0 / ops.state.elem::<f32>();\n B::float_mul_scalar(grad, tmp.elem())\n });\n }\n 
}\n\n match DivScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroDivScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(rhs, B::float_div_scalar(lhs.primitive, rhs)),\n OpsKind::UnTracked(prep) => prep.finish(B::float_div_scalar(lhs.primitive, rhs)),\n }\n }\n\n fn float_remainder(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Rem;\n\n retro_binary!(RetroRem, B::float_remainder);\n\n impl<B: Backend> Backward<B, 2> for Rem {\n type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (lhs, rhs, broadcast) = ops.state;\n let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));\n let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n // remainder(x, y) = x - floor(x / y) * y\n // partial(x - floor(x / y) * y, x) = 1\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n // partial(x - floor(x / y) * y, y) = - floor(x / y)\n let rhs = rhs.unwrap();\n let lhs = lhs.unwrap();\n let value = B::float_neg(B::float_floor(B::float_div(lhs, rhs)));\n let grad = B::float_mul(grad, value);\n broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let lhs_tracked = lhs.is_tracked();\n let rhs_tracked = rhs.is_tracked();\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match Rem\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroRem::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));\n let rhs_state = (lhs_tracked || rhs_tracked).then(|| prep.checkpoint(&rhs));\n\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n 
B::float_remainder(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_remainder(lhs.primitive, rhs.primitive))\n }\n }\n }\n\n fn float_remainder_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct RemainderScalar;\n\n retro_unary_scalar!(RetroRemainderScalar, B::float_remainder_scalar);\n\n impl<B: Backend> Backward<B, 1> for RemainderScalar {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| grad);\n }\n }\n\n RemainderScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroRemainderScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateless(B::float_remainder_scalar(lhs.primitive, rhs))\n }\n\n fn float_matmul(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Matmul;\n\n impl<B: Backend> Backward<B, 2> for Matmul {\n type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (lhs, rhs, broadcast) = ops.state;\n let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));\n let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let rhs = B::float_transpose(rhs.unwrap());\n let grad = B::float_matmul(grad, rhs);\n\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n let lhs = B::float_transpose(lhs.unwrap());\n let grad = B::float_matmul(lhs, grad);\n\n broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let lhs_tracked = lhs.is_tracked();\n let rhs_tracked = rhs.is_tracked();\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match Matmul\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n 
.compute_bound()\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));\n let rhs_state = lhs_tracked.then(|| prep.checkpoint(&rhs));\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n B::float_matmul(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_matmul(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_neg(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Neg;\n\n retro_unary!(RetroNeg, B::float_neg);\n\n impl<B: Backend> Backward<B, 1> for Neg {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| B::float_neg(grad));\n }\n }\n\n Neg.prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroNeg::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateless(B::float_neg(tensor.primitive))\n }\n\n fn float_recip(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Recip;\n\n retro_unary!(RetroRecip, B::float_recip);\n\n impl<B: Backend> Backward<B, 1> for Recip {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let tensor = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let tmp = B::float_powf_scalar(tensor, -2.0);\n let value = B::float_neg(tmp);\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Recip\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroRecip::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_recip(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_recip(tensor.primitive)),\n }\n }\n\n fn float_swap_dims(tensor: 
FloatTensor<Self>, dim1: usize, dim2: usize) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct SwapDim;\n\n #[derive(new, Debug)]\n struct RetroSwapDims<B: Backend> {\n input_id: NodeID,\n dim1: usize,\n dim2: usize,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroSwapDims<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_swap_dims(input, self.dim1, self.dim2);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for SwapDim {\n type State = (usize, usize);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim1, dim2) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_swap_dims(grad, dim2, dim1)\n });\n }\n }\n\n match SwapDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSwapDims::<B>::new(tensor.node.id, dim1, dim2))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (dim1, dim2),\n B::float_swap_dims(tensor.primitive, dim1, dim2),\n ),\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_swap_dims(tensor.primitive, dim1, dim2))\n }\n }\n }\n\n fn float_permute(tensor: FloatTensor<Self>, axes: &[usize]) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct PermuteDim;\n\n #[derive(new, Debug)]\n struct RetroPermuteDims<B: Backend> {\n input_id: NodeID,\n axes: Vec<usize>,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroPermuteDims<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_permute(input, &self.axes);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for PermuteDim {\n type State = Vec<usize>;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: 
&mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let axes = ops.state;\n\n let mut inverse = vec![0usize; axes.len()];\n axes.iter()\n .enumerate()\n .for_each(|(i, &axis)| inverse[axis] = i);\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_permute(grad, &inverse)\n });\n }\n }\n\n match PermuteDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroPermuteDims::<B>::new(tensor.node.id, axes.to_vec()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n prep.finish(axes.to_vec(), B::float_permute(tensor.primitive, axes))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_permute(tensor.primitive, axes)),\n }\n }\n\n fn float_flip(tensor: FloatTensor<Self>, axes: &[usize]) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct FlipDim;\n\n #[derive(new, Debug)]\n struct RetroFlipDims<B: Backend> {\n input_id: NodeID,\n axes: Vec<usize>,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroFlipDims<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_flip(input, &self.axes);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for FlipDim {\n type State = Vec<usize>;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let axes = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_flip(grad, &axes)\n });\n }\n }\n\n match FlipDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroFlipDims::<B>::new(tensor.node.id, axes.to_vec()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n prep.finish(axes.to_vec(), B::float_flip(tensor.primitive, axes))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_flip(tensor.primitive, axes)),\n }\n }\n\n fn float_reshape(tensor: FloatTensor<Self>, shape: 
Shape) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct ReshapeDim;\n\n #[derive(new, Debug)]\n struct RetroReshape<B: Backend> {\n input_id: NodeID,\n shape: Shape,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroReshape<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_reshape(input, self.shape.clone());\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for ReshapeDim {\n type State = (Shape, Shape);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape_original, shape) = ops.state;\n let ndims_out = shape.num_dims();\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let shape_grad = grad.shape();\n let mut grad = grad;\n\n for i in 0..ndims_out {\n if shape.dims[i] == 1 && shape_grad.dims[i] != 1 {\n grad = B::float_sum_dim(grad, i);\n }\n }\n\n B::float_reshape(grad, shape_original)\n });\n }\n }\n\n match ReshapeDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroReshape::<B>::new(tensor.node.id, shape.clone()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (tensor.primitive.shape(), shape.clone()),\n B::float_reshape(tensor.primitive, shape),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_reshape(tensor.primitive, shape)),\n }\n }\n\n fn float_gather(\n dim: usize,\n tensor: FloatTensor<Self>,\n indices: IntTensor<B>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Gather;\n\n impl<B: Backend> Backward<B, 1> for Gather {\n type State = (usize, IntTensor<B>, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, indices, shape, device) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let zeros = 
B::float_zeros(shape, &device);\n B::float_scatter(dim, zeros, indices, grad)\n });\n }\n }\n\n match Gather\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n dim,\n indices.clone(),\n tensor.primitive.shape(),\n B::float_device(&tensor.primitive),\n ),\n B::float_gather(dim, tensor.primitive, indices),\n ),\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_gather(dim, tensor.primitive, indices))\n }\n }\n }\n\n fn float_scatter(\n dim: usize,\n tensor: FloatTensor<Self>,\n indices: IntTensor<B>,\n value: FloatTensor<Self>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Scatter;\n\n impl<B: Backend> Backward<B, 2> for Scatter {\n type State = (usize, IntTensor<B>, Shape, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, indices, shape_lhs, shape_rhs, device) = ops.state;\n let [indices_4lhs, indices_4rhs] = duplicate(&ops.parents, Some(indices));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let zeros = B::float_zeros(shape_lhs, &device);\n B::float_scatter(dim, grad, indices_4lhs.unwrap(), zeros)\n },\n |grad| {\n let zeros = B::float_zeros(shape_rhs, &device);\n B::float_scatter(dim, zeros, indices_4rhs.unwrap(), grad)\n },\n );\n }\n }\n\n match Scatter\n .prepare::<C>([tensor.node, value.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n dim,\n indices.clone(),\n tensor.primitive.shape(),\n value.primitive.shape(),\n B::float_device(&value.primitive),\n ),\n B::float_scatter(dim, tensor.primitive, indices, value.primitive),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_scatter(\n dim,\n tensor.primitive,\n indices,\n value.primitive,\n )),\n }\n }\n\n fn float_select(\n tensor: FloatTensor<Self>,\n dim: usize,\n indices: IntTensor<B>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Select;\n\n 
#[derive(new, Debug)]\n struct RetroSelect<B: Backend> {\n input_id: NodeID,\n dim: usize,\n indices: IntTensor<B>,\n }\n\n impl<B: Backend> RetroForward for RetroSelect<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_select(input, self.dim, self.indices.clone());\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for Select {\n type State = (usize, IntTensor<B>, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, indices, shape, device) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let zeros = B::float_zeros(shape, &device);\n B::float_select_assign(zeros, dim, indices, grad)\n });\n }\n }\n\n match Select\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSelect::<B>::new(tensor.node.id, dim, indices.clone()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n dim,\n indices.clone(),\n tensor.primitive.shape(),\n B::float_device(&tensor.primitive),\n ),\n B::float_select(tensor.primitive, dim, indices),\n ),\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_select(tensor.primitive, dim, indices))\n }\n }\n }\n\n fn float_select_assign(\n tensor: FloatTensor<Self>,\n dim: usize,\n indices: IntTensor<B>,\n value: FloatTensor<Self>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct IndexSelectDimAssign;\n\n #[derive(new, Debug)]\n struct RetroSelectAssign<B: Backend> {\n tensor_id: NodeID,\n dim: usize,\n indices: IntTensor<B>,\n value_id: NodeID,\n }\n\n impl<B: Backend> RetroForward for RetroSelectAssign<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);\n let value = states.get_state::<B::FloatTensorPrimitive>(&self.value_id);\n 
let out = B::float_select_assign(tensor, self.dim, self.indices.clone(), value);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 2> for IndexSelectDimAssign {\n type State = (usize, IntTensor<B>);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, indices) = ops.state;\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| grad,\n |grad| B::float_select(grad, dim, indices),\n );\n }\n }\n\n match IndexSelectDimAssign\n .prepare::<C>([tensor.node.clone(), value.node.clone()])\n .memory_bound()\n .retro_forward(RetroSelectAssign::<B>::new(\n tensor.node.id,\n dim,\n indices.clone(),\n value.node.id,\n ))\n .parents([&tensor, &value])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (dim, indices.clone()),\n B::float_select_assign(tensor.primitive, dim, indices, value.primitive),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_select_assign(\n tensor.primitive,\n dim,\n indices,\n value.primitive,\n )),\n }\n }\n\n fn float_slice(\n tensor: FloatTensor<Self>,\n ranges: &[core::ops::Range<usize>],\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Index;\n\n #[derive(new, Debug)]\n struct RetroSlice<B: Backend> {\n tensor_id: NodeID,\n ranges: Vec<core::ops::Range<usize>>,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroSlice<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);\n let out = B::float_slice(tensor, &self.ranges);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for Index {\n type State = (Vec<core::ops::Range<usize>>, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (ranges, shape, device) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let zeros = 
B::float_zeros(shape, &device);\n B::float_slice_assign(zeros, &ranges, grad)\n });\n }\n }\n\n match Index\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSlice::<B>::new(tensor.node.id, ranges.to_vec()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n ranges.to_vec(),\n tensor.primitive.shape(),\n B::float_device(&tensor.primitive),\n ),\n B::float_slice(tensor.primitive, ranges),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_slice(tensor.primitive, ranges)),\n }\n }\n\n fn float_slice_assign(\n tensor: FloatTensor<Self>,\n ranges: &[core::ops::Range<usize>],\n value: FloatTensor<Self>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct SliceAssign;\n\n #[derive(new, Debug)]\n struct RetroSliceAssign<B: Backend> {\n tensor_id: NodeID,\n ranges: Vec<core::ops::Range<usize>>,\n value_id: NodeID,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroSliceAssign<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);\n let value = states.get_state::<B::FloatTensorPrimitive>(&self.value_id);\n let out = B::float_slice_assign(tensor, &self.ranges, value);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 2> for SliceAssign {\n type State = (Vec<core::ops::Range<usize>>, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (ranges, shape_rhs, device) = ops.state;\n let [ranges_4lhs, ranges_4rhs] = duplicate(&ops.parents, Some(ranges));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let zeros = B::float_zeros(shape_rhs, &device);\n B::float_slice_assign(grad, &ranges_4lhs.unwrap(), zeros)\n },\n |grad| B::float_slice(grad, &ranges_4rhs.unwrap()),\n );\n }\n }\n\n match SliceAssign\n .prepare::<C>([tensor.node.clone(), value.node.clone()])\n 
.memory_bound()\n .retro_forward(RetroSliceAssign::<B>::new(\n tensor.node.id,\n ranges.to_vec(),\n value.node.id,\n ))\n .parents([&tensor, &value])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n ranges.to_vec(),\n value.primitive.shape(),\n B::float_device(&value.primitive),\n ),\n B::float_slice_assign(tensor.primitive, ranges, value.primitive),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_slice_assign(\n tensor.primitive,\n ranges,\n value.primitive,\n )),\n }\n }\n\n fn float_mask_where(\n tensor: FloatTensor<Self>,\n mask: BoolTensor<Self>,\n source: FloatTensor<Self>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct MaskWhere;\n\n impl<B: Backend> Backward<B, 2> for MaskWhere {\n type State = (BoolTensor<B>, Shape, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (mask, shape_lhs, shape_rhs, device) = ops.state;\n let [mask_4lhs, mask_4rhs] = duplicate(&ops.parents, Some(mask));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let zeros = B::float_zeros(shape_lhs.clone(), &device);\n let grad = B::float_mask_where(grad, mask_4lhs.unwrap(), zeros);\n\n broadcast_shape::<B>(grad, &shape_lhs)\n },\n |grad| {\n let zeros = B::float_zeros(shape_rhs.clone(), &device);\n let grad = B::float_mask_where(zeros, mask_4rhs.unwrap(), grad);\n\n broadcast_shape::<B>(grad, &shape_rhs)\n },\n );\n }\n }\n\n match MaskWhere\n .prepare::<C>([tensor.node, source.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n mask.clone(),\n tensor.primitive.shape(),\n source.primitive.shape(),\n B::float_device(&source.primitive),\n ),\n B::float_mask_where(tensor.primitive, mask, source.primitive),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_mask_where(\n tensor.primitive,\n mask,\n source.primitive,\n )),\n }\n }\n\n fn float_mask_fill(\n tensor: FloatTensor<Self>,\n mask: 
BoolTensor<B>,\n value: FloatElem<B>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct MaskFill;\n\n impl<B: Backend> Backward<B, 1> for MaskFill {\n type State = BoolTensor<B>;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_mask_fill(grad, ops.state, 0.elem())\n });\n }\n }\n\n match MaskFill\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n mask.clone(),\n B::float_mask_fill(tensor.primitive, mask, value),\n ),\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_mask_fill(tensor.primitive, mask, value))\n }\n }\n }\n\n fn float_equal(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {\n B::float_equal(lhs.primitive, rhs.primitive)\n }\n\n fn float_equal_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {\n B::float_equal_elem(lhs.primitive, rhs)\n }\n\n fn float_greater(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {\n B::float_greater(lhs.primitive, rhs.primitive)\n }\n\n fn float_greater_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {\n B::float_greater_elem(lhs.primitive, rhs)\n }\n\n fn float_greater_equal(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {\n B::float_greater_equal(lhs.primitive, rhs.primitive)\n }\n\n fn float_greater_equal_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {\n B::float_greater_equal_elem(lhs.primitive, rhs)\n }\n\n fn float_lower(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {\n B::float_lower(lhs.primitive, rhs.primitive)\n }\n\n fn float_lower_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {\n B::float_lower_elem(lhs.primitive, rhs)\n }\n\n fn float_lower_equal(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {\n B::float_lower_equal(lhs.primitive, rhs.primitive)\n 
    }

    // Comparison is not differentiable; forward straight to the inner backend,
    // dropping the autodiff node (the result is a bool tensor with no gradient).
    fn float_lower_equal_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {
        B::float_lower_equal_elem(lhs.primitive, rhs)
    }

    // Detach a tensor from the autodiff graph.
    fn float_detach(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        // When we detach a tensor, we remove it from the graph, but we still want to keep the
        // `require_grad` setting, so a fresh (parentless) node is created and re-marked.
        let is_require_grad = Self::float_is_require_grad(&tensor);
        let tensor = AutodiffTensor::new(tensor.primitive);

        match is_require_grad {
            true => tensor.require_grad(),
            false => tensor,
        }
    }

    // Enable or disable gradient tracking. Disabling creates a fresh node with no
    // parents, which also severs the tensor from its history (like detach).
    fn float_set_require_grad(tensor: FloatTensor<Self>, require_grad: bool) -> FloatTensor<Self> {
        if require_grad {
            return tensor.require_grad();
        }

        AutodiffTensor::new(tensor.primitive)
    }

    // A tensor requires grad iff its node requirement is exactly `Requirement::Grad`.
    fn float_is_require_grad(tensor: &FloatTensor<Self>) -> bool {
        matches!(tensor.node.requirement, Requirement::Grad)
    }

    // Mean over all elements. Backward distributes grad / num_elements back to the
    // input shape (state = input shape, captured at forward time).
    fn float_mean(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Mean;

        impl<B: Backend> Backward<B, 1> for Mean {
            type State = Shape;

            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    let shape = ops.state;
                    // d(mean)/dx_i = 1/N for every element.
                    let val = 1_f64 / shape.num_elements() as f64;
                    let ones = B::float_ones(shape, &B::float_device(&grad));
                    let val = B::float_mul_scalar(ones, val.elem());

                    // Unsqueeze the scalar-shaped grad so it broadcasts to the input shape.
                    let grad = unsqueeze_like::<B>(grad, val.shape());
                    B::float_mul(val, grad)
                });
            }
        }

        match Mean.prepare::<C>([tensor.node]).compute_bound().stateful() {
            OpsKind::Tracked(prep) => {
                prep.finish(tensor.primitive.shape(), B::float_mean(tensor.primitive))
            }
            OpsKind::UnTracked(prep) => prep.finish(B::float_mean(tensor.primitive)),
        }
    }

    // Sum over all elements. Backward broadcasts the (scalar-shaped) grad to the
    // input shape: d(sum)/dx_i = 1 for every element.
    fn float_sum(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Sum;

        impl<B: Backend> Backward<B, 1> for Sum {
            type State = Shape;

            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut
Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let val = B::float_ones(ops.state, &B::float_device(&grad));\n\n let grad = unsqueeze_like::<B>(grad, val.shape());\n B::float_mul(val, grad)\n });\n }\n }\n\n match Sum.prepare::<C>([tensor.node]).compute_bound().stateful() {\n OpsKind::Tracked(prep) => {\n prep.finish(tensor.primitive.shape(), B::float_sum(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_sum(tensor.primitive)),\n }\n }\n\n fn float_mean_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct MeanDim;\n\n impl<B: Backend> Backward<B, 1> for MeanDim {\n type State = (Shape, usize);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape, dim) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let val = 1_f64 / shape.dims[dim] as f64;\n let ones = B::float_ones(shape, &B::float_device(&grad));\n let val = B::float_mul_scalar(ones, B::FloatElem::from_elem(val));\n\n let grad = B::float_sum_dim(grad, dim);\n B::float_mul(val, grad)\n });\n }\n }\n\n match MeanDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (tensor.primitive.shape(), dim),\n B::float_mean_dim(tensor.primitive, dim),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_mean_dim(tensor.primitive, dim)),\n }\n }\n\n fn float_sum_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct SumDim;\n\n impl<B: Backend> Backward<B, 1> for SumDim {\n type State = (Shape, usize);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape, dim) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let ones = B::float_ones(shape, &B::float_device(&grad));\n let grad = 
B::float_sum_dim(grad, dim);\n\n B::float_mul(ones, grad)\n });\n }\n }\n\n match SumDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (tensor.primitive.shape(), dim),\n B::float_sum_dim(tensor.primitive, dim),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_sum_dim(tensor.primitive, dim)),\n }\n }\n\n fn float_argmax(tensor: FloatTensor<Self>, dim: usize) -> IntTensor<B> {\n B::float_argmax(tensor.primitive, dim)\n }\n\n fn float_argmin(tensor: FloatTensor<Self>, dim: usize) -> IntTensor<B> {\n B::float_argmin(tensor.primitive, dim)\n }\n\n fn float_exp(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Exp;\n\n retro_unary!(RetroExp, B::float_exp);\n\n impl<B: Backend> Backward<B, 1> for Exp {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n let output = B::float_exp(input);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_mul(grad, output)\n });\n }\n }\n\n match Exp\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroExp::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_exp(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_exp(tensor.primitive)),\n }\n }\n\n fn float_log(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Log;\n\n retro_unary!(RetroLog, B::float_log);\n\n impl<B: Backend> Backward<B, 1> for Log {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = 
B::float_powf_scalar(input, -1.0);\n B::float_mul(grad, value)\n });\n }\n }\n\n match Log\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroLog::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_log(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_log(tensor.primitive)),\n }\n }\n\n fn float_log1p(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Log1P;\n\n retro_unary!(RetroLog1P, B::float_log1p);\n\n impl<B: Backend> Backward<B, 1> for Log1P {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_add_scalar(input, 1.elem());\n let value = B::float_powf_scalar(value, -1.0);\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Log1P\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroLog1P::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_log1p(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_log1p(tensor.primitive)),\n }\n }\n\n fn float_powf_scalar(tensor: FloatTensor<Self>, value: f32) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct PowfScalar;\n\n #[derive(new, Debug)]\n struct RetroPowfScalar<B: Backend> {\n lhs_id: NodeID,\n rhs: f32,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroPowfScalar<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let lhs = states.get_state::<B::FloatTensorPrimitive>(&self.lhs_id);\n let out = B::float_powf_scalar(lhs, self.rhs);\n states.save(out_node, out)\n }\n }\n\n impl<B: 
Backend> Backward<B, 1> for PowfScalar {\n type State = (NodeID, f32);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (tensor_id, value) = ops.state;\n let tensor = checkpointer.retrieve_node_output(tensor_id);\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let tmp = B::float_powf_scalar(tensor, value - 1.0);\n let value = B::float_mul_scalar(tmp, value.elem());\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match PowfScalar\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroPowfScalar::<B>::new(tensor.node.id, value))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = (prep.checkpoint(&tensor), value);\n prep.finish(state, B::float_powf_scalar(tensor.primitive, value))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_powf_scalar(tensor.primitive, value)),\n }\n }\n\n fn float_sqrt(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sqrt;\n\n retro_unary!(RetroSqrt, B::float_sqrt);\n\n impl<B: Backend> Backward<B, 1> for Sqrt {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_div_scalar(B::float_powf_scalar(input, -0.5), 2.elem());\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Sqrt\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSqrt::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_sqrt(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_sqrt(tensor.primitive)),\n }\n }\n\n fn float_abs(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Abs;\n\n 
retro_unary!(RetroAbs, B::float_abs);\n\n impl<B: Backend> Backward<B, 1> for Abs {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let tensor: B::FloatTensorPrimitive = checkpointer.retrieve_node_output(ops.state);\n let state = B::float_sign(tensor);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_mul(grad, state)\n });\n }\n }\n\n match Abs\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroAbs::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_abs(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_abs(tensor.primitive)),\n }\n }\n\n fn float_cos(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Cos;\n\n retro_unary!(RetroCos, B::float_cos);\n\n impl<B: Backend> Backward<B, 1> for Cos {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_neg(B::float_sin(input));\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Cos\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroCos::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_cos(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_cos(tensor.primitive)),\n }\n }\n\n fn float_sin(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sin;\n\n retro_unary!(RetroSin, B::float_sin);\n\n impl<B: Backend> Backward<B, 1> for Sin {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 
1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let state = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_cos(state);\n B::float_mul(grad, value)\n });\n }\n }\n\n match Sin\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSin::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_sin(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_sin(tensor.primitive)),\n }\n }\n\n fn float_tanh(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Tanh;\n\n retro_unary!(RetroTanh, B::float_tanh);\n\n impl<B: Backend> Backward<B, 1> for Tanh {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n let state = B::float_tanh(input);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_add_scalar(\n B::float_neg(B::float_powf_scalar(state, 2.0)),\n 1.elem(),\n );\n B::float_mul(grad, value)\n });\n }\n }\n\n match Tanh\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroTanh::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_tanh(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_tanh(tensor.primitive)),\n }\n }\n\n fn float_round(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Round;\n retro_unary!(RetroRound, B::float_round);\n\n impl<B: Backend> Backward<B, 1> for Round {\n type State = (Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n 
) {\n let (shape, device) = ops.state;\n unary::<B, _>(ops.parents, ops.node, grads, |_grad| {\n B::float_zeros(shape, &device)\n })\n }\n }\n\n match Round\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroRound::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (tensor.primitive.shape(), B::float_device(&tensor.primitive)),\n B::float_round(tensor.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_round(tensor.primitive)),\n }\n }\n\n fn float_floor(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Floor;\n retro_unary!(RetroFloor, B::float_floor);\n\n impl<B: Backend> Backward<B, 1> for Floor {\n type State = (Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape, device) = ops.state;\n unary::<B, _>(ops.parents, ops.node, grads, |_grad| {\n B::float_zeros(shape, &device)\n })\n }\n }\n\n match Floor\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroFloor::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (tensor.primitive.shape(), B::float_device(&tensor.primitive)),\n B::float_floor(tensor.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_floor(tensor.primitive)),\n }\n }\n\n fn float_ceil(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Ceil;\n retro_unary!(RetroCeil, B::float_ceil);\n\n impl<B: Backend> Backward<B, 1> for Ceil {\n type State = (Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape, device) = ops.state;\n unary::<B, _>(ops.parents, ops.node, grads, |_grad| {\n B::float_zeros(shape, &device)\n })\n }\n }\n\n match Ceil\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n 
.retro_forward(RetroCeil::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (tensor.primitive.shape(), B::float_device(&tensor.primitive)),\n B::float_floor(tensor.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_floor(tensor.primitive)),\n }\n }\n\n fn float_erf(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Erf;\n\n retro_unary!(RetroErf, B::float_erf);\n\n impl<B: Backend> Backward<B, 1> for Erf {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let ops = checkpointer.retrieve_node_output(ops.state);\n let exponent = B::float_neg(B::float_powf_scalar(ops, 2.0));\n let numerator = B::float_mul_scalar(B::float_exp(exponent), 2.0.elem());\n let denominator = core::f64::consts::PI.sqrt().elem();\n let value = B::float_div_scalar(numerator, denominator);\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Erf\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroErf::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_erf(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_erf(tensor.primitive)),\n }\n }\n\n fn float_cat(tensors: Vec<FloatTensor<Self>>, dim: usize) -> FloatTensor<Self> {\n #[derive(new, Debug)]\n struct CatStep<B: Backend> {\n nodes: Vec<Option<NodeRef>>,\n // The dimension of each tensor along the dim dimension.\n // This indicates the number of dimension concatenated for each tensor.\n dim_sizes: Vec<usize>,\n output: NodeRef,\n phantom: PhantomData<B>,\n dim: usize,\n }\n\n impl<B: Backend> Step for CatStep<B> {\n fn step(self: Box<Self>, grads: &mut Gradients, _checkpointer: &mut Checkpointer) {\n let grad = 
grads.consume::<B>(&self.output);\n let ranges: Vec<_> = grad.shape().dims.iter().map(|v| 0..*v).collect();\n\n let mut current_index = 0;\n\n self.nodes\n .into_iter()\n .zip(self.dim_sizes)\n .filter_map(|(node, dim_size)| node.map(|node| (node, dim_size)))\n .for_each(|(node, dim_size)| {\n let mut ranges = ranges.clone();\n ranges[self.dim] = current_index..dim_size + current_index;\n current_index += dim_size;\n grads.register::<B>(node.id, B::float_slice(grad.clone(), &ranges));\n });\n }\n\n fn node(&self) -> NodeID {\n self.output.id\n }\n\n fn parents(&self) -> Vec<NodeID> {\n self.nodes\n .iter()\n .filter_map(|node| node.clone())\n .map(|node| node.id)\n .collect()\n }\n fn depth(&self) -> usize {\n self.output.order\n }\n }\n\n let mut nodes = Vec::with_capacity(tensors.len());\n let mut primitives = Vec::with_capacity(tensors.len());\n let mut dim_sizes = Vec::with_capacity(tensors.len());\n\n tensors.into_iter().for_each(|tensor| {\n dim_sizes.push(tensor.primitive.shape().dims[dim]);\n nodes.push(tensor.node);\n primitives.push(tensor.primitive);\n });\n\n let requirement = Requirement::from_nodes(&nodes);\n\n // For simplicity, this operation does not checkpoint anything\n let cat_computing_property = ComputingProperty::Ambiguous;\n let checkpointer_builder = CheckpointerBuilder::default();\n\n let output = B::float_cat(primitives, dim);\n if requirement.is_none() {\n return AutodiffTensor::from_parents(\n output,\n &nodes,\n requirement,\n cat_computing_property,\n );\n }\n\n let output =\n AutodiffTensor::from_parents(output, &nodes, requirement, cat_computing_property);\n let nodes = nodes\n .into_iter()\n .map(|node| node.clone_if_require_grad())\n .collect::<Vec<_>>();\n\n let ops = CatStep::<B>::new(nodes, dim_sizes, output.node.clone(), dim);\n output.register_step(ops, checkpointer_builder)\n }\n\n fn float_max_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {\n match MaxMinDim\n .prepare::<C>([tensor.node])\n 
.compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let shape = tensor.primitive.shape();\n let (tensor, index) = B::float_max_dim_with_indices(tensor.primitive, dim);\n prep.finish((index, shape), tensor)\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_max_dim(tensor.primitive, dim)),\n }\n }\n fn float_max_dim_with_indices(\n tensor: FloatTensor<Self>,\n dim: usize,\n ) -> (FloatTensor<Self>, IntTensor<B>) {\n match MaxMinDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let shape = tensor.primitive.shape();\n let (tensor, index) = B::float_max_dim_with_indices(tensor.primitive, dim);\n let tensor = prep.finish((index.clone(), shape), tensor);\n\n (tensor, index)\n }\n OpsKind::UnTracked(prep) => {\n let (tensor, index) = B::float_max_dim_with_indices(tensor.primitive, dim);\n let tensor = prep.finish(tensor);\n\n (tensor, index)\n }\n }\n }\n fn float_min_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {\n match MaxMinDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let shape = tensor.primitive.shape();\n let (tensor, index) = B::float_min_dim_with_indices(tensor.primitive, dim);\n prep.finish((index, shape), tensor)\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_min_dim(tensor.primitive, dim)),\n }\n }\n fn float_min_dim_with_indices(\n tensor: FloatTensor<Self>,\n dim: usize,\n ) -> (FloatTensor<Self>, IntTensor<B>) {\n match MaxMinDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let shape = tensor.primitive.shape();\n let (tensor, index) = B::float_min_dim_with_indices(tensor.primitive, dim);\n let tensor = prep.finish((index.clone(), shape), tensor);\n\n (tensor, index)\n }\n OpsKind::UnTracked(prep) => {\n let (tensor, index) = B::float_min_dim_with_indices(tensor.primitive, dim);\n let tensor = prep.finish(tensor);\n\n (tensor, index)\n }\n }\n }\n\n 
fn float_into_int(tensor: FloatTensor<Self>) -> <Autodiff<B> as Backend>::IntTensorPrimitive {\n B::float_into_int(tensor.primitive)\n }\n\n fn float_powf(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct PowF;\n\n retro_binary!(RetroPowf, B::float_powf);\n\n impl<B: Backend> Backward<B, 2> for PowF {\n type State = (NodeID, NodeID, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (lhs_id, rhs_id, broadcast) = ops.state;\n let lhs: B::FloatTensorPrimitive = checkpointer.retrieve_node_output(lhs_id);\n let rhs: B::FloatTensorPrimitive = checkpointer.retrieve_node_output(rhs_id);\n\n // Both lhs and rhs are needed for both lhs and rhs gradients, but we clone them\n // the number of times required by the parents specification.\n let [rhs_4lhs, rhs_4rhs] = duplicate(&ops.parents, Some(rhs));\n let [lhs_4lhs, lhs_4rhs] = duplicate(&ops.parents, Some(lhs));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n //rhs*(lhs.val**(rhs-1))*grad\n let rhs1 = rhs_4lhs.unwrap();\n let rhs2 = rhs1.clone();\n let lhs = lhs_4lhs.unwrap();\n\n let tmp = B::float_powf(\n lhs,\n B::float_sub_scalar(rhs1, B::FloatElem::from_elem(1.0)),\n );\n let value = B::float_mul(tmp, rhs2);\n let grad = B::float_mul(grad, value);\n\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n //lhs**rhs * ln(lhs) * grad\n let rhs = rhs_4rhs.unwrap();\n let lhs1 = lhs_4rhs.unwrap();\n let lhs2 = lhs1.clone();\n let tmp = B::float_powf(lhs1, rhs);\n let value = B::float_mul(tmp, B::float_log(lhs2));\n let grad = B::float_mul(grad, value);\n\n broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match PowF\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroPowf::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, 
&rhs])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = prep.checkpoint(&lhs);\n let rhs_state = prep.checkpoint(&rhs);\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n B::float_powf(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_powf(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_sign(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sign;\n\n retro_unary!(RetroSign, B::float_sign);\n\n impl<B: Backend> Backward<B, 1> for Sign {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad|\n // Always return 0 because the derivative of the sign function\n // does not contribute to gradient updates in a meaningful way.\n B::float_mul_scalar(grad, 0.elem()));\n }\n }\n\n Sign.prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSign::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateless(B::float_sign(tensor.primitive))\n }\n\n fn float_expand(tensor: FloatTensor<Self>, shape: Shape) -> FloatTensor<Self> {\n // D1: tensor, D2: shape\n #[derive(Debug)]\n struct ExpandDim;\n\n #[derive(new, Debug)]\n struct RetroExpand<B: Backend> {\n input_id: NodeID,\n shape: Shape,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroExpand<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_expand(input, self.shape.clone());\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for ExpandDim {\n type State = (Shape, Shape);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape_in, shape_out) = ops.state;\n let ndims_in = shape_in.num_dims();\n let ndims_out = shape_out.num_dims();\n\n let 
mut shape_expanded = vec![1; ndims_out];\n\n debug_assert!(ndims_out >= ndims_in);\n\n for i in 0..ndims_in {\n shape_expanded[i + (ndims_out - ndims_in)] = shape_in.dims[i];\n }\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let shape_grad = grad.shape();\n let mut grad = grad;\n\n #[allow(clippy::needless_range_loop)]\n for i in 0..ndims_out {\n if shape_expanded[i] == 1 && shape_grad.dims[i] != 1 {\n grad = B::float_sum_dim(grad, i);\n }\n }\n\n B::float_reshape(grad, shape_in)\n });\n }\n }\n\n match ExpandDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroExpand::<B>::new(tensor.node.id, shape.clone()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (tensor.primitive.shape(), shape.clone()),\n B::float_expand(tensor.primitive, shape),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_expand(tensor.primitive, shape)),\n }\n }\n\n fn float_sort(tensor: FloatTensor<Self>, dim: usize, descending: bool) -> FloatTensor<Self> {\n match super::sort::SortDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let shape = tensor.primitive.shape();\n let (tensor, indices) =\n B::float_sort_with_indices(tensor.primitive, dim, descending);\n prep.finish((indices, shape), tensor)\n }\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_sort(tensor.primitive, dim, descending))\n }\n }\n }\n\n fn float_sort_with_indices(\n tensor: FloatTensor<Self>,\n dim: usize,\n descending: bool,\n ) -> (FloatTensor<Self>, IntTensor<B>) {\n match super::sort::SortDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let shape = tensor.primitive.shape();\n let (tensor, indices) =\n B::float_sort_with_indices(tensor.primitive, dim, descending);\n let tensor = prep.finish((indices.clone(), shape), tensor);\n\n (tensor, indices)\n }\n OpsKind::UnTracked(prep) => {\n let (tensor, indices) =\n 
B::float_sort_with_indices(tensor.primitive, dim, descending);\n let tensor = prep.finish(tensor);\n\n (tensor, indices)\n }\n }\n }\n\n fn float_argsort(tensor: FloatTensor<Self>, dim: usize, descending: bool) -> IntTensor<B> {\n B::float_argsort(tensor.primitive, dim, descending)\n }\n\n fn float_repeat_dim(tensor: FloatTensor<Self>, dim: usize, times: usize) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Repeat;\n\n #[derive(new, Debug)]\n struct RetroRepeat<B: Backend> {\n tensor_id: NodeID,\n dim: usize,\n times: usize,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroRepeat<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);\n let out = B::float_repeat_dim(tensor, self.dim, self.times);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for Repeat {\n type State = (usize, usize);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, times) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let mut dims = grad.shape().dims;\n let orig_dim_size = dims[dim] / times;\n if orig_dim_size > 1 {\n dims[dim] = orig_dim_size;\n let orig_dims = dims.clone();\n dims.insert(dim + 1, times); // shape [..., orig_dim_size, times, ...]\n let grad = B::float_reshape(grad, Shape::from(dims));\n let grad = B::float_sum_dim(grad, dim + 1); // sum over repeat times\n B::float_reshape(grad, Shape::from(orig_dims))\n } else {\n B::float_sum_dim(grad, dim)\n }\n });\n }\n }\n\n match Repeat\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroRepeat::<B>::new(tensor.node.id, dim, times))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (dim, times),\n B::float_repeat_dim(tensor.primitive, dim, times),\n ),\n OpsKind::UnTracked(prep) => {\n 
prep.finish(B::float_repeat_dim(tensor.primitive, dim, times))\n }\n }\n }\n\n fn float_cast(tensor: FloatTensor<Self>, dtype: burn_tensor::FloatDType) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_cast(tensor.primitive, dtype))\n }\n\n // TODO: Implement float_prod and float_sum\n // https://github.com/tracel-ai/burn/issues/1458\n}"
],
"name": "lhs",
"type": "FloatTensor<Self>"
}
],
"end_line": 544,
"name": "float_remainder_scalar",
"signature": "fn float_remainder_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self>",
"start_line": 519
} | {
"class_name": "impl<B: Backend, C: CheckpointStrategy> FloatTensorOps<Self> for Autodiff<B, C> {\n fn float_from_data(data: TensorData, device: &Device<Self>) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_from_data(data, device))\n }\n\n fn float_random(\n shape: Shape,\n distribution: burn_tensor::Distribution,\n device: &Device<Self>,\n ) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_random(shape, distribution, device))\n }\n\n fn float_zeros(shape: Shape, device: &Device<Self>) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_zeros(shape, device))\n }\n\n fn float_ones(shape: Shape, device: &Device<Self>) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_ones(shape, device))\n }\n\n async fn float_into_data(tensor: FloatTensor<Self>) -> TensorData {\n B::float_into_data(tensor.primitive).await\n }\n\n fn float_device(tensor: &FloatTensor<Self>) -> Device<Self> {\n B::float_device(&tensor.primitive)\n }\n\n fn float_to_device(tensor: FloatTensor<Self>, device: &Device<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct ToDevice;\n\n impl<B: Backend> Backward<B, 1> for ToDevice {\n type State = B::Device;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_to_device(grad, &ops.state)\n });\n }\n }\n\n match ToDevice\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let device_old = B::float_device(&tensor.primitive);\n prep.finish(device_old, B::float_to_device(tensor.primitive, device))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_to_device(tensor.primitive, device)),\n }\n }\n\n fn float_empty(shape: Shape, device: &Device<Self>) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_empty(shape, device))\n }\n\n fn float_add(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Add;\n\n 
retro_binary!(RetroAdd, B::float_add);\n\n impl<B: Backend> Backward<B, 2> for Add {\n type State = (Shape, Shape);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape_lhs, shape_rhs) = ops.state;\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| broadcast_shape::<B>(grad, &shape_lhs),\n |grad| broadcast_shape::<B>(grad, &shape_rhs),\n );\n }\n }\n\n match Add\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroAdd::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (lhs.primitive.shape(), rhs.primitive.shape()),\n B::float_add(lhs.primitive, rhs.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_add(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_add_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct AddScalar;\n\n retro_unary_scalar!(RetroAddScalar, B::float_add_scalar);\n\n impl<B: Backend> Backward<B, 1> for AddScalar {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| grad);\n }\n }\n\n AddScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroAddScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateless(B::float_add_scalar(lhs.primitive, rhs))\n }\n\n fn float_sub(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sub;\n\n retro_binary!(RetroSub, B::float_sub);\n\n impl<B: Backend> Backward<B, 2> for Sub {\n type State = (Shape, Shape);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape_lhs, shape_rhs) = ops.state;\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n 
grads,\n |grad| broadcast_shape::<B>(grad, &shape_lhs),\n |grad| broadcast_shape::<B>(B::float_neg(grad), &shape_rhs),\n );\n }\n }\n\n match Sub\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroSub::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (lhs.primitive.shape(), rhs.primitive.shape()),\n B::float_sub(lhs.primitive, rhs.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_sub(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_sub_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct SubScalar;\n\n retro_unary_scalar!(RetroSubScalar, B::float_sub_scalar);\n\n impl<B: Backend> Backward<B, 1> for SubScalar {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| grad);\n }\n }\n\n SubScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroSubScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateless(B::float_sub_scalar(lhs.primitive, rhs))\n }\n\n fn float_mul(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Mul;\n\n retro_binary!(RetroMul, B::float_mul);\n\n impl<B: Backend> Backward<B, 2> for Mul {\n type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (lhs, rhs, broadcast) = ops.state;\n let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));\n let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let grad = B::float_mul(grad, rhs.unwrap());\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n let grad = B::float_mul(grad, 
lhs.unwrap());\n broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let lhs_tracked = lhs.is_tracked();\n let rhs_tracked = rhs.is_tracked();\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match Mul\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroMul::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));\n let rhs_state = lhs_tracked.then(|| prep.checkpoint(&rhs));\n\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n B::float_mul(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_mul(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_mul_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct MulScalar;\n\n retro_unary_scalar!(RetroMulScalar, B::float_mul_scalar);\n\n impl<B: Backend> Backward<B, 1> for MulScalar {\n type State = FloatElem<B>;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_mul_scalar(grad, ops.state)\n });\n }\n }\n\n match MulScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroMulScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(rhs, B::float_mul_scalar(lhs.primitive, rhs)),\n OpsKind::UnTracked(prep) => prep.finish(B::float_mul_scalar(lhs.primitive, rhs)),\n }\n }\n\n fn float_div(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Div;\n\n retro_binary!(RetroDiv, B::float_div);\n\n impl<B: Backend> Backward<B, 2> for Div {\n type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n 
checkpointer: &mut Checkpointer,\n ) {\n let (lhs, rhs, broadcast) = ops.state;\n let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));\n let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));\n let [rhs_4lhs, rhs_4rhs] = duplicate(&ops.parents, rhs);\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let rhs = rhs_4lhs.unwrap();\n let value = B::float_powf_scalar(rhs, -1.0);\n let grad = B::float_mul(grad, value);\n\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n let rhs = rhs_4rhs.unwrap();\n let lhs = lhs.unwrap();\n let value = B::float_div(B::float_neg(lhs), B::float_powf_scalar(rhs, 2.0));\n let grad = B::float_mul(grad, value);\n\n broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let lhs_tracked = lhs.is_tracked();\n let rhs_tracked = rhs.is_tracked();\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match Div\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroDiv::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));\n let rhs_state = (lhs_tracked || rhs_tracked).then(|| prep.checkpoint(&rhs));\n\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n B::float_div(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_div(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_div_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct DivScalar;\n\n retro_unary_scalar!(RetroDivScalar, B::float_div_scalar);\n\n impl<B: Backend> Backward<B, 1> for DivScalar {\n type State = FloatElem<B>;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let tmp = 1.0 / ops.state.elem::<f32>();\n B::float_mul_scalar(grad, 
tmp.elem())\n });\n }\n }\n\n match DivScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroDivScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(rhs, B::float_div_scalar(lhs.primitive, rhs)),\n OpsKind::UnTracked(prep) => prep.finish(B::float_div_scalar(lhs.primitive, rhs)),\n }\n }\n\n fn float_remainder(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Rem;\n\n retro_binary!(RetroRem, B::float_remainder);\n\n impl<B: Backend> Backward<B, 2> for Rem {\n type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (lhs, rhs, broadcast) = ops.state;\n let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));\n let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n // remainder(x, y) = x - floor(x / y) * y\n // partial(x - floor(x / y) * y, x) = 1\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n // partial(x - floor(x / y) * y, y) = - floor(x / y)\n let rhs = rhs.unwrap();\n let lhs = lhs.unwrap();\n let value = B::float_neg(B::float_floor(B::float_div(lhs, rhs)));\n let grad = B::float_mul(grad, value);\n broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let lhs_tracked = lhs.is_tracked();\n let rhs_tracked = rhs.is_tracked();\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match Rem\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroRem::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));\n let rhs_state = (lhs_tracked || rhs_tracked).then(|| prep.checkpoint(&rhs));\n\n prep.finish(\n (lhs_state, 
rhs_state, broadcast),\n B::float_remainder(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_remainder(lhs.primitive, rhs.primitive))\n }\n }\n }\n\n fn float_remainder_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct RemainderScalar;\n\n retro_unary_scalar!(RetroRemainderScalar, B::float_remainder_scalar);\n\n impl<B: Backend> Backward<B, 1> for RemainderScalar {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| grad);\n }\n }\n\n RemainderScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroRemainderScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateless(B::float_remainder_scalar(lhs.primitive, rhs))\n }\n\n fn float_matmul(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Matmul;\n\n impl<B: Backend> Backward<B, 2> for Matmul {\n type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (lhs, rhs, broadcast) = ops.state;\n let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));\n let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let rhs = B::float_transpose(rhs.unwrap());\n let grad = B::float_matmul(grad, rhs);\n\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n let lhs = B::float_transpose(lhs.unwrap());\n let grad = B::float_matmul(lhs, grad);\n\n broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let lhs_tracked = lhs.is_tracked();\n let rhs_tracked = rhs.is_tracked();\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match Matmul\n .prepare::<C>([lhs.node.clone(), 
rhs.node.clone()])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));\n let rhs_state = lhs_tracked.then(|| prep.checkpoint(&rhs));\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n B::float_matmul(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_matmul(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_neg(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Neg;\n\n retro_unary!(RetroNeg, B::float_neg);\n\n impl<B: Backend> Backward<B, 1> for Neg {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| B::float_neg(grad));\n }\n }\n\n Neg.prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroNeg::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateless(B::float_neg(tensor.primitive))\n }\n\n fn float_recip(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Recip;\n\n retro_unary!(RetroRecip, B::float_recip);\n\n impl<B: Backend> Backward<B, 1> for Recip {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let tensor = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let tmp = B::float_powf_scalar(tensor, -2.0);\n let value = B::float_neg(tmp);\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Recip\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroRecip::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_recip(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_recip(tensor.primitive)),\n }\n }\n\n fn 
float_swap_dims(tensor: FloatTensor<Self>, dim1: usize, dim2: usize) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct SwapDim;\n\n #[derive(new, Debug)]\n struct RetroSwapDims<B: Backend> {\n input_id: NodeID,\n dim1: usize,\n dim2: usize,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroSwapDims<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_swap_dims(input, self.dim1, self.dim2);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for SwapDim {\n type State = (usize, usize);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim1, dim2) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_swap_dims(grad, dim2, dim1)\n });\n }\n }\n\n match SwapDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSwapDims::<B>::new(tensor.node.id, dim1, dim2))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (dim1, dim2),\n B::float_swap_dims(tensor.primitive, dim1, dim2),\n ),\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_swap_dims(tensor.primitive, dim1, dim2))\n }\n }\n }\n\n fn float_permute(tensor: FloatTensor<Self>, axes: &[usize]) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct PermuteDim;\n\n #[derive(new, Debug)]\n struct RetroPermuteDims<B: Backend> {\n input_id: NodeID,\n axes: Vec<usize>,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroPermuteDims<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_permute(input, &self.axes);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for PermuteDim {\n type State = Vec<usize>;\n\n fn backward(\n self,\n ops: 
Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let axes = ops.state;\n\n let mut inverse = vec![0usize; axes.len()];\n axes.iter()\n .enumerate()\n .for_each(|(i, &axis)| inverse[axis] = i);\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_permute(grad, &inverse)\n });\n }\n }\n\n match PermuteDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroPermuteDims::<B>::new(tensor.node.id, axes.to_vec()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n prep.finish(axes.to_vec(), B::float_permute(tensor.primitive, axes))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_permute(tensor.primitive, axes)),\n }\n }\n\n fn float_flip(tensor: FloatTensor<Self>, axes: &[usize]) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct FlipDim;\n\n #[derive(new, Debug)]\n struct RetroFlipDims<B: Backend> {\n input_id: NodeID,\n axes: Vec<usize>,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroFlipDims<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_flip(input, &self.axes);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for FlipDim {\n type State = Vec<usize>;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let axes = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_flip(grad, &axes)\n });\n }\n }\n\n match FlipDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroFlipDims::<B>::new(tensor.node.id, axes.to_vec()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n prep.finish(axes.to_vec(), B::float_flip(tensor.primitive, axes))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_flip(tensor.primitive, axes)),\n }\n }\n\n fn 
float_reshape(tensor: FloatTensor<Self>, shape: Shape) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct ReshapeDim;\n\n #[derive(new, Debug)]\n struct RetroReshape<B: Backend> {\n input_id: NodeID,\n shape: Shape,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroReshape<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_reshape(input, self.shape.clone());\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for ReshapeDim {\n type State = (Shape, Shape);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape_original, shape) = ops.state;\n let ndims_out = shape.num_dims();\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let shape_grad = grad.shape();\n let mut grad = grad;\n\n for i in 0..ndims_out {\n if shape.dims[i] == 1 && shape_grad.dims[i] != 1 {\n grad = B::float_sum_dim(grad, i);\n }\n }\n\n B::float_reshape(grad, shape_original)\n });\n }\n }\n\n match ReshapeDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroReshape::<B>::new(tensor.node.id, shape.clone()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (tensor.primitive.shape(), shape.clone()),\n B::float_reshape(tensor.primitive, shape),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_reshape(tensor.primitive, shape)),\n }\n }\n\n fn float_gather(\n dim: usize,\n tensor: FloatTensor<Self>,\n indices: IntTensor<B>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Gather;\n\n impl<B: Backend> Backward<B, 1> for Gather {\n type State = (usize, IntTensor<B>, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, indices, shape, device) = ops.state;\n\n unary::<B, _>(ops.parents, 
ops.node, grads, |grad| {\n let zeros = B::float_zeros(shape, &device);\n B::float_scatter(dim, zeros, indices, grad)\n });\n }\n }\n\n match Gather\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n dim,\n indices.clone(),\n tensor.primitive.shape(),\n B::float_device(&tensor.primitive),\n ),\n B::float_gather(dim, tensor.primitive, indices),\n ),\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_gather(dim, tensor.primitive, indices))\n }\n }\n }\n\n fn float_scatter(\n dim: usize,\n tensor: FloatTensor<Self>,\n indices: IntTensor<B>,\n value: FloatTensor<Self>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Scatter;\n\n impl<B: Backend> Backward<B, 2> for Scatter {\n type State = (usize, IntTensor<B>, Shape, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, indices, shape_lhs, shape_rhs, device) = ops.state;\n let [indices_4lhs, indices_4rhs] = duplicate(&ops.parents, Some(indices));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let zeros = B::float_zeros(shape_lhs, &device);\n B::float_scatter(dim, grad, indices_4lhs.unwrap(), zeros)\n },\n |grad| {\n let zeros = B::float_zeros(shape_rhs, &device);\n B::float_scatter(dim, zeros, indices_4rhs.unwrap(), grad)\n },\n );\n }\n }\n\n match Scatter\n .prepare::<C>([tensor.node, value.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n dim,\n indices.clone(),\n tensor.primitive.shape(),\n value.primitive.shape(),\n B::float_device(&value.primitive),\n ),\n B::float_scatter(dim, tensor.primitive, indices, value.primitive),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_scatter(\n dim,\n tensor.primitive,\n indices,\n value.primitive,\n )),\n }\n }\n\n fn float_select(\n tensor: FloatTensor<Self>,\n dim: usize,\n indices: IntTensor<B>,\n ) -> FloatTensor<Self> {\n 
#[derive(Debug)]\n struct Select;\n\n #[derive(new, Debug)]\n struct RetroSelect<B: Backend> {\n input_id: NodeID,\n dim: usize,\n indices: IntTensor<B>,\n }\n\n impl<B: Backend> RetroForward for RetroSelect<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_select(input, self.dim, self.indices.clone());\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for Select {\n type State = (usize, IntTensor<B>, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, indices, shape, device) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let zeros = B::float_zeros(shape, &device);\n B::float_select_assign(zeros, dim, indices, grad)\n });\n }\n }\n\n match Select\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSelect::<B>::new(tensor.node.id, dim, indices.clone()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n dim,\n indices.clone(),\n tensor.primitive.shape(),\n B::float_device(&tensor.primitive),\n ),\n B::float_select(tensor.primitive, dim, indices),\n ),\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_select(tensor.primitive, dim, indices))\n }\n }\n }\n\n fn float_select_assign(\n tensor: FloatTensor<Self>,\n dim: usize,\n indices: IntTensor<B>,\n value: FloatTensor<Self>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct IndexSelectDimAssign;\n\n #[derive(new, Debug)]\n struct RetroSelectAssign<B: Backend> {\n tensor_id: NodeID,\n dim: usize,\n indices: IntTensor<B>,\n value_id: NodeID,\n }\n\n impl<B: Backend> RetroForward for RetroSelectAssign<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);\n let value = 
states.get_state::<B::FloatTensorPrimitive>(&self.value_id);\n let out = B::float_select_assign(tensor, self.dim, self.indices.clone(), value);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 2> for IndexSelectDimAssign {\n type State = (usize, IntTensor<B>);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, indices) = ops.state;\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| grad,\n |grad| B::float_select(grad, dim, indices),\n );\n }\n }\n\n match IndexSelectDimAssign\n .prepare::<C>([tensor.node.clone(), value.node.clone()])\n .memory_bound()\n .retro_forward(RetroSelectAssign::<B>::new(\n tensor.node.id,\n dim,\n indices.clone(),\n value.node.id,\n ))\n .parents([&tensor, &value])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (dim, indices.clone()),\n B::float_select_assign(tensor.primitive, dim, indices, value.primitive),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_select_assign(\n tensor.primitive,\n dim,\n indices,\n value.primitive,\n )),\n }\n }\n\n fn float_slice(\n tensor: FloatTensor<Self>,\n ranges: &[core::ops::Range<usize>],\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Index;\n\n #[derive(new, Debug)]\n struct RetroSlice<B: Backend> {\n tensor_id: NodeID,\n ranges: Vec<core::ops::Range<usize>>,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroSlice<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);\n let out = B::float_slice(tensor, &self.ranges);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for Index {\n type State = (Vec<core::ops::Range<usize>>, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (ranges, shape, device) = ops.state;\n\n 
unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let zeros = B::float_zeros(shape, &device);\n B::float_slice_assign(zeros, &ranges, grad)\n });\n }\n }\n\n match Index\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSlice::<B>::new(tensor.node.id, ranges.to_vec()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n ranges.to_vec(),\n tensor.primitive.shape(),\n B::float_device(&tensor.primitive),\n ),\n B::float_slice(tensor.primitive, ranges),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_slice(tensor.primitive, ranges)),\n }\n }\n\n fn float_slice_assign(\n tensor: FloatTensor<Self>,\n ranges: &[core::ops::Range<usize>],\n value: FloatTensor<Self>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct SliceAssign;\n\n #[derive(new, Debug)]\n struct RetroSliceAssign<B: Backend> {\n tensor_id: NodeID,\n ranges: Vec<core::ops::Range<usize>>,\n value_id: NodeID,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroSliceAssign<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);\n let value = states.get_state::<B::FloatTensorPrimitive>(&self.value_id);\n let out = B::float_slice_assign(tensor, &self.ranges, value);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 2> for SliceAssign {\n type State = (Vec<core::ops::Range<usize>>, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (ranges, shape_rhs, device) = ops.state;\n let [ranges_4lhs, ranges_4rhs] = duplicate(&ops.parents, Some(ranges));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let zeros = B::float_zeros(shape_rhs, &device);\n B::float_slice_assign(grad, &ranges_4lhs.unwrap(), zeros)\n },\n |grad| B::float_slice(grad, &ranges_4rhs.unwrap()),\n );\n }\n }\n\n match 
SliceAssign\n .prepare::<C>([tensor.node.clone(), value.node.clone()])\n .memory_bound()\n .retro_forward(RetroSliceAssign::<B>::new(\n tensor.node.id,\n ranges.to_vec(),\n value.node.id,\n ))\n .parents([&tensor, &value])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n ranges.to_vec(),\n value.primitive.shape(),\n B::float_device(&value.primitive),\n ),\n B::float_slice_assign(tensor.primitive, ranges, value.primitive),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_slice_assign(\n tensor.primitive,\n ranges,\n value.primitive,\n )),\n }\n }\n\n fn float_mask_where(\n tensor: FloatTensor<Self>,\n mask: BoolTensor<Self>,\n source: FloatTensor<Self>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct MaskWhere;\n\n impl<B: Backend> Backward<B, 2> for MaskWhere {\n type State = (BoolTensor<B>, Shape, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (mask, shape_lhs, shape_rhs, device) = ops.state;\n let [mask_4lhs, mask_4rhs] = duplicate(&ops.parents, Some(mask));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let zeros = B::float_zeros(shape_lhs.clone(), &device);\n let grad = B::float_mask_where(grad, mask_4lhs.unwrap(), zeros);\n\n broadcast_shape::<B>(grad, &shape_lhs)\n },\n |grad| {\n let zeros = B::float_zeros(shape_rhs.clone(), &device);\n let grad = B::float_mask_where(zeros, mask_4rhs.unwrap(), grad);\n\n broadcast_shape::<B>(grad, &shape_rhs)\n },\n );\n }\n }\n\n match MaskWhere\n .prepare::<C>([tensor.node, source.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n mask.clone(),\n tensor.primitive.shape(),\n source.primitive.shape(),\n B::float_device(&source.primitive),\n ),\n B::float_mask_where(tensor.primitive, mask, source.primitive),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_mask_where(\n tensor.primitive,\n mask,\n source.primitive,\n )),\n }\n 
}\n\n fn float_mask_fill(\n tensor: FloatTensor<Self>,\n mask: BoolTensor<B>,\n value: FloatElem<B>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct MaskFill;\n\n impl<B: Backend> Backward<B, 1> for MaskFill {\n type State = BoolTensor<B>;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_mask_fill(grad, ops.state, 0.elem())\n });\n }\n }\n\n match MaskFill\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n mask.clone(),\n B::float_mask_fill(tensor.primitive, mask, value),\n ),\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_mask_fill(tensor.primitive, mask, value))\n }\n }\n }\n\n fn float_equal(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {\n B::float_equal(lhs.primitive, rhs.primitive)\n }\n\n fn float_equal_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {\n B::float_equal_elem(lhs.primitive, rhs)\n }\n\n fn float_greater(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {\n B::float_greater(lhs.primitive, rhs.primitive)\n }\n\n fn float_greater_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {\n B::float_greater_elem(lhs.primitive, rhs)\n }\n\n fn float_greater_equal(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {\n B::float_greater_equal(lhs.primitive, rhs.primitive)\n }\n\n fn float_greater_equal_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {\n B::float_greater_equal_elem(lhs.primitive, rhs)\n }\n\n fn float_lower(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {\n B::float_lower(lhs.primitive, rhs.primitive)\n }\n\n fn float_lower_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {\n B::float_lower_elem(lhs.primitive, rhs)\n }\n\n fn float_lower_equal(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> 
BoolTensor<B> {\n B::float_lower_equal(lhs.primitive, rhs.primitive)\n }\n\n fn float_lower_equal_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {\n B::float_lower_equal_elem(lhs.primitive, rhs)\n }\n\n fn float_detach(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n // When we detach a tensor, we remove it from the graph, but we still want to keep the\n // `require_grad` setting.\n let is_require_grad = Self::float_is_require_grad(&tensor);\n let tensor = AutodiffTensor::new(tensor.primitive);\n\n match is_require_grad {\n true => tensor.require_grad(),\n false => tensor,\n }\n }\n\n fn float_set_require_grad(tensor: FloatTensor<Self>, require_grad: bool) -> FloatTensor<Self> {\n if require_grad {\n return tensor.require_grad();\n }\n\n AutodiffTensor::new(tensor.primitive)\n }\n\n fn float_is_require_grad(tensor: &FloatTensor<Self>) -> bool {\n matches!(tensor.node.requirement, Requirement::Grad)\n }\n\n fn float_mean(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Mean;\n\n impl<B: Backend> Backward<B, 1> for Mean {\n type State = Shape;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let shape = ops.state;\n let val = 1_f64 / shape.num_elements() as f64;\n let ones = B::float_ones(shape, &B::float_device(&grad));\n let val = B::float_mul_scalar(ones, val.elem());\n\n let grad = unsqueeze_like::<B>(grad, val.shape());\n B::float_mul(val, grad)\n });\n }\n }\n\n match Mean.prepare::<C>([tensor.node]).compute_bound().stateful() {\n OpsKind::Tracked(prep) => {\n prep.finish(tensor.primitive.shape(), B::float_mean(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_mean(tensor.primitive)),\n }\n }\n\n fn float_sum(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sum;\n\n impl<B: Backend> Backward<B, 1> for Sum {\n type State = Shape;\n\n 
fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let val = B::float_ones(ops.state, &B::float_device(&grad));\n\n let grad = unsqueeze_like::<B>(grad, val.shape());\n B::float_mul(val, grad)\n });\n }\n }\n\n match Sum.prepare::<C>([tensor.node]).compute_bound().stateful() {\n OpsKind::Tracked(prep) => {\n prep.finish(tensor.primitive.shape(), B::float_sum(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_sum(tensor.primitive)),\n }\n }\n\n fn float_mean_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct MeanDim;\n\n impl<B: Backend> Backward<B, 1> for MeanDim {\n type State = (Shape, usize);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape, dim) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let val = 1_f64 / shape.dims[dim] as f64;\n let ones = B::float_ones(shape, &B::float_device(&grad));\n let val = B::float_mul_scalar(ones, B::FloatElem::from_elem(val));\n\n let grad = B::float_sum_dim(grad, dim);\n B::float_mul(val, grad)\n });\n }\n }\n\n match MeanDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (tensor.primitive.shape(), dim),\n B::float_mean_dim(tensor.primitive, dim),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_mean_dim(tensor.primitive, dim)),\n }\n }\n\n fn float_sum_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct SumDim;\n\n impl<B: Backend> Backward<B, 1> for SumDim {\n type State = (Shape, usize);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape, dim) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let ones = 
B::float_ones(shape, &B::float_device(&grad));\n let grad = B::float_sum_dim(grad, dim);\n\n B::float_mul(ones, grad)\n });\n }\n }\n\n match SumDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (tensor.primitive.shape(), dim),\n B::float_sum_dim(tensor.primitive, dim),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_sum_dim(tensor.primitive, dim)),\n }\n }\n\n fn float_argmax(tensor: FloatTensor<Self>, dim: usize) -> IntTensor<B> {\n B::float_argmax(tensor.primitive, dim)\n }\n\n fn float_argmin(tensor: FloatTensor<Self>, dim: usize) -> IntTensor<B> {\n B::float_argmin(tensor.primitive, dim)\n }\n\n fn float_exp(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Exp;\n\n retro_unary!(RetroExp, B::float_exp);\n\n impl<B: Backend> Backward<B, 1> for Exp {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n let output = B::float_exp(input);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_mul(grad, output)\n });\n }\n }\n\n match Exp\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroExp::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_exp(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_exp(tensor.primitive)),\n }\n }\n\n fn float_log(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Log;\n\n retro_unary!(RetroLog, B::float_log);\n\n impl<B: Backend> Backward<B, 1> for Log {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, 
ops.node, grads, |grad| {\n let value = B::float_powf_scalar(input, -1.0);\n B::float_mul(grad, value)\n });\n }\n }\n\n match Log\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroLog::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_log(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_log(tensor.primitive)),\n }\n }\n\n fn float_log1p(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Log1P;\n\n retro_unary!(RetroLog1P, B::float_log1p);\n\n impl<B: Backend> Backward<B, 1> for Log1P {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_add_scalar(input, 1.elem());\n let value = B::float_powf_scalar(value, -1.0);\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Log1P\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroLog1P::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_log1p(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_log1p(tensor.primitive)),\n }\n }\n\n fn float_powf_scalar(tensor: FloatTensor<Self>, value: f32) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct PowfScalar;\n\n #[derive(new, Debug)]\n struct RetroPowfScalar<B: Backend> {\n lhs_id: NodeID,\n rhs: f32,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroPowfScalar<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let lhs = states.get_state::<B::FloatTensorPrimitive>(&self.lhs_id);\n let out = B::float_powf_scalar(lhs, self.rhs);\n 
states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for PowfScalar {\n type State = (NodeID, f32);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (tensor_id, value) = ops.state;\n let tensor = checkpointer.retrieve_node_output(tensor_id);\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let tmp = B::float_powf_scalar(tensor, value - 1.0);\n let value = B::float_mul_scalar(tmp, value.elem());\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match PowfScalar\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroPowfScalar::<B>::new(tensor.node.id, value))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = (prep.checkpoint(&tensor), value);\n prep.finish(state, B::float_powf_scalar(tensor.primitive, value))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_powf_scalar(tensor.primitive, value)),\n }\n }\n\n fn float_sqrt(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sqrt;\n\n retro_unary!(RetroSqrt, B::float_sqrt);\n\n impl<B: Backend> Backward<B, 1> for Sqrt {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_div_scalar(B::float_powf_scalar(input, -0.5), 2.elem());\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Sqrt\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSqrt::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_sqrt(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_sqrt(tensor.primitive)),\n }\n }\n\n fn float_abs(tensor: FloatTensor<Self>) -> 
FloatTensor<Self> {\n #[derive(Debug)]\n struct Abs;\n\n retro_unary!(RetroAbs, B::float_abs);\n\n impl<B: Backend> Backward<B, 1> for Abs {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let tensor: B::FloatTensorPrimitive = checkpointer.retrieve_node_output(ops.state);\n let state = B::float_sign(tensor);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_mul(grad, state)\n });\n }\n }\n\n match Abs\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroAbs::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_abs(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_abs(tensor.primitive)),\n }\n }\n\n fn float_cos(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Cos;\n\n retro_unary!(RetroCos, B::float_cos);\n\n impl<B: Backend> Backward<B, 1> for Cos {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_neg(B::float_sin(input));\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Cos\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroCos::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_cos(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_cos(tensor.primitive)),\n }\n }\n\n fn float_sin(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sin;\n\n retro_unary!(RetroSin, B::float_sin);\n\n impl<B: Backend> Backward<B, 1> for Sin {\n type State = 
NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let state = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_cos(state);\n B::float_mul(grad, value)\n });\n }\n }\n\n match Sin\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSin::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_sin(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_sin(tensor.primitive)),\n }\n }\n\n fn float_tanh(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Tanh;\n\n retro_unary!(RetroTanh, B::float_tanh);\n\n impl<B: Backend> Backward<B, 1> for Tanh {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n let state = B::float_tanh(input);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_add_scalar(\n B::float_neg(B::float_powf_scalar(state, 2.0)),\n 1.elem(),\n );\n B::float_mul(grad, value)\n });\n }\n }\n\n match Tanh\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroTanh::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_tanh(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_tanh(tensor.primitive)),\n }\n }\n\n fn float_round(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Round;\n retro_unary!(RetroRound, B::float_round);\n\n impl<B: Backend> Backward<B, 1> for Round {\n type State = (Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n 
grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape, device) = ops.state;\n unary::<B, _>(ops.parents, ops.node, grads, |_grad| {\n B::float_zeros(shape, &device)\n })\n }\n }\n\n match Round\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroRound::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (tensor.primitive.shape(), B::float_device(&tensor.primitive)),\n B::float_round(tensor.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_round(tensor.primitive)),\n }\n }\n\n fn float_floor(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Floor;\n retro_unary!(RetroFloor, B::float_floor);\n\n impl<B: Backend> Backward<B, 1> for Floor {\n type State = (Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape, device) = ops.state;\n unary::<B, _>(ops.parents, ops.node, grads, |_grad| {\n B::float_zeros(shape, &device)\n })\n }\n }\n\n match Floor\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroFloor::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (tensor.primitive.shape(), B::float_device(&tensor.primitive)),\n B::float_floor(tensor.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_floor(tensor.primitive)),\n }\n }\n\n fn float_ceil(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Ceil;\n retro_unary!(RetroCeil, B::float_ceil);\n\n impl<B: Backend> Backward<B, 1> for Ceil {\n type State = (Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape, device) = ops.state;\n unary::<B, _>(ops.parents, ops.node, grads, |_grad| {\n B::float_zeros(shape, &device)\n })\n }\n }\n\n match Ceil\n 
.prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroCeil::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (tensor.primitive.shape(), B::float_device(&tensor.primitive)),\n B::float_floor(tensor.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_floor(tensor.primitive)),\n }\n }\n\n fn float_erf(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Erf;\n\n retro_unary!(RetroErf, B::float_erf);\n\n impl<B: Backend> Backward<B, 1> for Erf {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let ops = checkpointer.retrieve_node_output(ops.state);\n let exponent = B::float_neg(B::float_powf_scalar(ops, 2.0));\n let numerator = B::float_mul_scalar(B::float_exp(exponent), 2.0.elem());\n let denominator = core::f64::consts::PI.sqrt().elem();\n let value = B::float_div_scalar(numerator, denominator);\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Erf\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroErf::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_erf(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_erf(tensor.primitive)),\n }\n }\n\n fn float_cat(tensors: Vec<FloatTensor<Self>>, dim: usize) -> FloatTensor<Self> {\n #[derive(new, Debug)]\n struct CatStep<B: Backend> {\n nodes: Vec<Option<NodeRef>>,\n // The dimension of each tensor along the dim dimension.\n // This indicates the number of dimension concatenated for each tensor.\n dim_sizes: Vec<usize>,\n output: NodeRef,\n phantom: PhantomData<B>,\n dim: usize,\n }\n\n impl<B: Backend> Step for CatStep<B> {\n fn step(self: Box<Self>, grads: &mut Gradients, 
_checkpointer: &mut Checkpointer) {\n let grad = grads.consume::<B>(&self.output);\n let ranges: Vec<_> = grad.shape().dims.iter().map(|v| 0..*v).collect();\n\n let mut current_index = 0;\n\n self.nodes\n .into_iter()\n .zip(self.dim_sizes)\n .filter_map(|(node, dim_size)| node.map(|node| (node, dim_size)))\n .for_each(|(node, dim_size)| {\n let mut ranges = ranges.clone();\n ranges[self.dim] = current_index..dim_size + current_index;\n current_index += dim_size;\n grads.register::<B>(node.id, B::float_slice(grad.clone(), &ranges));\n });\n }\n\n fn node(&self) -> NodeID {\n self.output.id\n }\n\n fn parents(&self) -> Vec<NodeID> {\n self.nodes\n .iter()\n .filter_map(|node| node.clone())\n .map(|node| node.id)\n .collect()\n }\n fn depth(&self) -> usize {\n self.output.order\n }\n }\n\n let mut nodes = Vec::with_capacity(tensors.len());\n let mut primitives = Vec::with_capacity(tensors.len());\n let mut dim_sizes = Vec::with_capacity(tensors.len());\n\n tensors.into_iter().for_each(|tensor| {\n dim_sizes.push(tensor.primitive.shape().dims[dim]);\n nodes.push(tensor.node);\n primitives.push(tensor.primitive);\n });\n\n let requirement = Requirement::from_nodes(&nodes);\n\n // For simplicity, this operation does not checkpoint anything\n let cat_computing_property = ComputingProperty::Ambiguous;\n let checkpointer_builder = CheckpointerBuilder::default();\n\n let output = B::float_cat(primitives, dim);\n if requirement.is_none() {\n return AutodiffTensor::from_parents(\n output,\n &nodes,\n requirement,\n cat_computing_property,\n );\n }\n\n let output =\n AutodiffTensor::from_parents(output, &nodes, requirement, cat_computing_property);\n let nodes = nodes\n .into_iter()\n .map(|node| node.clone_if_require_grad())\n .collect::<Vec<_>>();\n\n let ops = CatStep::<B>::new(nodes, dim_sizes, output.node.clone(), dim);\n output.register_step(ops, checkpointer_builder)\n }\n\n fn float_max_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {\n match 
MaxMinDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let shape = tensor.primitive.shape();\n let (tensor, index) = B::float_max_dim_with_indices(tensor.primitive, dim);\n prep.finish((index, shape), tensor)\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_max_dim(tensor.primitive, dim)),\n }\n }\n fn float_max_dim_with_indices(\n tensor: FloatTensor<Self>,\n dim: usize,\n ) -> (FloatTensor<Self>, IntTensor<B>) {\n match MaxMinDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let shape = tensor.primitive.shape();\n let (tensor, index) = B::float_max_dim_with_indices(tensor.primitive, dim);\n let tensor = prep.finish((index.clone(), shape), tensor);\n\n (tensor, index)\n }\n OpsKind::UnTracked(prep) => {\n let (tensor, index) = B::float_max_dim_with_indices(tensor.primitive, dim);\n let tensor = prep.finish(tensor);\n\n (tensor, index)\n }\n }\n }\n fn float_min_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {\n match MaxMinDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let shape = tensor.primitive.shape();\n let (tensor, index) = B::float_min_dim_with_indices(tensor.primitive, dim);\n prep.finish((index, shape), tensor)\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_min_dim(tensor.primitive, dim)),\n }\n }\n fn float_min_dim_with_indices(\n tensor: FloatTensor<Self>,\n dim: usize,\n ) -> (FloatTensor<Self>, IntTensor<B>) {\n match MaxMinDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let shape = tensor.primitive.shape();\n let (tensor, index) = B::float_min_dim_with_indices(tensor.primitive, dim);\n let tensor = prep.finish((index.clone(), shape), tensor);\n\n (tensor, index)\n }\n OpsKind::UnTracked(prep) => {\n let (tensor, index) = B::float_min_dim_with_indices(tensor.primitive, dim);\n let tensor = 
prep.finish(tensor);\n\n (tensor, index)\n }\n }\n }\n\n fn float_into_int(tensor: FloatTensor<Self>) -> <Autodiff<B> as Backend>::IntTensorPrimitive {\n B::float_into_int(tensor.primitive)\n }\n\n fn float_powf(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct PowF;\n\n retro_binary!(RetroPowf, B::float_powf);\n\n impl<B: Backend> Backward<B, 2> for PowF {\n type State = (NodeID, NodeID, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (lhs_id, rhs_id, broadcast) = ops.state;\n let lhs: B::FloatTensorPrimitive = checkpointer.retrieve_node_output(lhs_id);\n let rhs: B::FloatTensorPrimitive = checkpointer.retrieve_node_output(rhs_id);\n\n // Both lhs and rhs are needed for both lhs and rhs gradients, but we clone them\n // the number of times required by the parents specification.\n let [rhs_4lhs, rhs_4rhs] = duplicate(&ops.parents, Some(rhs));\n let [lhs_4lhs, lhs_4rhs] = duplicate(&ops.parents, Some(lhs));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n //rhs*(lhs.val**(rhs-1))*grad\n let rhs1 = rhs_4lhs.unwrap();\n let rhs2 = rhs1.clone();\n let lhs = lhs_4lhs.unwrap();\n\n let tmp = B::float_powf(\n lhs,\n B::float_sub_scalar(rhs1, B::FloatElem::from_elem(1.0)),\n );\n let value = B::float_mul(tmp, rhs2);\n let grad = B::float_mul(grad, value);\n\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n //lhs**rhs * ln(lhs) * grad\n let rhs = rhs_4rhs.unwrap();\n let lhs1 = lhs_4rhs.unwrap();\n let lhs2 = lhs1.clone();\n let tmp = B::float_powf(lhs1, rhs);\n let value = B::float_mul(tmp, B::float_log(lhs2));\n let grad = B::float_mul(grad, value);\n\n broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match PowF\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n 
.retro_forward(RetroPowf::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = prep.checkpoint(&lhs);\n let rhs_state = prep.checkpoint(&rhs);\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n B::float_powf(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_powf(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_sign(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sign;\n\n retro_unary!(RetroSign, B::float_sign);\n\n impl<B: Backend> Backward<B, 1> for Sign {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad|\n // Always return 0 because the derivative of the sign function\n // does not contribute to gradient updates in a meaningful way.\n B::float_mul_scalar(grad, 0.elem()));\n }\n }\n\n Sign.prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSign::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateless(B::float_sign(tensor.primitive))\n }\n\n fn float_expand(tensor: FloatTensor<Self>, shape: Shape) -> FloatTensor<Self> {\n // D1: tensor, D2: shape\n #[derive(Debug)]\n struct ExpandDim;\n\n #[derive(new, Debug)]\n struct RetroExpand<B: Backend> {\n input_id: NodeID,\n shape: Shape,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroExpand<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_expand(input, self.shape.clone());\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for ExpandDim {\n type State = (Shape, Shape);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape_in, shape_out) = ops.state;\n let 
ndims_in = shape_in.num_dims();\n let ndims_out = shape_out.num_dims();\n\n let mut shape_expanded = vec![1; ndims_out];\n\n debug_assert!(ndims_out >= ndims_in);\n\n for i in 0..ndims_in {\n shape_expanded[i + (ndims_out - ndims_in)] = shape_in.dims[i];\n }\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let shape_grad = grad.shape();\n let mut grad = grad;\n\n #[allow(clippy::needless_range_loop)]\n for i in 0..ndims_out {\n if shape_expanded[i] == 1 && shape_grad.dims[i] != 1 {\n grad = B::float_sum_dim(grad, i);\n }\n }\n\n B::float_reshape(grad, shape_in)\n });\n }\n }\n\n match ExpandDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroExpand::<B>::new(tensor.node.id, shape.clone()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (tensor.primitive.shape(), shape.clone()),\n B::float_expand(tensor.primitive, shape),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_expand(tensor.primitive, shape)),\n }\n }\n\n fn float_sort(tensor: FloatTensor<Self>, dim: usize, descending: bool) -> FloatTensor<Self> {\n match super::sort::SortDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let shape = tensor.primitive.shape();\n let (tensor, indices) =\n B::float_sort_with_indices(tensor.primitive, dim, descending);\n prep.finish((indices, shape), tensor)\n }\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_sort(tensor.primitive, dim, descending))\n }\n }\n }\n\n fn float_sort_with_indices(\n tensor: FloatTensor<Self>,\n dim: usize,\n descending: bool,\n ) -> (FloatTensor<Self>, IntTensor<B>) {\n match super::sort::SortDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let shape = tensor.primitive.shape();\n let (tensor, indices) =\n B::float_sort_with_indices(tensor.primitive, dim, descending);\n let tensor = prep.finish((indices.clone(), shape), tensor);\n\n (tensor, indices)\n 
}\n OpsKind::UnTracked(prep) => {\n let (tensor, indices) =\n B::float_sort_with_indices(tensor.primitive, dim, descending);\n let tensor = prep.finish(tensor);\n\n (tensor, indices)\n }\n }\n }\n\n fn float_argsort(tensor: FloatTensor<Self>, dim: usize, descending: bool) -> IntTensor<B> {\n B::float_argsort(tensor.primitive, dim, descending)\n }\n\n fn float_repeat_dim(tensor: FloatTensor<Self>, dim: usize, times: usize) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Repeat;\n\n #[derive(new, Debug)]\n struct RetroRepeat<B: Backend> {\n tensor_id: NodeID,\n dim: usize,\n times: usize,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroRepeat<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);\n let out = B::float_repeat_dim(tensor, self.dim, self.times);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for Repeat {\n type State = (usize, usize);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, times) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let mut dims = grad.shape().dims;\n let orig_dim_size = dims[dim] / times;\n if orig_dim_size > 1 {\n dims[dim] = orig_dim_size;\n let orig_dims = dims.clone();\n dims.insert(dim + 1, times); // shape [..., orig_dim_size, times, ...]\n let grad = B::float_reshape(grad, Shape::from(dims));\n let grad = B::float_sum_dim(grad, dim + 1); // sum over repeat times\n B::float_reshape(grad, Shape::from(orig_dims))\n } else {\n B::float_sum_dim(grad, dim)\n }\n });\n }\n }\n\n match Repeat\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroRepeat::<B>::new(tensor.node.id, dim, times))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (dim, times),\n B::float_repeat_dim(tensor.primitive, dim, times),\n ),\n 
OpsKind::UnTracked(prep) => {\n prep.finish(B::float_repeat_dim(tensor.primitive, dim, times))\n }\n }\n }\n\n fn float_cast(tensor: FloatTensor<Self>, dtype: burn_tensor::FloatDType) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_cast(tensor.primitive, dtype))\n }\n\n // TODO: Implement float_prod and float_sum\n // https://github.com/tracel-ai/burn/issues/1458\n}",
"class_signature": "impl<B: Backend, C: CheckpointStrategy> FloatTensorOps<Self> for Autodiff<B, C>"
} |
float_matmul | burn-main/crates/burn-autodiff/src/ops/tensor.rs | fn float_matmul(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {
#[derive(Debug)]
struct Matmul;
impl<B: Backend> Backward<B, 2> for Matmul {
type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);
fn backward(
self,
ops: Ops<Self::State, 2>,
grads: &mut Gradients,
checkpointer: &mut Checkpointer,
) {
let (lhs, rhs, broadcast) = ops.state;
let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));
let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));
binary::<B, _, _>(
ops.parents,
ops.node,
grads,
|grad| {
let rhs = B::float_transpose(rhs.unwrap());
let grad = B::float_matmul(grad, rhs);
broadcast.backward_lhs::<B>(grad)
},
|grad| {
let lhs = B::float_transpose(lhs.unwrap());
let grad = B::float_matmul(lhs, grad);
broadcast.backward_rhs::<B>(grad)
},
);
}
}
let lhs_tracked = lhs.is_tracked();
let rhs_tracked = rhs.is_tracked();
let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);
match Matmul
.prepare::<C>([lhs.node.clone(), rhs.node.clone()])
.compute_bound()
.stateful()
{
OpsKind::Tracked(mut prep) => {
let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));
let rhs_state = lhs_tracked.then(|| prep.checkpoint(&rhs));
prep.finish(
(lhs_state, rhs_state, broadcast),
B::float_matmul(lhs.primitive, rhs.primitive),
)
}
OpsKind::UnTracked(prep) => prep.finish(B::float_matmul(lhs.primitive, rhs.primitive)),
}
} | use alloc::{boxed::Box, vec, vec::Vec};
use core::marker::PhantomData;
#[cfg(not(feature = "std"))]
#[allow(unused_imports, reason = "required on aarch64, unused on x86_64")]
use num_traits::float::Float;
use crate::{
Autodiff,
checkpoint::{
base::Checkpointer, builder::CheckpointerBuilder, retro_forward::RetroForward,
state::BackwardStates, strategy::CheckpointStrategy,
},
grads::Gradients,
graph::{ComputingProperty, NodeID, NodeRef, Requirement, Step},
ops::{Backward, Ops, OpsKind, binary, broadcast_shape, unary},
retro_binary, retro_unary, retro_unary_scalar,
tensor::AutodiffTensor,
utils::duplicate,
};
use burn_tensor::{
Device, ElementConversion, Shape, TensorData, TensorMetadata,
backend::Backend,
ops::{BoolTensor, FloatElem, FloatTensor, FloatTensorOps, IntTensor},
};
use super::maxmin::MaxMinDim;
// Unsqueeze op on primitive.
fn unsqueeze_like<B: Backend>(
tensor: B::FloatTensorPrimitive,
shape: Shape,
) -> B::FloatTensorPrimitive {
/*
let mut dims = [1; D2];
let num_ones = D2 - D;
let shape = self.shape();
dims[num_ones..(D + num_ones)].copy_from_slice(&shape.dims[..D]);
let shape = Shape::new(dims);
self.reshape(shape)
*/
let ndims_out = shape.num_dims();
let shape = tensor.shape();
let ndims_in = shape.num_dims();
let mut dims = vec![1; ndims_out];
let num_ones = ndims_out - ndims_in;
dims[num_ones..(ndims_in + num_ones)].copy_from_slice(&shape.dims[..ndims_in]);
B::float_reshape(tensor, Shape::from(dims))
}
impl<B: Backend, C: CheckpointStrategy> FloatTensorOps<Self> for Autodiff<B, C> {
fn float_from_data(data: TensorData, device: &Device<Self>) -> FloatTensor<Self> {
AutodiffTensor::new(B::float_from_data(data, device))
}
fn float_random(
shape: Shape,
distribution: burn_tensor::Distribution,
device: &Device<Self>,
) -> FloatTensor<Self> {
AutodiffTensor::new(B::float_random(shape, distribution, device))
}
fn float_zeros(shape: Shape, device: &Device<Self>) -> FloatTensor<Self> {
AutodiffTensor::new(B::float_zeros(shape, device))
}
fn float_ones(shape: Shape, device: &Device<Self>) -> FloatTensor<Self> {
AutodiffTensor::new(B::float_ones(shape, device))
}
async fn float_into_data(tensor: FloatTensor<Self>) -> TensorData {
B::float_into_data(tensor.primitive).await
}
fn float_device(tensor: &FloatTensor<Self>) -> Device<Self> {
B::float_device(&tensor.primitive)
}
fn float_to_device(tensor: FloatTensor<Self>, device: &Device<Self>) -> FloatTensor<Self> {
#[derive(Debug)]
struct ToDevice;
impl<B: Backend> Backward<B, 1> for ToDevice {
type State = B::Device;
fn backward(
self,
ops: Ops<Self::State, 1>,
grads: &mut Gradients,
_checkpointer: &mut Checkpointer,
) {
unary::<B, _>(ops.parents, ops.node, grads, |grad| {
B::float_to_device(grad, &ops.state)
});
}
}
match ToDevice
.prepare::<C>([tensor.node])
.compute_bound()
.stateful()
{
OpsKind::Tracked(prep) => {
let device_old = B::float_device(&tensor.primitive);
prep.finish(device_old, B::float_to_device(tensor.primitive, device))
}
OpsKind::UnTracked(prep) => prep.finish(B::float_to_device(tensor.primitive, device)),
}
}
fn float_empty(shape: Shape, device: &Device<Self>) -> FloatTensor<Self> {
AutodiffTensor::new(B::float_empty(shape, device))
}
fn float_add(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {
#[derive(Debug)]
struct Add;
retro_binary!(RetroAdd, B::float_add);
impl<B: Backend> Backward<B, 2> for Add {
type State = (Shape, Shape);
fn backward(
self,
ops: Ops<Self::State, 2>,
grads: &mut Gradients,
_checkpointer: &mut Checkpointer,
) {
let (shape_lhs, shape_rhs) = ops.state;
binary::<B, _, _>(
ops.parents,
ops.node,
grads,
|grad| broadcast_shape::<B>(grad, &shape_lhs),
|grad| broadcast_shape::<B>(grad, &shape_rhs),
);
}
}
match Add
.prepare::<C>([lhs.node.clone(), rhs.node.clone()])
.memory_bound()
.retro_forward(RetroAdd::<B>::new(lhs.node.id, rhs.node.id))
.parents([&lhs, &rhs])
.stateful()
{
OpsKind::Tracked(preps) => preps.finish(
(lhs.primitive.shape(), rhs.primitive.shape()),
B::float_add(lhs.primitive, rhs.primitive),
),
OpsKind::UnTracked(preps) => preps.finish(B::float_add(lhs.primitive, rhs.primitive)),
}
}
fn float_add_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {
#[derive(Debug)]
struct AddScalar;
retro_unary_scalar!(RetroAddScalar, B::float_add_scalar);
impl<B: Backend> Backward<B, 1> for AddScalar {
type State = ();
fn backward(
self,
ops: Ops<Self::State, 1>,
grads: &mut Gradients,
_checkpointer: &mut Checkpointer,
) {
unary::<B, _>(ops.parents, ops.node, grads, |grad| grad);
}
}
AddScalar
.prepare::<C>([lhs.node.clone()])
.memory_bound()
.retro_forward(RetroAddScalar::<B>::new(lhs.node.id, rhs))
.parents([&lhs])
.stateless(B::float_add_scalar(lhs.primitive, rhs))
}
fn float_sub(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {
#[derive(Debug)]
struct Sub;
retro_binary!(RetroSub, B::float_sub);
impl<B: Backend> Backward<B, 2> for Sub {
type State = (Shape, Shape);
fn backward(
self,
ops: Ops<Self::State, 2>,
grads: &mut Gradients,
_checkpointer: &mut Checkpointer,
) {
let (shape_lhs, shape_rhs) = ops.state;
binary::<B, _, _>(
ops.parents,
ops.node,
grads,
|grad| broadcast_shape::<B>(grad, &shape_lhs),
|grad| broadcast_shape::<B>(B::float_neg(grad), &shape_rhs),
);
}
}
match Sub
.prepare::<C>([lhs.node.clone(), rhs.node.clone()])
.memory_bound()
.retro_forward(RetroSub::<B>::new(lhs.node.id, rhs.node.id))
.parents([&lhs, &rhs])
.stateful()
{
OpsKind::Tracked(preps) => preps.finish(
(lhs.primitive.shape(), rhs.primitive.shape()),
B::float_sub(lhs.primitive, rhs.primitive),
),
OpsKind::UnTracked(preps) => preps.finish(B::float_sub(lhs.primitive, rhs.primitive)),
}
}
fn float_sub_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {
#[derive(Debug)]
struct SubScalar;
retro_unary_scalar!(RetroSubScalar, B::float_sub_scalar);
impl<B: Backend> Backward<B, 1> for SubScalar {
type State = ();
fn backward(
self,
ops: Ops<Self::State, 1>,
grads: &mut Gradients,
_checkpointer: &mut Checkpointer,
) {
unary::<B, _>(ops.parents, ops.node, grads, |grad| grad);
}
}
SubScalar
.prepare::<C>([lhs.node.clone()])
.memory_bound()
.retro_forward(RetroSubScalar::<B>::new(lhs.node.id, rhs))
.parents([&lhs])
.stateless(B::float_sub_scalar(lhs.primitive, rhs))
}
fn float_mul(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {
#[derive(Debug)]
struct Mul;
retro_binary!(RetroMul, B::float_mul);
impl<B: Backend> Backward<B, 2> for Mul {
type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);
fn backward(
self,
ops: Ops<Self::State, 2>,
grads: &mut Gradients,
checkpointer: &mut Checkpointer,
) {
let (lhs, rhs, broadcast) = ops.state;
let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));
let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));
binary::<B, _, _>(
ops.parents,
ops.node,
grads,
|grad| {
let grad = B::float_mul(grad, rhs.unwrap());
broadcast.backward_lhs::<B>(grad)
},
|grad| {
let grad = B::float_mul(grad, lhs.unwrap());
broadcast.backward_rhs::<B>(grad)
},
);
}
}
let lhs_tracked = lhs.is_tracked();
let rhs_tracked = rhs.is_tracked();
let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);
match Mul
.prepare::<C>([lhs.node.clone(), rhs.node.clone()])
.memory_bound()
.retro_forward(RetroMul::<B>::new(lhs.node.id, rhs.node.id))
.parents([&lhs, &rhs])
.stateful()
{
OpsKind::Tracked(mut prep) => {
let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));
let rhs_state = lhs_tracked.then(|| prep.checkpoint(&rhs));
prep.finish(
(lhs_state, rhs_state, broadcast),
B::float_mul(lhs.primitive, rhs.primitive),
)
}
OpsKind::UnTracked(prep) => prep.finish(B::float_mul(lhs.primitive, rhs.primitive)),
}
}
fn float_mul_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {
#[derive(Debug)]
struct MulScalar;
retro_unary_scalar!(RetroMulScalar, B::float_mul_scalar);
impl<B: Backend> Backward<B, 1> for MulScalar {
type State = FloatElem<B>;
fn backward(
self,
ops: Ops<Self::State, 1>,
grads: &mut Gradients,
_checkpointer: &mut Checkpointer,
) {
unary::<B, _>(ops.parents, ops.node, grads, |grad| {
B::float_mul_scalar(grad, ops.state)
});
}
}
match MulScalar
.prepare::<C>([lhs.node.clone()])
.memory_bound()
.retro_forward(RetroMulScalar::<B>::new(lhs.node.id, rhs))
.parents([&lhs])
.stateful()
{
OpsKind::Tracked(prep) => prep.finish(rhs, B::float_mul_scalar(lhs.primitive, rhs)),
OpsKind::UnTracked(prep) => prep.finish(B::float_mul_scalar(lhs.primitive, rhs)),
}
}
fn float_div(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {
#[derive(Debug)]
struct Div;
retro_binary!(RetroDiv, B::float_div);
impl<B: Backend> Backward<B, 2> for Div {
type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);
fn backward(
self,
ops: Ops<Self::State, 2>,
grads: &mut Gradients,
checkpointer: &mut Checkpointer,
) {
let (lhs, rhs, broadcast) = ops.state;
let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));
let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));
let [rhs_4lhs, rhs_4rhs] = duplicate(&ops.parents, rhs);
binary::<B, _, _>(
ops.parents,
ops.node,
grads,
|grad| {
let rhs = rhs_4lhs.unwrap();
let value = B::float_powf_scalar(rhs, -1.0);
let grad = B::float_mul(grad, value);
broadcast.backward_lhs::<B>(grad)
},
|grad| {
let rhs = rhs_4rhs.unwrap();
let lhs = lhs.unwrap();
let value = B::float_div(B::float_neg(lhs), B::float_powf_scalar(rhs, 2.0));
let grad = B::float_mul(grad, value);
broadcast.backward_rhs::<B>(grad)
},
);
}
}
let lhs_tracked = lhs.is_tracked();
let rhs_tracked = rhs.is_tracked();
let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);
match Div
.prepare::<C>([lhs.node.clone(), rhs.node.clone()])
.memory_bound()
.retro_forward(RetroDiv::<B>::new(lhs.node.id, rhs.node.id))
.parents([&lhs, &rhs])
.stateful()
{
OpsKind::Tracked(mut prep) => {
let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));
let rhs_state = (lhs_tracked || rhs_tracked).then(|| prep.checkpoint(&rhs));
prep.finish(
(lhs_state, rhs_state, broadcast),
B::float_div(lhs.primitive, rhs.primitive),
)
}
OpsKind::UnTracked(prep) => prep.finish(B::float_div(lhs.primitive, rhs.primitive)),
}
}
fn float_div_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {
#[derive(Debug)]
struct DivScalar;
retro_unary_scalar!(RetroDivScalar, B::float_div_scalar);
impl<B: Backend> Backward<B, 1> for DivScalar {
type State = FloatElem<B>;
fn backward(
self,
ops: Ops<Self::State, 1>,
grads: &mut Gradients,
_checkpointer: &mut Checkpointer,
) {
unary::<B, _>(ops.parents, ops.node, grads, |grad| {
let tmp = 1.0 / ops.state.elem::<f32>();
B::float_mul_scalar(grad, tmp.elem())
});
}
}
match DivScalar
.prepare::<C>([lhs.node.clone()])
.memory_bound()
.retro_forward(RetroDivScalar::<B>::new(lhs.node.id, rhs))
.parents([&lhs])
.stateful()
{
OpsKind::Tracked(prep) => prep.finish(rhs, B::float_div_scalar(lhs.primitive, rhs)),
OpsKind::UnTracked(prep) => prep.finish(B::float_div_scalar(lhs.primitive, rhs)),
}
}
fn float_remainder(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {
#[derive(Debug)]
struct Rem;
retro_binary!(RetroRem, B::float_remainder);
impl<B: Backend> Backward<B, 2> for Rem {
type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);
fn backward(
self,
ops: Ops<Self::State, 2>,
grads: &mut Gradients,
checkpointer: &mut Checkpointer,
) {
let (lhs, rhs, broadcast) = ops.state;
let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));
let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));
binary::<B, _, _>(
ops.parents,
ops.node,
grads,
|grad| {
// remainder(x, y) = x - floor(x / y) * y
// partial(x - floor(x / y) * y, x) = 1
broadcast.backward_lhs::<B>(grad)
},
|grad| {
// partial(x - floor(x / y) * y, y) = - floor(x / y)
let rhs = rhs.unwrap();
let lhs = lhs.unwrap();
let value = B::float_neg(B::float_floor(B::float_div(lhs, rhs)));
let grad = B::float_mul(grad, value);
broadcast.backward_rhs::<B>(grad)
},
);
}
}
let lhs_tracked = lhs.is_tracked();
let rhs_tracked = rhs.is_tracked();
let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);
match Rem
.prepare::<C>([lhs.node.clone(), rhs.node.clone()])
.memory_bound()
.retro_forward(RetroRem::<B>::new(lhs.node.id, rhs.node.id))
.parents([&lhs, &rhs])
.stateful()
{
OpsKind::Tracked(mut prep) => {
let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));
let rhs_state = (lhs_tracked || rhs_tracked).then(|| prep.checkpoint(&rhs));
prep.finish(
(lhs_state, rhs_state, broadcast),
B::float_remainder(lhs.primitive, rhs.primitive),
)
}
OpsKind::UnTracked(prep) => {
prep.finish(B::float_remainder(lhs.primitive, rhs.primitive))
}
}
}
fn float_remainder_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {
#[derive(Debug)]
struct RemainderScalar;
retro_unary_scalar!(RetroRemainderScalar, B::float_remainder_scalar);
impl<B: Backend> Backward<B, 1> for RemainderScalar {
type State = ();
fn backward(
self,
ops: Ops<Self::State, 1>,
grads: &mut Gradients,
_checkpointer: &mut Checkpointer,
) {
unary::<B, _>(ops.parents, ops.node, grads, |grad| grad);
}
}
RemainderScalar
.prepare::<C>([lhs.node.clone()])
.memory_bound()
.retro_forward(RetroRemainderScalar::<B>::new(lhs.node.id, rhs))
.parents([&lhs])
.stateless(B::float_remainder_scalar(lhs.primitive, rhs))
}
fn float_matmul(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {
#[derive(Debug)]
struct Matmul;
impl<B: Backend> Backward<B, 2> for Matmul {
type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);
fn backward(
self,
ops: Ops<Self::State, 2>,
grads: &mut Gradients,
checkpointer: &mut Checkpointer,
) {
let (lhs, rhs, broadcast) = ops.state;
let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));
let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));
binary::<B, _, _>(
ops.parents,
ops.node,
grads,
|grad| {
let rhs = B::float_transpose(rhs.unwrap());
let grad = B::float_matmul(grad, rhs);
broadcast.backward_lhs::<B>(grad)
},
|grad| {
let lhs = B::float_transpose(lhs.unwrap());
let grad = B::float_matmul(lhs, grad);
broadcast.backward_rhs::<B>(grad)
},
);
}
}
let lhs_tracked = lhs.is_tracked();
let rhs_tracked = rhs.is_tracked();
let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);
match Matmul
.prepare::<C>([lhs.node.clone(), rhs.node.clone()])
.compute_bound()
.stateful()
{
OpsKind::Tracked(mut prep) => {
let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));
let rhs_state = lhs_tracked.then(|| prep.checkpoint(&rhs));
prep.finish(
(lhs_state, rhs_state, broadcast),
B::float_matmul(lhs.primitive, rhs.primitive),
)
}
OpsKind::UnTracked(prep) => prep.finish(B::float_matmul(lhs.primitive, rhs.primitive)),
}
}
fn float_neg(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
#[derive(Debug)]
struct Neg;
retro_unary!(RetroNeg, B::float_neg);
impl<B: Backend> Backward<B, 1> for Neg {
type State = ();
fn backward(
self,
ops: Ops<Self::State, 1>,
grads: &mut Gradients,
_checkpointer: &mut Checkpointer,
) {
unary::<B, _>(ops.parents, ops.node, grads, |grad| B::float_neg(grad));
}
}
Neg.prepare::<C>([tensor.node.clone()])
.memory_bound()
.retro_forward(RetroNeg::<B>::new(tensor.node.id))
.parents([&tensor])
.stateless(B::float_neg(tensor.primitive))
}
fn float_recip(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
#[derive(Debug)]
struct Recip;
retro_unary!(RetroRecip, B::float_recip);
impl<B: Backend> Backward<B, 1> for Recip {
type State = NodeID;
fn backward(
self,
ops: Ops<Self::State, 1>,
grads: &mut Gradients,
checkpointer: &mut Checkpointer,
) {
let tensor = checkpointer.retrieve_node_output(ops.state);
unary::<B, _>(ops.parents, ops.node, grads, |grad| {
let tmp = B::float_powf_scalar(tensor, -2.0);
let value = B::float_neg(tmp);
B::float_mul(grad, value)
});
}
}
match Recip
.prepare::<C>([tensor.node.clone()])
.memory_bound()
.retro_forward(RetroRecip::<B>::new(tensor.node.id))
.parents([&tensor])
.stateful()
{
OpsKind::Tracked(mut prep) => {
let state = prep.checkpoint(&tensor);
prep.finish(state, B::float_recip(tensor.primitive))
}
OpsKind::UnTracked(prep) => prep.finish(B::float_recip(tensor.primitive)),
}
}
fn float_swap_dims(tensor: FloatTensor<Self>, dim1: usize, dim2: usize) -> FloatTensor<Self> {
#[derive(Debug)]
struct SwapDim;
#[derive(new, Debug)]
struct RetroSwapDims<B: Backend> {
input_id: NodeID,
dim1: usize,
dim2: usize,
_backend: PhantomData<B>,
}
impl<B: Backend> RetroForward for RetroSwapDims<B> {
fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);
let out = B::float_swap_dims(input, self.dim1, self.dim2);
states.save(out_node, out)
}
}
impl<B: Backend> Backward<B, 1> for SwapDim {
type State = (usize, usize);
fn backward(
self,
ops: Ops<Self::State, 1>,
grads: &mut Gradients,
_checkpointer: &mut Checkpointer,
) {
let (dim1, dim2) = ops.state;
unary::<B, _>(ops.parents, ops.node, grads, |grad| {
B::float_swap_dims(grad, dim2, dim1)
});
}
}
match SwapDim
.prepare::<C>([tensor.node.clone()])
.memory_bound()
.retro_forward(RetroSwapDims::<B>::new(tensor.node.id, dim1, dim2))
.parents([&tensor])
.stateful()
{
OpsKind::Tracked(prep) => prep.finish(
(dim1, dim2),
B::float_swap_dims(tensor.primitive, dim1, dim2),
),
OpsKind::UnTracked(prep) => {
prep.finish(B::float_swap_dims(tensor.primitive, dim1, dim2))
}
}
}
fn float_permute(tensor: FloatTensor<Self>, axes: &[usize]) -> FloatTensor<Self> {
#[derive(Debug)]
struct PermuteDim;
#[derive(new, Debug)]
struct RetroPermuteDims<B: Backend> {
input_id: NodeID,
axes: Vec<usize>,
_backend: PhantomData<B>,
}
impl<B: Backend> RetroForward for RetroPermuteDims<B> {
fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);
let out = B::float_permute(input, &self.axes);
states.save(out_node, out)
}
}
impl<B: Backend> Backward<B, 1> for PermuteDim {
type State = Vec<usize>;
fn backward(
self,
ops: Ops<Self::State, 1>,
grads: &mut Gradients,
_checkpointer: &mut Checkpointer,
) {
let axes = ops.state;
let mut inverse = vec![0usize; axes.len()];
axes.iter()
.enumerate()
.for_each(|(i, &axis)| inverse[axis] = i);
unary::<B, _>(ops.parents, ops.node, grads, |grad| {
B::float_permute(grad, &inverse)
});
}
}
match PermuteDim
.prepare::<C>([tensor.node.clone()])
.memory_bound()
.retro_forward(RetroPermuteDims::<B>::new(tensor.node.id, axes.to_vec()))
.parents([&tensor])
.stateful()
{
OpsKind::Tracked(prep) => {
prep.finish(axes.to_vec(), B::float_permute(tensor.primitive, axes))
}
OpsKind::UnTracked(prep) => prep.finish(B::float_permute(tensor.primitive, axes)),
}
}
fn float_flip(tensor: FloatTensor<Self>, axes: &[usize]) -> FloatTensor<Self> {
#[derive(Debug)]
struct FlipDim;
#[derive(new, Debug)]
struct RetroFlipDims<B: Backend> {
input_id: NodeID,
axes: Vec<usize>,
_backend: PhantomData<B>,
}
impl<B: Backend> RetroForward for RetroFlipDims<B> {
fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);
let out = B::float_flip(input, &self.axes);
states.save(out_node, out)
}
}
impl<B: Backend> Backward<B, 1> for FlipDim {
type State = Vec<usize>;
fn backward(
self,
ops: Ops<Self::State, 1>,
grads: &mut Gradients,
_checkpointer: &mut Checkpointer,
) {
let axes = ops.state;
unary::<B, _>(ops.parents, ops.node, grads, |grad| {
B::float_flip(grad, &axes)
});
}
}
match FlipDim
.prepare::<C>([tensor.node.clone()])
.memory_bound()
.retro_forward(RetroFlipDims::<B>::new(tensor.node.id, axes.to_vec()))
.parents([&tensor])
.stateful()
{
OpsKind::Tracked(prep) => {
prep.finish(axes.to_vec(), B::float_flip(tensor.primitive, axes))
}
OpsKind::UnTracked(prep) => prep.finish(B::float_flip(tensor.primitive, axes)),
}
}
fn float_reshape(tensor: FloatTensor<Self>, shape: Shape) -> FloatTensor<Self> {
#[derive(Debug)]
struct ReshapeDim;
#[derive(new, Debug)]
struct RetroReshape<B: Backend> {
input_id: NodeID,
shape: Shape,
_backend: PhantomData<B>,
}
impl<B: Backend> RetroForward for RetroReshape<B> {
fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);
let out = B::float_reshape(input, self.shape.clone());
states.save(out_node, out)
}
}
impl<B: Backend> Backward<B, 1> for ReshapeDim {
type State = (Shape, Shape);
fn backward(
self,
ops: Ops<Self::State, 1>,
grads: &mut Gradients,
_checkpointer: &mut Checkpointer,
) {
let (shape_original, shape) = ops.state;
let ndims_out = shape.num_dims();
unary::<B, _>(ops.parents, ops.node, grads, |grad| {
let shape_grad = grad.shape();
let mut grad = grad;
for i in 0..ndims_out {
if shape.dims[i] == 1 && shape_grad.dims[i] != 1 {
grad = B::float_sum_dim(grad, i);
}
}
B::float_reshape(grad, shape_original)
});
}
}
match ReshapeDim
.prepare::<C>([tensor.node.clone()])
.memory_bound()
.retro_forward(RetroReshape::<B>::new(tensor.node.id, shape.clone()))
.parents([&tensor])
.stateful()
{
OpsKind::Tracked(prep) => prep.finish(
(tensor.primitive.shape(), shape.clone()),
B::float_reshape(tensor.primitive, shape),
),
OpsKind::UnTracked(prep) => prep.finish(B::float_reshape(tensor.primitive, shape)),
}
}
fn float_gather(
dim: usize,
tensor: FloatTensor<Self>,
indices: IntTensor<B>,
) -> FloatTensor<Self> {
#[derive(Debug)]
struct Gather;
impl<B: Backend> Backward<B, 1> for Gather {
type State = (usize, IntTensor<B>, Shape, B::Device);
fn backward(
self,
ops: Ops<Self::State, 1>,
grads: &mut Gradients,
_checkpointer: &mut Checkpointer,
) {
let (dim, indices, shape, device) = ops.state;
unary::<B, _>(ops.parents, ops.node, grads, |grad| {
let zeros = B::float_zeros(shape, &device);
B::float_scatter(dim, zeros, indices, grad)
});
}
}
match Gather
.prepare::<C>([tensor.node])
.compute_bound()
.stateful()
{
OpsKind::Tracked(prep) => prep.finish(
(
dim,
indices.clone(),
tensor.primitive.shape(),
B::float_device(&tensor.primitive),
),
B::float_gather(dim, tensor.primitive, indices),
),
OpsKind::UnTracked(prep) => {
prep.finish(B::float_gather(dim, tensor.primitive, indices))
}
}
}
fn float_scatter(
dim: usize,
tensor: FloatTensor<Self>,
indices: IntTensor<B>,
value: FloatTensor<Self>,
) -> FloatTensor<Self> {
#[derive(Debug)]
struct Scatter;
impl<B: Backend> Backward<B, 2> for Scatter {
type State = (usize, IntTensor<B>, Shape, Shape, B::Device);
fn backward(
self,
ops: Ops<Self::State, 2>,
grads: &mut Gradients,
_checkpointer: &mut Checkpointer,
) {
let (dim, indices, shape_lhs, shape_rhs, device) = ops.state;
let [indices_4lhs, indices_4rhs] = duplicate(&ops.parents, Some(indices));
binary::<B, _, _>(
ops.parents,
ops.node,
grads,
|grad| {
let zeros = B::float_zeros(shape_lhs, &device);
B::float_scatter(dim, grad, indices_4lhs.unwrap(), zeros)
},
|grad| {
let zeros = B::float_zeros(shape_rhs, &device);
B::float_scatter(dim, zeros, indices_4rhs.unwrap(), grad)
},
);
}
}
match Scatter
.prepare::<C>([tensor.node, value.node])
.compute_bound()
.stateful()
{
OpsKind::Tracked(prep) => prep.finish(
(
dim,
indices.clone(),
tensor.primitive.shape(),
value.primitive.shape(),
B::float_device(&value.primitive),
),
B::float_scatter(dim, tensor.primitive, indices, value.primitive),
),
OpsKind::UnTracked(prep) => prep.finish(B::float_scatter(
dim,
tensor.primitive,
indices,
value.primitive,
)),
}
}
// Differentiable index-select of `tensor` along `dim` at `indices`.
fn float_select(
tensor: FloatTensor<Self>,
dim: usize,
indices: IntTensor<B>,
) -> FloatTensor<Self> {
#[derive(Debug)]
struct Select;
// Memory-bound op: the forward select is recomputed from the saved input
// instead of storing the output.
#[derive(new, Debug)]
struct RetroSelect<B: Backend> {
input_id: NodeID,
dim: usize,
indices: IntTensor<B>,
}
impl<B: Backend> RetroForward for RetroSelect<B> {
fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);
let out = B::float_select(input, self.dim, self.indices.clone());
states.save(out_node, out)
}
}
impl<B: Backend> Backward<B, 1> for Select {
type State = (usize, IntTensor<B>, Shape, B::Device);
fn backward(
self,
ops: Ops<Self::State, 1>,
grads: &mut Gradients,
_checkpointer: &mut Checkpointer,
) {
let (dim, indices, shape, device) = ops.state;
// Gradient of select: assign the incoming grad back into a zero tensor
// of the input's shape at the selected indices.
unary::<B, _>(ops.parents, ops.node, grads, |grad| {
let zeros = B::float_zeros(shape, &device);
B::float_select_assign(zeros, dim, indices, grad)
});
}
}
match Select
.prepare::<C>([tensor.node.clone()])
.memory_bound()
.retro_forward(RetroSelect::<B>::new(tensor.node.id, dim, indices.clone()))
.parents([&tensor])
.stateful()
{
OpsKind::Tracked(prep) => prep.finish(
(
dim,
indices.clone(),
tensor.primitive.shape(),
B::float_device(&tensor.primitive),
),
B::float_select(tensor.primitive, dim, indices),
),
OpsKind::UnTracked(prep) => {
prep.finish(B::float_select(tensor.primitive, dim, indices))
}
}
}
// Differentiable select-assign: writes `value` into `tensor` along `dim`
// at `indices`.
fn float_select_assign(
tensor: FloatTensor<Self>,
dim: usize,
indices: IntTensor<B>,
value: FloatTensor<Self>,
) -> FloatTensor<Self> {
#[derive(Debug)]
struct IndexSelectDimAssign;
// Memory-bound op: recompute the forward result from the two saved inputs.
#[derive(new, Debug)]
struct RetroSelectAssign<B: Backend> {
tensor_id: NodeID,
dim: usize,
indices: IntTensor<B>,
value_id: NodeID,
}
impl<B: Backend> RetroForward for RetroSelectAssign<B> {
fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);
let value = states.get_state::<B::FloatTensorPrimitive>(&self.value_id);
let out = B::float_select_assign(tensor, self.dim, self.indices.clone(), value);
states.save(out_node, out)
}
}
impl<B: Backend> Backward<B, 2> for IndexSelectDimAssign {
type State = (usize, IntTensor<B>);
fn backward(
self,
ops: Ops<Self::State, 2>,
grads: &mut Gradients,
_checkpointer: &mut Checkpointer,
) {
let (dim, indices) = ops.state;
binary::<B, _, _>(
ops.parents,
ops.node,
grads,
// Gradient w.r.t. `tensor` is the incoming grad unchanged.
|grad| grad,
// Gradient w.r.t. `value` is the grad selected at the assigned indices.
|grad| B::float_select(grad, dim, indices),
);
}
}
match IndexSelectDimAssign
.prepare::<C>([tensor.node.clone(), value.node.clone()])
.memory_bound()
.retro_forward(RetroSelectAssign::<B>::new(
tensor.node.id,
dim,
indices.clone(),
value.node.id,
))
.parents([&tensor, &value])
.stateful()
{
OpsKind::Tracked(prep) => prep.finish(
(dim, indices.clone()),
B::float_select_assign(tensor.primitive, dim, indices, value.primitive),
),
OpsKind::UnTracked(prep) => prep.finish(B::float_select_assign(
tensor.primitive,
dim,
indices,
value.primitive,
)),
}
}
// Differentiable slicing of `tensor` over the given per-dimension ranges.
fn float_slice(
tensor: FloatTensor<Self>,
ranges: &[core::ops::Range<usize>],
) -> FloatTensor<Self> {
#[derive(Debug)]
struct Index;
// Memory-bound op: re-slice the saved input instead of storing the output.
#[derive(new, Debug)]
struct RetroSlice<B: Backend> {
tensor_id: NodeID,
ranges: Vec<core::ops::Range<usize>>,
_backend: PhantomData<B>,
}
impl<B: Backend> RetroForward for RetroSlice<B> {
fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);
let out = B::float_slice(tensor, &self.ranges);
states.save(out_node, out)
}
}
impl<B: Backend> Backward<B, 1> for Index {
type State = (Vec<core::ops::Range<usize>>, Shape, B::Device);
fn backward(
self,
ops: Ops<Self::State, 1>,
grads: &mut Gradients,
_checkpointer: &mut Checkpointer,
) {
let (ranges, shape, device) = ops.state;
// Gradient of slice: paste the grad into a zero tensor of the input's
// shape at the sliced region.
unary::<B, _>(ops.parents, ops.node, grads, |grad| {
let zeros = B::float_zeros(shape, &device);
B::float_slice_assign(zeros, &ranges, grad)
});
}
}
match Index
.prepare::<C>([tensor.node.clone()])
.memory_bound()
.retro_forward(RetroSlice::<B>::new(tensor.node.id, ranges.to_vec()))
.parents([&tensor])
.stateful()
{
OpsKind::Tracked(prep) => prep.finish(
(
ranges.to_vec(),
tensor.primitive.shape(),
B::float_device(&tensor.primitive),
),
B::float_slice(tensor.primitive, ranges),
),
OpsKind::UnTracked(prep) => prep.finish(B::float_slice(tensor.primitive, ranges)),
}
}
// Differentiable slice-assign: writes `value` into `tensor` over `ranges`.
fn float_slice_assign(
tensor: FloatTensor<Self>,
ranges: &[core::ops::Range<usize>],
value: FloatTensor<Self>,
) -> FloatTensor<Self> {
#[derive(Debug)]
struct SliceAssign;
// Memory-bound op: recompute the forward result from the two saved inputs.
#[derive(new, Debug)]
struct RetroSliceAssign<B: Backend> {
tensor_id: NodeID,
ranges: Vec<core::ops::Range<usize>>,
value_id: NodeID,
_backend: PhantomData<B>,
}
impl<B: Backend> RetroForward for RetroSliceAssign<B> {
fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);
let value = states.get_state::<B::FloatTensorPrimitive>(&self.value_id);
let out = B::float_slice_assign(tensor, &self.ranges, value);
states.save(out_node, out)
}
}
impl<B: Backend> Backward<B, 2> for SliceAssign {
type State = (Vec<core::ops::Range<usize>>, Shape, B::Device);
fn backward(
self,
ops: Ops<Self::State, 2>,
grads: &mut Gradients,
_checkpointer: &mut Checkpointer,
) {
let (ranges, shape_rhs, device) = ops.state;
// Clone `ranges` only for the parents that require gradients.
let [ranges_4lhs, ranges_4rhs] = duplicate(&ops.parents, Some(ranges));
binary::<B, _, _>(
ops.parents,
ops.node,
grads,
|grad| {
// lhs gradient: zero out the overwritten region of the grad.
let zeros = B::float_zeros(shape_rhs, &device);
B::float_slice_assign(grad, &ranges_4lhs.unwrap(), zeros)
},
// rhs gradient: the grad restricted to the assigned region.
|grad| B::float_slice(grad, &ranges_4rhs.unwrap()),
);
}
}
match SliceAssign
.prepare::<C>([tensor.node.clone(), value.node.clone()])
.memory_bound()
.retro_forward(RetroSliceAssign::<B>::new(
tensor.node.id,
ranges.to_vec(),
value.node.id,
))
.parents([&tensor, &value])
.stateful()
{
OpsKind::Tracked(prep) => prep.finish(
(
ranges.to_vec(),
value.primitive.shape(),
B::float_device(&value.primitive),
),
B::float_slice_assign(tensor.primitive, ranges, value.primitive),
),
OpsKind::UnTracked(prep) => prep.finish(B::float_slice_assign(
tensor.primitive,
ranges,
value.primitive,
)),
}
}
// Differentiable masked where: picks `source` where `mask` is set, `tensor`
// elsewhere. Gradients flow to each operand only where it was selected.
fn float_mask_where(
tensor: FloatTensor<Self>,
mask: BoolTensor<Self>,
source: FloatTensor<Self>,
) -> FloatTensor<Self> {
#[derive(Debug)]
struct MaskWhere;
impl<B: Backend> Backward<B, 2> for MaskWhere {
type State = (BoolTensor<B>, Shape, Shape, B::Device);
fn backward(
self,
ops: Ops<Self::State, 2>,
grads: &mut Gradients,
_checkpointer: &mut Checkpointer,
) {
let (mask, shape_lhs, shape_rhs, device) = ops.state;
// Clone the mask only for parents that require gradients.
let [mask_4lhs, mask_4rhs] = duplicate(&ops.parents, Some(mask));
binary::<B, _, _>(
ops.parents,
ops.node,
grads,
|grad| {
// lhs gradient: zero where the mask selected `source`, then
// reduce broadcast dimensions back to the lhs shape.
let zeros = B::float_zeros(shape_lhs.clone(), &device);
let grad = B::float_mask_where(grad, mask_4lhs.unwrap(), zeros);
broadcast_shape::<B>(grad, &shape_lhs)
},
|grad| {
// rhs gradient: keep only where the mask selected `source`.
let zeros = B::float_zeros(shape_rhs.clone(), &device);
let grad = B::float_mask_where(zeros, mask_4rhs.unwrap(), grad);
broadcast_shape::<B>(grad, &shape_rhs)
},
);
}
}
match MaskWhere
.prepare::<C>([tensor.node, source.node])
.compute_bound()
.stateful()
{
OpsKind::Tracked(prep) => prep.finish(
(
mask.clone(),
tensor.primitive.shape(),
source.primitive.shape(),
B::float_device(&source.primitive),
),
B::float_mask_where(tensor.primitive, mask, source.primitive),
),
OpsKind::UnTracked(prep) => prep.finish(B::float_mask_where(
tensor.primitive,
mask,
source.primitive,
)),
}
}
// Differentiable masked fill: sets `tensor` to the scalar `value` wherever
// `mask` is set. Filled positions get zero gradient.
fn float_mask_fill(
tensor: FloatTensor<Self>,
mask: BoolTensor<B>,
value: FloatElem<B>,
) -> FloatTensor<Self> {
#[derive(Debug)]
struct MaskFill;
impl<B: Backend> Backward<B, 1> for MaskFill {
type State = BoolTensor<B>;
fn backward(
self,
ops: Ops<Self::State, 1>,
grads: &mut Gradients,
_checkpointer: &mut Checkpointer,
) {
// Zero the grad at masked positions; the fill value is a constant.
unary::<B, _>(ops.parents, ops.node, grads, |grad| {
B::float_mask_fill(grad, ops.state, 0.elem())
});
}
}
match MaskFill
.prepare::<C>([tensor.node])
.compute_bound()
.stateful()
{
OpsKind::Tracked(prep) => prep.finish(
mask.clone(),
B::float_mask_fill(tensor.primitive, mask, value),
),
OpsKind::UnTracked(prep) => {
prep.finish(B::float_mask_fill(tensor.primitive, mask, value))
}
}
}
// Element-wise equality; comparisons carry no gradient, so this delegates
// straight to the inner backend without creating an autodiff node.
fn float_equal(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {
    let (lhs, rhs) = (lhs.primitive, rhs.primitive);
    B::float_equal(lhs, rhs)
}
// Element-wise equality against a scalar; no autodiff node is created.
fn float_equal_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {
    let inner = lhs.primitive;
    B::float_equal_elem(inner, rhs)
}
// Element-wise `>` comparison; no autodiff node is created.
fn float_greater(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {
    let (lhs, rhs) = (lhs.primitive, rhs.primitive);
    B::float_greater(lhs, rhs)
}
// Element-wise `>` against a scalar; no autodiff node is created.
fn float_greater_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {
    let inner = lhs.primitive;
    B::float_greater_elem(inner, rhs)
}
// Element-wise `>=` comparison; no autodiff node is created.
fn float_greater_equal(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {
    let (lhs, rhs) = (lhs.primitive, rhs.primitive);
    B::float_greater_equal(lhs, rhs)
}
// Element-wise `>=` against a scalar; no autodiff node is created.
fn float_greater_equal_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {
    let inner = lhs.primitive;
    B::float_greater_equal_elem(inner, rhs)
}
// Element-wise `<` comparison; no autodiff node is created.
fn float_lower(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {
    let (lhs, rhs) = (lhs.primitive, rhs.primitive);
    B::float_lower(lhs, rhs)
}
// Element-wise `<` against a scalar; no autodiff node is created.
fn float_lower_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {
    let inner = lhs.primitive;
    B::float_lower_elem(inner, rhs)
}
// Element-wise `<=` comparison; no autodiff node is created.
fn float_lower_equal(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {
    let (lhs, rhs) = (lhs.primitive, rhs.primitive);
    B::float_lower_equal(lhs, rhs)
}
// Element-wise `<=` against a scalar; no autodiff node is created.
fn float_lower_equal_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {
    let inner = lhs.primitive;
    B::float_lower_equal_elem(inner, rhs)
}
// Detach the tensor from the autodiff graph while preserving its
// `require_grad` setting on the fresh node.
fn float_detach(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
    let requires_grad = Self::float_is_require_grad(&tensor);
    let detached = AutodiffTensor::new(tensor.primitive);
    if requires_grad {
        detached.require_grad()
    } else {
        detached
    }
}
// Toggle gradient tracking: either mark the existing node as requiring
// gradients, or rebuild a fresh (untracked) autodiff tensor.
fn float_set_require_grad(tensor: FloatTensor<Self>, require_grad: bool) -> FloatTensor<Self> {
    if require_grad {
        tensor.require_grad()
    } else {
        AutodiffTensor::new(tensor.primitive)
    }
}
// Whether this tensor's node is explicitly marked as requiring gradients.
fn float_is_require_grad(tensor: &FloatTensor<Self>) -> bool {
matches!(tensor.node.requirement, Requirement::Grad)
}
// Mean over all elements. Backward spreads the scalar grad uniformly,
// scaled by 1/N.
fn float_mean(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
#[derive(Debug)]
struct Mean;
impl<B: Backend> Backward<B, 1> for Mean {
// The input shape, needed to rebuild the full-size gradient.
type State = Shape;
fn backward(
self,
ops: Ops<Self::State, 1>,
grads: &mut Gradients,
_checkpointer: &mut Checkpointer,
) {
unary::<B, _>(ops.parents, ops.node, grads, |grad| {
let shape = ops.state;
// d(mean)/dx_i = 1/N for every element.
let val = 1_f64 / shape.num_elements() as f64;
let ones = B::float_ones(shape, &B::float_device(&grad));
let val = B::float_mul_scalar(ones, val.elem());
// Unsqueeze the scalar grad so it broadcasts against the input shape.
let grad = unsqueeze_like::<B>(grad, val.shape());
B::float_mul(val, grad)
});
}
}
match Mean.prepare::<C>([tensor.node]).compute_bound().stateful() {
OpsKind::Tracked(prep) => {
prep.finish(tensor.primitive.shape(), B::float_mean(tensor.primitive))
}
OpsKind::UnTracked(prep) => prep.finish(B::float_mean(tensor.primitive)),
}
}
// Sum over all elements. Backward broadcasts the scalar grad to every
// element (derivative of a sum is 1).
fn float_sum(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
#[derive(Debug)]
struct Sum;
impl<B: Backend> Backward<B, 1> for Sum {
// The input shape, needed to rebuild the full-size gradient.
type State = Shape;
fn backward(
self,
ops: Ops<Self::State, 1>,
grads: &mut Gradients,
_checkpointer: &mut Checkpointer,
) {
unary::<B, _>(ops.parents, ops.node, grads, |grad| {
let val = B::float_ones(ops.state, &B::float_device(&grad));
// Unsqueeze the scalar grad so it broadcasts against the input shape.
let grad = unsqueeze_like::<B>(grad, val.shape());
B::float_mul(val, grad)
});
}
}
match Sum.prepare::<C>([tensor.node]).compute_bound().stateful() {
OpsKind::Tracked(prep) => {
prep.finish(tensor.primitive.shape(), B::float_sum(tensor.primitive))
}
OpsKind::UnTracked(prep) => prep.finish(B::float_sum(tensor.primitive)),
}
}
// Mean along one dimension. Backward scales by 1/dim_size and broadcasts
// the reduced grad back along `dim`.
fn float_mean_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {
#[derive(Debug)]
struct MeanDim;
impl<B: Backend> Backward<B, 1> for MeanDim {
// (input shape, reduced dim).
type State = (Shape, usize);
fn backward(
self,
ops: Ops<Self::State, 1>,
grads: &mut Gradients,
_checkpointer: &mut Checkpointer,
) {
let (shape, dim) = ops.state;
unary::<B, _>(ops.parents, ops.node, grads, |grad| {
// d(mean_dim)/dx = 1 / size of the reduced dimension.
let val = 1_f64 / shape.dims[dim] as f64;
let ones = B::float_ones(shape, &B::float_device(&grad));
let val = B::float_mul_scalar(ones, B::FloatElem::from_elem(val));
let grad = B::float_sum_dim(grad, dim);
B::float_mul(val, grad)
});
}
}
match MeanDim
.prepare::<C>([tensor.node])
.compute_bound()
.stateful()
{
OpsKind::Tracked(prep) => prep.finish(
(tensor.primitive.shape(), dim),
B::float_mean_dim(tensor.primitive, dim),
),
OpsKind::UnTracked(prep) => prep.finish(B::float_mean_dim(tensor.primitive, dim)),
}
}
// Sum along one dimension. Backward broadcasts the reduced grad back
// along `dim`.
fn float_sum_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {
#[derive(Debug)]
struct SumDim;
impl<B: Backend> Backward<B, 1> for SumDim {
// (input shape, reduced dim).
type State = (Shape, usize);
fn backward(
self,
ops: Ops<Self::State, 1>,
grads: &mut Gradients,
_checkpointer: &mut Checkpointer,
) {
let (shape, dim) = ops.state;
unary::<B, _>(ops.parents, ops.node, grads, |grad| {
// Multiply by ones of the input shape to broadcast the grad.
let ones = B::float_ones(shape, &B::float_device(&grad));
let grad = B::float_sum_dim(grad, dim);
B::float_mul(ones, grad)
});
}
}
match SumDim
.prepare::<C>([tensor.node])
.compute_bound()
.stateful()
{
OpsKind::Tracked(prep) => prep.finish(
(tensor.primitive.shape(), dim),
B::float_sum_dim(tensor.primitive, dim),
),
OpsKind::UnTracked(prep) => prep.finish(B::float_sum_dim(tensor.primitive, dim)),
}
}
// Argmax returns integer indices, which are non-differentiable, so no
// autodiff node is created.
fn float_argmax(tensor: FloatTensor<Self>, dim: usize) -> IntTensor<B> {
    let inner = tensor.primitive;
    B::float_argmax(inner, dim)
}
// Argmin returns integer indices, which are non-differentiable, so no
// autodiff node is created.
fn float_argmin(tensor: FloatTensor<Self>, dim: usize) -> IntTensor<B> {
    let inner = tensor.primitive;
    B::float_argmin(inner, dim)
}
// Element-wise exponential. d/dx e^x = e^x, recomputed in backward from the
// checkpointed input.
fn float_exp(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
#[derive(Debug)]
struct Exp;
retro_unary!(RetroExp, B::float_exp);
impl<B: Backend> Backward<B, 1> for Exp {
// NodeID of the checkpointed input.
type State = NodeID;
fn backward(
self,
ops: Ops<Self::State, 1>,
grads: &mut Gradients,
checkpointer: &mut Checkpointer,
) {
let input = checkpointer.retrieve_node_output(ops.state);
let output = B::float_exp(input);
unary::<B, _>(ops.parents, ops.node, grads, |grad| {
B::float_mul(grad, output)
});
}
}
match Exp
.prepare::<C>([tensor.node.clone()])
.memory_bound()
.retro_forward(RetroExp::<B>::new(tensor.node.id))
.parents([&tensor])
.stateful()
{
OpsKind::Tracked(mut prep) => {
let state = prep.checkpoint(&tensor);
prep.finish(state, B::float_exp(tensor.primitive))
}
OpsKind::UnTracked(prep) => prep.finish(B::float_exp(tensor.primitive)),
}
}
// Element-wise natural log. d/dx ln(x) = 1/x (computed as x^-1).
fn float_log(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
#[derive(Debug)]
struct Log;
retro_unary!(RetroLog, B::float_log);
impl<B: Backend> Backward<B, 1> for Log {
// NodeID of the checkpointed input.
type State = NodeID;
fn backward(
self,
ops: Ops<Self::State, 1>,
grads: &mut Gradients,
checkpointer: &mut Checkpointer,
) {
let input = checkpointer.retrieve_node_output(ops.state);
unary::<B, _>(ops.parents, ops.node, grads, |grad| {
let value = B::float_powf_scalar(input, -1.0);
B::float_mul(grad, value)
});
}
}
match Log
.prepare::<C>([tensor.node.clone()])
.memory_bound()
.retro_forward(RetroLog::<B>::new(tensor.node.id))
.parents([&tensor])
.stateful()
{
OpsKind::Tracked(mut prep) => {
let state = prep.checkpoint(&tensor);
prep.finish(state, B::float_log(tensor.primitive))
}
OpsKind::UnTracked(prep) => prep.finish(B::float_log(tensor.primitive)),
}
}
// Element-wise log(1 + x). d/dx log1p(x) = 1/(x + 1).
fn float_log1p(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
#[derive(Debug)]
struct Log1P;
retro_unary!(RetroLog1P, B::float_log1p);
impl<B: Backend> Backward<B, 1> for Log1P {
// NodeID of the checkpointed input.
type State = NodeID;
fn backward(
self,
ops: Ops<Self::State, 1>,
grads: &mut Gradients,
checkpointer: &mut Checkpointer,
) {
let input = checkpointer.retrieve_node_output(ops.state);
unary::<B, _>(ops.parents, ops.node, grads, |grad| {
let value = B::float_add_scalar(input, 1.elem());
let value = B::float_powf_scalar(value, -1.0);
B::float_mul(grad, value)
});
}
}
match Log1P
.prepare::<C>([tensor.node.clone()])
.memory_bound()
.retro_forward(RetroLog1P::<B>::new(tensor.node.id))
.parents([&tensor])
.stateful()
{
OpsKind::Tracked(mut prep) => {
let state = prep.checkpoint(&tensor);
prep.finish(state, B::float_log1p(tensor.primitive))
}
OpsKind::UnTracked(prep) => prep.finish(B::float_log1p(tensor.primitive)),
}
}
// Element-wise power with a scalar exponent. d/dx x^v = v * x^(v-1).
fn float_powf_scalar(tensor: FloatTensor<Self>, value: f32) -> FloatTensor<Self> {
#[derive(Debug)]
struct PowfScalar;
// Recomputes the forward pow from the saved base (memory-bound op).
#[derive(new, Debug)]
struct RetroPowfScalar<B: Backend> {
lhs_id: NodeID,
rhs: f32,
_backend: PhantomData<B>,
}
impl<B: Backend> RetroForward for RetroPowfScalar<B> {
fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
let lhs = states.get_state::<B::FloatTensorPrimitive>(&self.lhs_id);
let out = B::float_powf_scalar(lhs, self.rhs);
states.save(out_node, out)
}
}
impl<B: Backend> Backward<B, 1> for PowfScalar {
// (checkpointed input node, scalar exponent).
type State = (NodeID, f32);
fn backward(
self,
ops: Ops<Self::State, 1>,
grads: &mut Gradients,
checkpointer: &mut Checkpointer,
) {
let (tensor_id, value) = ops.state;
let tensor = checkpointer.retrieve_node_output(tensor_id);
unary::<B, _>(ops.parents, ops.node, grads, |grad| {
let tmp = B::float_powf_scalar(tensor, value - 1.0);
let value = B::float_mul_scalar(tmp, value.elem());
B::float_mul(grad, value)
});
}
}
match PowfScalar
.prepare::<C>([tensor.node.clone()])
.memory_bound()
.retro_forward(RetroPowfScalar::<B>::new(tensor.node.id, value))
.parents([&tensor])
.stateful()
{
OpsKind::Tracked(mut prep) => {
let state = (prep.checkpoint(&tensor), value);
prep.finish(state, B::float_powf_scalar(tensor.primitive, value))
}
OpsKind::UnTracked(prep) => prep.finish(B::float_powf_scalar(tensor.primitive, value)),
}
}
// Element-wise square root. d/dx sqrt(x) = x^(-1/2) / 2 = 1/(2*sqrt(x)).
fn float_sqrt(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
#[derive(Debug)]
struct Sqrt;
retro_unary!(RetroSqrt, B::float_sqrt);
impl<B: Backend> Backward<B, 1> for Sqrt {
// NodeID of the checkpointed input.
type State = NodeID;
fn backward(
self,
ops: Ops<Self::State, 1>,
grads: &mut Gradients,
checkpointer: &mut Checkpointer,
) {
let input = checkpointer.retrieve_node_output(ops.state);
unary::<B, _>(ops.parents, ops.node, grads, |grad| {
let value = B::float_div_scalar(B::float_powf_scalar(input, -0.5), 2.elem());
B::float_mul(grad, value)
});
}
}
match Sqrt
.prepare::<C>([tensor.node.clone()])
.memory_bound()
.retro_forward(RetroSqrt::<B>::new(tensor.node.id))
.parents([&tensor])
.stateful()
{
OpsKind::Tracked(mut prep) => {
let state = prep.checkpoint(&tensor);
prep.finish(state, B::float_sqrt(tensor.primitive))
}
OpsKind::UnTracked(prep) => prep.finish(B::float_sqrt(tensor.primitive)),
}
}
// Element-wise absolute value. d/dx |x| = sign(x).
fn float_abs(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
#[derive(Debug)]
struct Abs;
retro_unary!(RetroAbs, B::float_abs);
impl<B: Backend> Backward<B, 1> for Abs {
// NodeID of the checkpointed input.
type State = NodeID;
fn backward(
self,
ops: Ops<Self::State, 1>,
grads: &mut Gradients,
checkpointer: &mut Checkpointer,
) {
let tensor: B::FloatTensorPrimitive = checkpointer.retrieve_node_output(ops.state);
let state = B::float_sign(tensor);
unary::<B, _>(ops.parents, ops.node, grads, |grad| {
B::float_mul(grad, state)
});
}
}
match Abs
.prepare::<C>([tensor.node.clone()])
.memory_bound()
.retro_forward(RetroAbs::<B>::new(tensor.node.id))
.parents([&tensor])
.stateful()
{
OpsKind::Tracked(mut prep) => {
let state = prep.checkpoint(&tensor);
prep.finish(state, B::float_abs(tensor.primitive))
}
OpsKind::UnTracked(prep) => prep.finish(B::float_abs(tensor.primitive)),
}
}
// Element-wise cosine. d/dx cos(x) = -sin(x).
fn float_cos(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
#[derive(Debug)]
struct Cos;
retro_unary!(RetroCos, B::float_cos);
impl<B: Backend> Backward<B, 1> for Cos {
// NodeID of the checkpointed input.
type State = NodeID;
fn backward(
self,
ops: Ops<Self::State, 1>,
grads: &mut Gradients,
checkpointer: &mut Checkpointer,
) {
let input = checkpointer.retrieve_node_output(ops.state);
unary::<B, _>(ops.parents, ops.node, grads, |grad| {
let value = B::float_neg(B::float_sin(input));
B::float_mul(grad, value)
});
}
}
match Cos
.prepare::<C>([tensor.node.clone()])
.memory_bound()
.retro_forward(RetroCos::<B>::new(tensor.node.id))
.parents([&tensor])
.stateful()
{
OpsKind::Tracked(mut prep) => {
let state = prep.checkpoint(&tensor);
prep.finish(state, B::float_cos(tensor.primitive))
}
OpsKind::UnTracked(prep) => prep.finish(B::float_cos(tensor.primitive)),
}
}
// Element-wise sine. d/dx sin(x) = cos(x).
fn float_sin(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
#[derive(Debug)]
struct Sin;
retro_unary!(RetroSin, B::float_sin);
impl<B: Backend> Backward<B, 1> for Sin {
// NodeID of the checkpointed input.
type State = NodeID;
fn backward(
self,
ops: Ops<Self::State, 1>,
grads: &mut Gradients,
checkpointer: &mut Checkpointer,
) {
let state = checkpointer.retrieve_node_output(ops.state);
unary::<B, _>(ops.parents, ops.node, grads, |grad| {
let value = B::float_cos(state);
B::float_mul(grad, value)
});
}
}
match Sin
.prepare::<C>([tensor.node.clone()])
.memory_bound()
.retro_forward(RetroSin::<B>::new(tensor.node.id))
.parents([&tensor])
.stateful()
{
OpsKind::Tracked(mut prep) => {
let state = prep.checkpoint(&tensor);
prep.finish(state, B::float_sin(tensor.primitive))
}
OpsKind::UnTracked(prep) => prep.finish(B::float_sin(tensor.primitive)),
}
}
// Element-wise tanh. d/dx tanh(x) = 1 - tanh(x)^2, recomputed from the
// checkpointed input.
fn float_tanh(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
#[derive(Debug)]
struct Tanh;
retro_unary!(RetroTanh, B::float_tanh);
impl<B: Backend> Backward<B, 1> for Tanh {
// NodeID of the checkpointed input.
type State = NodeID;
fn backward(
self,
ops: Ops<Self::State, 1>,
grads: &mut Gradients,
checkpointer: &mut Checkpointer,
) {
let input = checkpointer.retrieve_node_output(ops.state);
let state = B::float_tanh(input);
unary::<B, _>(ops.parents, ops.node, grads, |grad| {
let value = B::float_add_scalar(
B::float_neg(B::float_powf_scalar(state, 2.0)),
1.elem(),
);
B::float_mul(grad, value)
});
}
}
match Tanh
.prepare::<C>([tensor.node.clone()])
.memory_bound()
.retro_forward(RetroTanh::<B>::new(tensor.node.id))
.parents([&tensor])
.stateful()
{
OpsKind::Tracked(mut prep) => {
let state = prep.checkpoint(&tensor);
prep.finish(state, B::float_tanh(tensor.primitive))
}
OpsKind::UnTracked(prep) => prep.finish(B::float_tanh(tensor.primitive)),
}
}
// Element-wise rounding. The derivative is zero almost everywhere, so
// backward returns a zero tensor of the input's shape.
fn float_round(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
#[derive(Debug)]
struct Round;
retro_unary!(RetroRound, B::float_round);
impl<B: Backend> Backward<B, 1> for Round {
// (input shape, device) to build the zero gradient.
type State = (Shape, B::Device);
fn backward(
self,
ops: Ops<Self::State, 1>,
grads: &mut Gradients,
_checkpointer: &mut Checkpointer,
) {
let (shape, device) = ops.state;
unary::<B, _>(ops.parents, ops.node, grads, |_grad| {
B::float_zeros(shape, &device)
})
}
}
match Round
.prepare::<C>([tensor.node.clone()])
.memory_bound()
.retro_forward(RetroRound::<B>::new(tensor.node.id))
.parents([&tensor])
.stateful()
{
OpsKind::Tracked(preps) => preps.finish(
(tensor.primitive.shape(), B::float_device(&tensor.primitive)),
B::float_round(tensor.primitive),
),
OpsKind::UnTracked(preps) => preps.finish(B::float_round(tensor.primitive)),
}
}
// Element-wise floor. The derivative is zero almost everywhere, so
// backward returns a zero tensor of the input's shape.
fn float_floor(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
#[derive(Debug)]
struct Floor;
retro_unary!(RetroFloor, B::float_floor);
impl<B: Backend> Backward<B, 1> for Floor {
// (input shape, device) to build the zero gradient.
type State = (Shape, B::Device);
fn backward(
self,
ops: Ops<Self::State, 1>,
grads: &mut Gradients,
_checkpointer: &mut Checkpointer,
) {
let (shape, device) = ops.state;
unary::<B, _>(ops.parents, ops.node, grads, |_grad| {
B::float_zeros(shape, &device)
})
}
}
match Floor
.prepare::<C>([tensor.node.clone()])
.memory_bound()
.retro_forward(RetroFloor::<B>::new(tensor.node.id))
.parents([&tensor])
.stateful()
{
OpsKind::Tracked(preps) => preps.finish(
(tensor.primitive.shape(), B::float_device(&tensor.primitive)),
B::float_floor(tensor.primitive),
),
OpsKind::UnTracked(preps) => preps.finish(B::float_floor(tensor.primitive)),
}
}
// Element-wise ceiling. The derivative is zero almost everywhere, so
// backward returns a zero tensor of the input's shape.
//
// Bug fix: both match arms previously called `B::float_floor`, so `ceil` on
// the autodiff backend actually computed a floor (while the retro-forward
// correctly used `B::float_ceil`, making recomputation inconsistent too).
fn float_ceil(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
    #[derive(Debug)]
    struct Ceil;
    retro_unary!(RetroCeil, B::float_ceil);
    impl<B: Backend> Backward<B, 1> for Ceil {
        // (input shape, device) to build the zero gradient.
        type State = (Shape, B::Device);
        fn backward(
            self,
            ops: Ops<Self::State, 1>,
            grads: &mut Gradients,
            _checkpointer: &mut Checkpointer,
        ) {
            let (shape, device) = ops.state;
            unary::<B, _>(ops.parents, ops.node, grads, |_grad| {
                B::float_zeros(shape, &device)
            })
        }
    }
    match Ceil
        .prepare::<C>([tensor.node.clone()])
        .memory_bound()
        .retro_forward(RetroCeil::<B>::new(tensor.node.id))
        .parents([&tensor])
        .stateful()
    {
        OpsKind::Tracked(preps) => preps.finish(
            (tensor.primitive.shape(), B::float_device(&tensor.primitive)),
            // Was `B::float_floor` — now correctly forwards to `float_ceil`.
            B::float_ceil(tensor.primitive),
        ),
        OpsKind::UnTracked(preps) => preps.finish(B::float_ceil(tensor.primitive)),
    }
}
// Element-wise error function. d/dx erf(x) = 2/sqrt(pi) * exp(-x^2).
fn float_erf(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
#[derive(Debug)]
struct Erf;
retro_unary!(RetroErf, B::float_erf);
impl<B: Backend> Backward<B, 1> for Erf {
// NodeID of the checkpointed input.
type State = NodeID;
fn backward(
self,
ops: Ops<Self::State, 1>,
grads: &mut Gradients,
checkpointer: &mut Checkpointer,
) {
unary::<B, _>(ops.parents, ops.node, grads, |grad| {
let ops = checkpointer.retrieve_node_output(ops.state);
let exponent = B::float_neg(B::float_powf_scalar(ops, 2.0));
let numerator = B::float_mul_scalar(B::float_exp(exponent), 2.0.elem());
let denominator = core::f64::consts::PI.sqrt().elem();
let value = B::float_div_scalar(numerator, denominator);
B::float_mul(grad, value)
});
}
}
match Erf
.prepare::<C>([tensor.node.clone()])
.memory_bound()
.retro_forward(RetroErf::<B>::new(tensor.node.id))
.parents([&tensor])
.stateful()
{
OpsKind::Tracked(mut prep) => {
let state = prep.checkpoint(&tensor);
prep.finish(state, B::float_erf(tensor.primitive))
}
OpsKind::UnTracked(prep) => prep.finish(B::float_erf(tensor.primitive)),
}
}
// Differentiable concatenation along `dim`. Uses a hand-rolled `Step`
// instead of the unary/binary helpers because it has a variable number of
// parents; backward slices the grad back into per-input chunks.
fn float_cat(tensors: Vec<FloatTensor<Self>>, dim: usize) -> FloatTensor<Self> {
#[derive(new, Debug)]
struct CatStep<B: Backend> {
nodes: Vec<Option<NodeRef>>,
// The dimension of each tensor along the dim dimension.
// This indicates the number of dimension concatenated for each tensor.
dim_sizes: Vec<usize>,
output: NodeRef,
phantom: PhantomData<B>,
dim: usize,
}
impl<B: Backend> Step for CatStep<B> {
fn step(self: Box<Self>, grads: &mut Gradients, _checkpointer: &mut Checkpointer) {
let grad = grads.consume::<B>(&self.output);
// Full ranges over every dimension; only `dim` is narrowed per input.
let ranges: Vec<_> = grad.shape().dims.iter().map(|v| 0..*v).collect();
let mut current_index = 0;
self.nodes
.into_iter()
.zip(self.dim_sizes)
// Skip inputs that do not require gradients (node is None).
.filter_map(|(node, dim_size)| node.map(|node| (node, dim_size)))
.for_each(|(node, dim_size)| {
let mut ranges = ranges.clone();
ranges[self.dim] = current_index..dim_size + current_index;
current_index += dim_size;
grads.register::<B>(node.id, B::float_slice(grad.clone(), &ranges));
});
}
fn node(&self) -> NodeID {
self.output.id
}
fn parents(&self) -> Vec<NodeID> {
self.nodes
.iter()
.filter_map(|node| node.clone())
.map(|node| node.id)
.collect()
}
fn depth(&self) -> usize {
self.output.order
}
}
let mut nodes = Vec::with_capacity(tensors.len());
let mut primitives = Vec::with_capacity(tensors.len());
let mut dim_sizes = Vec::with_capacity(tensors.len());
tensors.into_iter().for_each(|tensor| {
dim_sizes.push(tensor.primitive.shape().dims[dim]);
nodes.push(tensor.node);
primitives.push(tensor.primitive);
});
let requirement = Requirement::from_nodes(&nodes);
// For simplicity, this operation does not checkpoint anything
let cat_computing_property = ComputingProperty::Ambiguous;
let checkpointer_builder = CheckpointerBuilder::default();
let output = B::float_cat(primitives, dim);
// No input requires gradients: skip registering a backward step.
if requirement.is_none() {
return AutodiffTensor::from_parents(
output,
&nodes,
requirement,
cat_computing_property,
);
}
let output =
AutodiffTensor::from_parents(output, &nodes, requirement, cat_computing_property);
let nodes = nodes
.into_iter()
.map(|node| node.clone_if_require_grad())
.collect::<Vec<_>>();
let ops = CatStep::<B>::new(nodes, dim_sizes, output.node.clone(), dim);
output.register_step(ops, checkpointer_builder)
}
// Max along one dimension. When tracked, the argmax indices are saved so
// the shared `MaxMinDim` backward can route the grad to the max elements.
fn float_max_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {
match MaxMinDim
.prepare::<C>([tensor.node])
.compute_bound()
.stateful()
{
OpsKind::Tracked(prep) => {
let shape = tensor.primitive.shape();
let (tensor, index) = B::float_max_dim_with_indices(tensor.primitive, dim);
prep.finish((index, shape), tensor)
}
OpsKind::UnTracked(prep) => prep.finish(B::float_max_dim(tensor.primitive, dim)),
}
}
// Max along one dimension, also returning the (non-differentiable) indices.
fn float_max_dim_with_indices(
tensor: FloatTensor<Self>,
dim: usize,
) -> (FloatTensor<Self>, IntTensor<B>) {
match MaxMinDim
.prepare::<C>([tensor.node])
.compute_bound()
.stateful()
{
OpsKind::Tracked(prep) => {
let shape = tensor.primitive.shape();
let (tensor, index) = B::float_max_dim_with_indices(tensor.primitive, dim);
// Indices are both returned to the caller and saved for backward.
let tensor = prep.finish((index.clone(), shape), tensor);
(tensor, index)
}
OpsKind::UnTracked(prep) => {
let (tensor, index) = B::float_max_dim_with_indices(tensor.primitive, dim);
let tensor = prep.finish(tensor);
(tensor, index)
}
}
}
// Min along one dimension. When tracked, the argmin indices are saved so
// the shared `MaxMinDim` backward can route the grad to the min elements.
fn float_min_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {
match MaxMinDim
.prepare::<C>([tensor.node])
.compute_bound()
.stateful()
{
OpsKind::Tracked(prep) => {
let shape = tensor.primitive.shape();
let (tensor, index) = B::float_min_dim_with_indices(tensor.primitive, dim);
prep.finish((index, shape), tensor)
}
OpsKind::UnTracked(prep) => prep.finish(B::float_min_dim(tensor.primitive, dim)),
}
}
// Min along one dimension, also returning the (non-differentiable) indices.
fn float_min_dim_with_indices(
tensor: FloatTensor<Self>,
dim: usize,
) -> (FloatTensor<Self>, IntTensor<B>) {
match MaxMinDim
.prepare::<C>([tensor.node])
.compute_bound()
.stateful()
{
OpsKind::Tracked(prep) => {
let shape = tensor.primitive.shape();
let (tensor, index) = B::float_min_dim_with_indices(tensor.primitive, dim);
// Indices are both returned to the caller and saved for backward.
let tensor = prep.finish((index.clone(), shape), tensor);
(tensor, index)
}
OpsKind::UnTracked(prep) => {
let (tensor, index) = B::float_min_dim_with_indices(tensor.primitive, dim);
let tensor = prep.finish(tensor);
(tensor, index)
}
}
}
// Float-to-int conversion leaves the autodiff graph entirely; no node is
// created for the integer result.
fn float_into_int(tensor: FloatTensor<Self>) -> <Autodiff<B> as Backend>::IntTensorPrimitive {
    let inner = tensor.primitive;
    B::float_into_int(inner)
}
// Element-wise tensor power lhs^rhs, with broadcasting.
// d/d(lhs) = rhs * lhs^(rhs-1); d/d(rhs) = lhs^rhs * ln(lhs).
fn float_powf(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {
#[derive(Debug)]
struct PowF;
retro_binary!(RetroPowf, B::float_powf);
impl<B: Backend> Backward<B, 2> for PowF {
// Checkpointed lhs/rhs node ids plus the recorded broadcast info.
type State = (NodeID, NodeID, BinaryOpsBroadcast);
fn backward(
self,
ops: Ops<Self::State, 2>,
grads: &mut Gradients,
checkpointer: &mut Checkpointer,
) {
let (lhs_id, rhs_id, broadcast) = ops.state;
let lhs: B::FloatTensorPrimitive = checkpointer.retrieve_node_output(lhs_id);
let rhs: B::FloatTensorPrimitive = checkpointer.retrieve_node_output(rhs_id);
// Both lhs and rhs are needed for both lhs and rhs gradients, but we clone them
// the number of times required by the parents specification.
let [rhs_4lhs, rhs_4rhs] = duplicate(&ops.parents, Some(rhs));
let [lhs_4lhs, lhs_4rhs] = duplicate(&ops.parents, Some(lhs));
binary::<B, _, _>(
ops.parents,
ops.node,
grads,
|grad| {
//rhs*(lhs.val**(rhs-1))*grad
let rhs1 = rhs_4lhs.unwrap();
let rhs2 = rhs1.clone();
let lhs = lhs_4lhs.unwrap();
let tmp = B::float_powf(
lhs,
B::float_sub_scalar(rhs1, B::FloatElem::from_elem(1.0)),
);
let value = B::float_mul(tmp, rhs2);
let grad = B::float_mul(grad, value);
// Reduce broadcast dimensions back to the lhs shape.
broadcast.backward_lhs::<B>(grad)
},
|grad| {
//lhs**rhs * ln(lhs) * grad
let rhs = rhs_4rhs.unwrap();
let lhs1 = lhs_4rhs.unwrap();
let lhs2 = lhs1.clone();
let tmp = B::float_powf(lhs1, rhs);
let value = B::float_mul(tmp, B::float_log(lhs2));
let grad = B::float_mul(grad, value);
// Reduce broadcast dimensions back to the rhs shape.
broadcast.backward_rhs::<B>(grad)
},
);
}
}
let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);
match PowF
.prepare::<C>([lhs.node.clone(), rhs.node.clone()])
.memory_bound()
.retro_forward(RetroPowf::<B>::new(lhs.node.id, rhs.node.id))
.parents([&lhs, &rhs])
.stateful()
{
OpsKind::Tracked(mut prep) => {
let lhs_state = prep.checkpoint(&lhs);
let rhs_state = prep.checkpoint(&rhs);
prep.finish(
(lhs_state, rhs_state, broadcast),
B::float_powf(lhs.primitive, rhs.primitive),
)
}
OpsKind::UnTracked(prep) => prep.finish(B::float_powf(lhs.primitive, rhs.primitive)),
}
}
// Element-wise sign. Stateless op: the gradient is identically zero.
fn float_sign(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
#[derive(Debug)]
struct Sign;
retro_unary!(RetroSign, B::float_sign);
impl<B: Backend> Backward<B, 1> for Sign {
type State = ();
fn backward(
self,
ops: Ops<Self::State, 1>,
grads: &mut Gradients,
_checkpointer: &mut Checkpointer,
) {
unary::<B, _>(ops.parents, ops.node, grads, |grad|
// Always return 0 because the derivative of the sign function
// does not contribute to gradient updates in a meaningful way.
B::float_mul_scalar(grad, 0.elem()));
}
}
Sign.prepare::<C>([tensor.node.clone()])
.memory_bound()
.retro_forward(RetroSign::<B>::new(tensor.node.id))
.parents([&tensor])
.stateless(B::float_sign(tensor.primitive))
}
// Differentiable broadcast-expand to `shape`. Backward sums the grad over
// every dimension that was expanded, then reshapes to the input shape.
fn float_expand(tensor: FloatTensor<Self>, shape: Shape) -> FloatTensor<Self> {
// D1: tensor, D2: shape
#[derive(Debug)]
struct ExpandDim;
// Memory-bound op: re-expand the saved input instead of storing the output.
#[derive(new, Debug)]
struct RetroExpand<B: Backend> {
input_id: NodeID,
shape: Shape,
_backend: PhantomData<B>,
}
impl<B: Backend> RetroForward for RetroExpand<B> {
fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);
let out = B::float_expand(input, self.shape.clone());
states.save(out_node, out)
}
}
impl<B: Backend> Backward<B, 1> for ExpandDim {
// (input shape, output shape).
type State = (Shape, Shape);
fn backward(
self,
ops: Ops<Self::State, 1>,
grads: &mut Gradients,
_checkpointer: &mut Checkpointer,
) {
let (shape_in, shape_out) = ops.state;
let ndims_in = shape_in.num_dims();
let ndims_out = shape_out.num_dims();
// Right-align the input shape inside the output rank, padding the
// leading (newly created) dimensions with 1.
let mut shape_expanded = vec![1; ndims_out];
debug_assert!(ndims_out >= ndims_in);
for i in 0..ndims_in {
shape_expanded[i + (ndims_out - ndims_in)] = shape_in.dims[i];
}
unary::<B, _>(ops.parents, ops.node, grads, |grad| {
let shape_grad = grad.shape();
let mut grad = grad;
#[allow(clippy::needless_range_loop)]
for i in 0..ndims_out {
// Sum over every dimension that was broadcast from size 1.
if shape_expanded[i] == 1 && shape_grad.dims[i] != 1 {
grad = B::float_sum_dim(grad, i);
}
}
B::float_reshape(grad, shape_in)
});
}
}
match ExpandDim
.prepare::<C>([tensor.node.clone()])
.memory_bound()
.retro_forward(RetroExpand::<B>::new(tensor.node.id, shape.clone()))
.parents([&tensor])
.stateful()
{
OpsKind::Tracked(prep) => prep.finish(
(tensor.primitive.shape(), shape.clone()),
B::float_expand(tensor.primitive, shape),
),
OpsKind::UnTracked(prep) => prep.finish(B::float_expand(tensor.primitive, shape)),
}
}
fn float_sort(tensor: FloatTensor<Self>, dim: usize, descending: bool) -> FloatTensor<Self> {
match super::sort::SortDim
.prepare::<C>([tensor.node])
.compute_bound()
.stateful()
{
OpsKind::Tracked(prep) => {
let shape = tensor.primitive.shape();
let (tensor, indices) =
B::float_sort_with_indices(tensor.primitive, dim, descending);
prep.finish((indices, shape), tensor)
}
OpsKind::UnTracked(prep) => {
prep.finish(B::float_sort(tensor.primitive, dim, descending))
}
}
}
    // Sorts along `dim` and also returns the permutation indices. The indices
    // themselves are integers and carry no gradient; only the sorted float
    // tensor participates in autodiff.
    fn float_sort_with_indices(
        tensor: FloatTensor<Self>,
        dim: usize,
        descending: bool,
    ) -> (FloatTensor<Self>, IntTensor<B>) {
        match super::sort::SortDim
            .prepare::<C>([tensor.node])
            .compute_bound()
            .stateful()
        {
            OpsKind::Tracked(prep) => {
                let shape = tensor.primitive.shape();
                let (tensor, indices) =
                    B::float_sort_with_indices(tensor.primitive, dim, descending);
                // The backward op needs the permutation and original shape to
                // scatter gradients back to their pre-sort positions.
                let tensor = prep.finish((indices.clone(), shape), tensor);
                (tensor, indices)
            }
            OpsKind::UnTracked(prep) => {
                let (tensor, indices) =
                    B::float_sort_with_indices(tensor.primitive, dim, descending);
                let tensor = prep.finish(tensor);
                (tensor, indices)
            }
        }
    }
    // Returns integer indices, which are not differentiable, so the call is
    // forwarded straight to the inner backend without registering an autodiff
    // node.
    fn float_argsort(tensor: FloatTensor<Self>, dim: usize, descending: bool) -> IntTensor<B> {
        B::float_argsort(tensor.primitive, dim, descending)
    }
    // Repeats `tensor` `times` times along `dim`. The gradient of a repeat is
    // the sum of the gradients over all copies.
    fn float_repeat_dim(tensor: FloatTensor<Self>, dim: usize, times: usize) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Repeat;
        // Re-runs the forward repeat from checkpointed state for memory-bound
        // recomputation.
        #[derive(new, Debug)]
        struct RetroRepeat<B: Backend> {
            tensor_id: NodeID,
            dim: usize,
            times: usize,
            _backend: PhantomData<B>,
        }
        impl<B: Backend> RetroForward for RetroRepeat<B> {
            fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
                let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);
                let out = B::float_repeat_dim(tensor, self.dim, self.times);
                states.save(out_node, out)
            }
        }
        impl<B: Backend> Backward<B, 1> for Repeat {
            // (dim, times) captured at forward time.
            type State = (usize, usize);
            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (dim, times) = ops.state;
                unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                    let mut dims = grad.shape().dims;
                    // The gradient's `dim` axis has size orig_dim_size * times.
                    let orig_dim_size = dims[dim] / times;
                    if orig_dim_size > 1 {
                        // Split `dim` into (orig_dim_size, times), sum over the
                        // repeat axis, then restore the original layout.
                        dims[dim] = orig_dim_size;
                        let orig_dims = dims.clone();
                        dims.insert(dim + 1, times); // shape [..., orig_dim_size, times, ...]
                        let grad = B::float_reshape(grad, Shape::from(dims));
                        let grad = B::float_sum_dim(grad, dim + 1); // sum over repeat times
                        B::float_reshape(grad, Shape::from(orig_dims))
                    } else {
                        // Original axis had size 1: a plain sum over `dim`
                        // collapses all repeats at once.
                        B::float_sum_dim(grad, dim)
                    }
                });
            }
        }
        match Repeat
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroRepeat::<B>::new(tensor.node.id, dim, times))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(prep) => prep.finish(
                (dim, times),
                B::float_repeat_dim(tensor.primitive, dim, times),
            ),
            OpsKind::UnTracked(prep) => {
                prep.finish(B::float_repeat_dim(tensor.primitive, dim, times))
            }
        }
    }
    fn float_cast(tensor: FloatTensor<Self>, dtype: burn_tensor::FloatDType) -> FloatTensor<Self> {
        // NOTE(review): `AutodiffTensor::new` creates a fresh node with no
        // parents, so — unlike every other op in this impl — gradients will not
        // propagate back through the cast. Confirm this graph detachment is
        // intended rather than an oversight.
        AutodiffTensor::new(B::float_cast(tensor.primitive, dtype))
    }
// TODO: Implement float_prod and float_sum
// https://github.com/tracel-ai/burn/issues/1458
}
/// Records whether a binary op broadcast its operands, so the backward pass can
/// reduce each gradient back to the corresponding operand's original shape.
#[derive(Debug, Clone)]
enum BinaryOpsBroadcast {
    /// Shapes differed on at least one axis: holds the original (lhs, rhs) shapes.
    Broadcasted(Shape, Shape),
    /// Shapes matched axis-by-axis; gradients pass through unchanged.
    None,
}
impl BinaryOpsBroadcast {
    /// Compares the two operand shapes and records both when any axis differs.
    fn new<B: Backend>(lhs: &B::FloatTensorPrimitive, rhs: &B::FloatTensorPrimitive) -> Self {
        let shape_lhs = lhs.shape();
        let shape_rhs = rhs.shape();

        // Indexing (rather than zipping) preserves the original panic behavior
        // should the two ranks ever disagree.
        let broadcasted = (0..shape_lhs.num_dims())
            .any(|axis| shape_rhs.dims[axis] != shape_lhs.dims[axis]);

        if broadcasted {
            Self::Broadcasted(shape_lhs, shape_rhs)
        } else {
            Self::None
        }
    }

    /// Reduces `grad` back to the left operand's pre-broadcast shape.
    fn backward_lhs<B: Backend>(&self, grad: B::FloatTensorPrimitive) -> B::FloatTensorPrimitive {
        if let Self::Broadcasted(shape_lhs, _) = self {
            broadcast_shape::<B>(grad, shape_lhs)
        } else {
            grad
        }
    }

    /// Reduces `grad` back to the right operand's pre-broadcast shape.
    fn backward_rhs<B: Backend>(&self, grad: B::FloatTensorPrimitive) -> B::FloatTensorPrimitive {
        if let Self::Broadcasted(_, shape_rhs) = self {
            broadcast_shape::<B>(grad, shape_rhs)
        } else {
            grad
        }
    }
}
| rust | {
"argument_definitions": [
{
"definitions": [
"impl<B: Backend, C: CheckpointStrategy> FloatTensorOps<Self> for Autodiff<B, C> {\n fn float_from_data(data: TensorData, device: &Device<Self>) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_from_data(data, device))\n }\n\n fn float_random(\n shape: Shape,\n distribution: burn_tensor::Distribution,\n device: &Device<Self>,\n ) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_random(shape, distribution, device))\n }\n\n fn float_zeros(shape: Shape, device: &Device<Self>) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_zeros(shape, device))\n }\n\n fn float_ones(shape: Shape, device: &Device<Self>) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_ones(shape, device))\n }\n\n async fn float_into_data(tensor: FloatTensor<Self>) -> TensorData {\n B::float_into_data(tensor.primitive).await\n }\n\n fn float_device(tensor: &FloatTensor<Self>) -> Device<Self> {\n B::float_device(&tensor.primitive)\n }\n\n fn float_to_device(tensor: FloatTensor<Self>, device: &Device<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct ToDevice;\n\n impl<B: Backend> Backward<B, 1> for ToDevice {\n type State = B::Device;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_to_device(grad, &ops.state)\n });\n }\n }\n\n match ToDevice\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let device_old = B::float_device(&tensor.primitive);\n prep.finish(device_old, B::float_to_device(tensor.primitive, device))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_to_device(tensor.primitive, device)),\n }\n }\n\n fn float_empty(shape: Shape, device: &Device<Self>) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_empty(shape, device))\n }\n\n fn float_add(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Add;\n\n retro_binary!(RetroAdd, 
B::float_add);\n\n impl<B: Backend> Backward<B, 2> for Add {\n type State = (Shape, Shape);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape_lhs, shape_rhs) = ops.state;\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| broadcast_shape::<B>(grad, &shape_lhs),\n |grad| broadcast_shape::<B>(grad, &shape_rhs),\n );\n }\n }\n\n match Add\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroAdd::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (lhs.primitive.shape(), rhs.primitive.shape()),\n B::float_add(lhs.primitive, rhs.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_add(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_add_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct AddScalar;\n\n retro_unary_scalar!(RetroAddScalar, B::float_add_scalar);\n\n impl<B: Backend> Backward<B, 1> for AddScalar {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| grad);\n }\n }\n\n AddScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroAddScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateless(B::float_add_scalar(lhs.primitive, rhs))\n }\n\n fn float_sub(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sub;\n\n retro_binary!(RetroSub, B::float_sub);\n\n impl<B: Backend> Backward<B, 2> for Sub {\n type State = (Shape, Shape);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape_lhs, shape_rhs) = ops.state;\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| 
broadcast_shape::<B>(grad, &shape_lhs),\n |grad| broadcast_shape::<B>(B::float_neg(grad), &shape_rhs),\n );\n }\n }\n\n match Sub\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroSub::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (lhs.primitive.shape(), rhs.primitive.shape()),\n B::float_sub(lhs.primitive, rhs.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_sub(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_sub_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct SubScalar;\n\n retro_unary_scalar!(RetroSubScalar, B::float_sub_scalar);\n\n impl<B: Backend> Backward<B, 1> for SubScalar {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| grad);\n }\n }\n\n SubScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroSubScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateless(B::float_sub_scalar(lhs.primitive, rhs))\n }\n\n fn float_mul(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Mul;\n\n retro_binary!(RetroMul, B::float_mul);\n\n impl<B: Backend> Backward<B, 2> for Mul {\n type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (lhs, rhs, broadcast) = ops.state;\n let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));\n let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let grad = B::float_mul(grad, rhs.unwrap());\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n let grad = B::float_mul(grad, lhs.unwrap());\n 
broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let lhs_tracked = lhs.is_tracked();\n let rhs_tracked = rhs.is_tracked();\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match Mul\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroMul::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));\n let rhs_state = lhs_tracked.then(|| prep.checkpoint(&rhs));\n\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n B::float_mul(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_mul(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_mul_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct MulScalar;\n\n retro_unary_scalar!(RetroMulScalar, B::float_mul_scalar);\n\n impl<B: Backend> Backward<B, 1> for MulScalar {\n type State = FloatElem<B>;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_mul_scalar(grad, ops.state)\n });\n }\n }\n\n match MulScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroMulScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(rhs, B::float_mul_scalar(lhs.primitive, rhs)),\n OpsKind::UnTracked(prep) => prep.finish(B::float_mul_scalar(lhs.primitive, rhs)),\n }\n }\n\n fn float_div(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Div;\n\n retro_binary!(RetroDiv, B::float_div);\n\n impl<B: Backend> Backward<B, 2> for Div {\n type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut 
Checkpointer,\n ) {\n let (lhs, rhs, broadcast) = ops.state;\n let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));\n let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));\n let [rhs_4lhs, rhs_4rhs] = duplicate(&ops.parents, rhs);\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let rhs = rhs_4lhs.unwrap();\n let value = B::float_powf_scalar(rhs, -1.0);\n let grad = B::float_mul(grad, value);\n\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n let rhs = rhs_4rhs.unwrap();\n let lhs = lhs.unwrap();\n let value = B::float_div(B::float_neg(lhs), B::float_powf_scalar(rhs, 2.0));\n let grad = B::float_mul(grad, value);\n\n broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let lhs_tracked = lhs.is_tracked();\n let rhs_tracked = rhs.is_tracked();\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match Div\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroDiv::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));\n let rhs_state = (lhs_tracked || rhs_tracked).then(|| prep.checkpoint(&rhs));\n\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n B::float_div(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_div(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_div_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct DivScalar;\n\n retro_unary_scalar!(RetroDivScalar, B::float_div_scalar);\n\n impl<B: Backend> Backward<B, 1> for DivScalar {\n type State = FloatElem<B>;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let tmp = 1.0 / ops.state.elem::<f32>();\n B::float_mul_scalar(grad, tmp.elem())\n });\n }\n 
}\n\n match DivScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroDivScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(rhs, B::float_div_scalar(lhs.primitive, rhs)),\n OpsKind::UnTracked(prep) => prep.finish(B::float_div_scalar(lhs.primitive, rhs)),\n }\n }\n\n fn float_remainder(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Rem;\n\n retro_binary!(RetroRem, B::float_remainder);\n\n impl<B: Backend> Backward<B, 2> for Rem {\n type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (lhs, rhs, broadcast) = ops.state;\n let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));\n let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n // remainder(x, y) = x - floor(x / y) * y\n // partial(x - floor(x / y) * y, x) = 1\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n // partial(x - floor(x / y) * y, y) = - floor(x / y)\n let rhs = rhs.unwrap();\n let lhs = lhs.unwrap();\n let value = B::float_neg(B::float_floor(B::float_div(lhs, rhs)));\n let grad = B::float_mul(grad, value);\n broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let lhs_tracked = lhs.is_tracked();\n let rhs_tracked = rhs.is_tracked();\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match Rem\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroRem::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));\n let rhs_state = (lhs_tracked || rhs_tracked).then(|| prep.checkpoint(&rhs));\n\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n 
B::float_remainder(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_remainder(lhs.primitive, rhs.primitive))\n }\n }\n }\n\n fn float_remainder_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct RemainderScalar;\n\n retro_unary_scalar!(RetroRemainderScalar, B::float_remainder_scalar);\n\n impl<B: Backend> Backward<B, 1> for RemainderScalar {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| grad);\n }\n }\n\n RemainderScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroRemainderScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateless(B::float_remainder_scalar(lhs.primitive, rhs))\n }\n\n fn float_matmul(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Matmul;\n\n impl<B: Backend> Backward<B, 2> for Matmul {\n type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (lhs, rhs, broadcast) = ops.state;\n let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));\n let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let rhs = B::float_transpose(rhs.unwrap());\n let grad = B::float_matmul(grad, rhs);\n\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n let lhs = B::float_transpose(lhs.unwrap());\n let grad = B::float_matmul(lhs, grad);\n\n broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let lhs_tracked = lhs.is_tracked();\n let rhs_tracked = rhs.is_tracked();\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match Matmul\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n 
.compute_bound()\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));\n let rhs_state = lhs_tracked.then(|| prep.checkpoint(&rhs));\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n B::float_matmul(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_matmul(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_neg(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Neg;\n\n retro_unary!(RetroNeg, B::float_neg);\n\n impl<B: Backend> Backward<B, 1> for Neg {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| B::float_neg(grad));\n }\n }\n\n Neg.prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroNeg::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateless(B::float_neg(tensor.primitive))\n }\n\n fn float_recip(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Recip;\n\n retro_unary!(RetroRecip, B::float_recip);\n\n impl<B: Backend> Backward<B, 1> for Recip {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let tensor = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let tmp = B::float_powf_scalar(tensor, -2.0);\n let value = B::float_neg(tmp);\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Recip\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroRecip::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_recip(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_recip(tensor.primitive)),\n }\n }\n\n fn float_swap_dims(tensor: 
FloatTensor<Self>, dim1: usize, dim2: usize) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct SwapDim;\n\n #[derive(new, Debug)]\n struct RetroSwapDims<B: Backend> {\n input_id: NodeID,\n dim1: usize,\n dim2: usize,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroSwapDims<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_swap_dims(input, self.dim1, self.dim2);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for SwapDim {\n type State = (usize, usize);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim1, dim2) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_swap_dims(grad, dim2, dim1)\n });\n }\n }\n\n match SwapDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSwapDims::<B>::new(tensor.node.id, dim1, dim2))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (dim1, dim2),\n B::float_swap_dims(tensor.primitive, dim1, dim2),\n ),\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_swap_dims(tensor.primitive, dim1, dim2))\n }\n }\n }\n\n fn float_permute(tensor: FloatTensor<Self>, axes: &[usize]) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct PermuteDim;\n\n #[derive(new, Debug)]\n struct RetroPermuteDims<B: Backend> {\n input_id: NodeID,\n axes: Vec<usize>,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroPermuteDims<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_permute(input, &self.axes);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for PermuteDim {\n type State = Vec<usize>;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: 
&mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let axes = ops.state;\n\n let mut inverse = vec![0usize; axes.len()];\n axes.iter()\n .enumerate()\n .for_each(|(i, &axis)| inverse[axis] = i);\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_permute(grad, &inverse)\n });\n }\n }\n\n match PermuteDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroPermuteDims::<B>::new(tensor.node.id, axes.to_vec()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n prep.finish(axes.to_vec(), B::float_permute(tensor.primitive, axes))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_permute(tensor.primitive, axes)),\n }\n }\n\n fn float_flip(tensor: FloatTensor<Self>, axes: &[usize]) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct FlipDim;\n\n #[derive(new, Debug)]\n struct RetroFlipDims<B: Backend> {\n input_id: NodeID,\n axes: Vec<usize>,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroFlipDims<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_flip(input, &self.axes);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for FlipDim {\n type State = Vec<usize>;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let axes = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_flip(grad, &axes)\n });\n }\n }\n\n match FlipDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroFlipDims::<B>::new(tensor.node.id, axes.to_vec()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n prep.finish(axes.to_vec(), B::float_flip(tensor.primitive, axes))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_flip(tensor.primitive, axes)),\n }\n }\n\n fn float_reshape(tensor: FloatTensor<Self>, shape: 
Shape) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct ReshapeDim;\n\n #[derive(new, Debug)]\n struct RetroReshape<B: Backend> {\n input_id: NodeID,\n shape: Shape,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroReshape<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_reshape(input, self.shape.clone());\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for ReshapeDim {\n type State = (Shape, Shape);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape_original, shape) = ops.state;\n let ndims_out = shape.num_dims();\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let shape_grad = grad.shape();\n let mut grad = grad;\n\n for i in 0..ndims_out {\n if shape.dims[i] == 1 && shape_grad.dims[i] != 1 {\n grad = B::float_sum_dim(grad, i);\n }\n }\n\n B::float_reshape(grad, shape_original)\n });\n }\n }\n\n match ReshapeDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroReshape::<B>::new(tensor.node.id, shape.clone()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (tensor.primitive.shape(), shape.clone()),\n B::float_reshape(tensor.primitive, shape),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_reshape(tensor.primitive, shape)),\n }\n }\n\n fn float_gather(\n dim: usize,\n tensor: FloatTensor<Self>,\n indices: IntTensor<B>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Gather;\n\n impl<B: Backend> Backward<B, 1> for Gather {\n type State = (usize, IntTensor<B>, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, indices, shape, device) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let zeros = 
B::float_zeros(shape, &device);\n B::float_scatter(dim, zeros, indices, grad)\n });\n }\n }\n\n match Gather\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n dim,\n indices.clone(),\n tensor.primitive.shape(),\n B::float_device(&tensor.primitive),\n ),\n B::float_gather(dim, tensor.primitive, indices),\n ),\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_gather(dim, tensor.primitive, indices))\n }\n }\n }\n\n fn float_scatter(\n dim: usize,\n tensor: FloatTensor<Self>,\n indices: IntTensor<B>,\n value: FloatTensor<Self>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Scatter;\n\n impl<B: Backend> Backward<B, 2> for Scatter {\n type State = (usize, IntTensor<B>, Shape, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, indices, shape_lhs, shape_rhs, device) = ops.state;\n let [indices_4lhs, indices_4rhs] = duplicate(&ops.parents, Some(indices));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let zeros = B::float_zeros(shape_lhs, &device);\n B::float_scatter(dim, grad, indices_4lhs.unwrap(), zeros)\n },\n |grad| {\n let zeros = B::float_zeros(shape_rhs, &device);\n B::float_scatter(dim, zeros, indices_4rhs.unwrap(), grad)\n },\n );\n }\n }\n\n match Scatter\n .prepare::<C>([tensor.node, value.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n dim,\n indices.clone(),\n tensor.primitive.shape(),\n value.primitive.shape(),\n B::float_device(&value.primitive),\n ),\n B::float_scatter(dim, tensor.primitive, indices, value.primitive),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_scatter(\n dim,\n tensor.primitive,\n indices,\n value.primitive,\n )),\n }\n }\n\n fn float_select(\n tensor: FloatTensor<Self>,\n dim: usize,\n indices: IntTensor<B>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Select;\n\n 
#[derive(new, Debug)]\n struct RetroSelect<B: Backend> {\n input_id: NodeID,\n dim: usize,\n indices: IntTensor<B>,\n }\n\n impl<B: Backend> RetroForward for RetroSelect<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_select(input, self.dim, self.indices.clone());\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for Select {\n type State = (usize, IntTensor<B>, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, indices, shape, device) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let zeros = B::float_zeros(shape, &device);\n B::float_select_assign(zeros, dim, indices, grad)\n });\n }\n }\n\n match Select\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSelect::<B>::new(tensor.node.id, dim, indices.clone()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n dim,\n indices.clone(),\n tensor.primitive.shape(),\n B::float_device(&tensor.primitive),\n ),\n B::float_select(tensor.primitive, dim, indices),\n ),\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_select(tensor.primitive, dim, indices))\n }\n }\n }\n\n fn float_select_assign(\n tensor: FloatTensor<Self>,\n dim: usize,\n indices: IntTensor<B>,\n value: FloatTensor<Self>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct IndexSelectDimAssign;\n\n #[derive(new, Debug)]\n struct RetroSelectAssign<B: Backend> {\n tensor_id: NodeID,\n dim: usize,\n indices: IntTensor<B>,\n value_id: NodeID,\n }\n\n impl<B: Backend> RetroForward for RetroSelectAssign<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);\n let value = states.get_state::<B::FloatTensorPrimitive>(&self.value_id);\n 
let out = B::float_select_assign(tensor, self.dim, self.indices.clone(), value);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 2> for IndexSelectDimAssign {\n type State = (usize, IntTensor<B>);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, indices) = ops.state;\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| grad,\n |grad| B::float_select(grad, dim, indices),\n );\n }\n }\n\n match IndexSelectDimAssign\n .prepare::<C>([tensor.node.clone(), value.node.clone()])\n .memory_bound()\n .retro_forward(RetroSelectAssign::<B>::new(\n tensor.node.id,\n dim,\n indices.clone(),\n value.node.id,\n ))\n .parents([&tensor, &value])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (dim, indices.clone()),\n B::float_select_assign(tensor.primitive, dim, indices, value.primitive),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_select_assign(\n tensor.primitive,\n dim,\n indices,\n value.primitive,\n )),\n }\n }\n\n fn float_slice(\n tensor: FloatTensor<Self>,\n ranges: &[core::ops::Range<usize>],\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Index;\n\n #[derive(new, Debug)]\n struct RetroSlice<B: Backend> {\n tensor_id: NodeID,\n ranges: Vec<core::ops::Range<usize>>,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroSlice<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);\n let out = B::float_slice(tensor, &self.ranges);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for Index {\n type State = (Vec<core::ops::Range<usize>>, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (ranges, shape, device) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let zeros = 
B::float_zeros(shape, &device);\n B::float_slice_assign(zeros, &ranges, grad)\n });\n }\n }\n\n match Index\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSlice::<B>::new(tensor.node.id, ranges.to_vec()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n ranges.to_vec(),\n tensor.primitive.shape(),\n B::float_device(&tensor.primitive),\n ),\n B::float_slice(tensor.primitive, ranges),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_slice(tensor.primitive, ranges)),\n }\n }\n\n fn float_slice_assign(\n tensor: FloatTensor<Self>,\n ranges: &[core::ops::Range<usize>],\n value: FloatTensor<Self>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct SliceAssign;\n\n #[derive(new, Debug)]\n struct RetroSliceAssign<B: Backend> {\n tensor_id: NodeID,\n ranges: Vec<core::ops::Range<usize>>,\n value_id: NodeID,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroSliceAssign<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);\n let value = states.get_state::<B::FloatTensorPrimitive>(&self.value_id);\n let out = B::float_slice_assign(tensor, &self.ranges, value);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 2> for SliceAssign {\n type State = (Vec<core::ops::Range<usize>>, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (ranges, shape_rhs, device) = ops.state;\n let [ranges_4lhs, ranges_4rhs] = duplicate(&ops.parents, Some(ranges));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let zeros = B::float_zeros(shape_rhs, &device);\n B::float_slice_assign(grad, &ranges_4lhs.unwrap(), zeros)\n },\n |grad| B::float_slice(grad, &ranges_4rhs.unwrap()),\n );\n }\n }\n\n match SliceAssign\n .prepare::<C>([tensor.node.clone(), value.node.clone()])\n 
.memory_bound()\n .retro_forward(RetroSliceAssign::<B>::new(\n tensor.node.id,\n ranges.to_vec(),\n value.node.id,\n ))\n .parents([&tensor, &value])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n ranges.to_vec(),\n value.primitive.shape(),\n B::float_device(&value.primitive),\n ),\n B::float_slice_assign(tensor.primitive, ranges, value.primitive),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_slice_assign(\n tensor.primitive,\n ranges,\n value.primitive,\n )),\n }\n }\n\n fn float_mask_where(\n tensor: FloatTensor<Self>,\n mask: BoolTensor<Self>,\n source: FloatTensor<Self>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct MaskWhere;\n\n impl<B: Backend> Backward<B, 2> for MaskWhere {\n type State = (BoolTensor<B>, Shape, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (mask, shape_lhs, shape_rhs, device) = ops.state;\n let [mask_4lhs, mask_4rhs] = duplicate(&ops.parents, Some(mask));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let zeros = B::float_zeros(shape_lhs.clone(), &device);\n let grad = B::float_mask_where(grad, mask_4lhs.unwrap(), zeros);\n\n broadcast_shape::<B>(grad, &shape_lhs)\n },\n |grad| {\n let zeros = B::float_zeros(shape_rhs.clone(), &device);\n let grad = B::float_mask_where(zeros, mask_4rhs.unwrap(), grad);\n\n broadcast_shape::<B>(grad, &shape_rhs)\n },\n );\n }\n }\n\n match MaskWhere\n .prepare::<C>([tensor.node, source.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n mask.clone(),\n tensor.primitive.shape(),\n source.primitive.shape(),\n B::float_device(&source.primitive),\n ),\n B::float_mask_where(tensor.primitive, mask, source.primitive),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_mask_where(\n tensor.primitive,\n mask,\n source.primitive,\n )),\n }\n }\n\n fn float_mask_fill(\n tensor: FloatTensor<Self>,\n mask: 
BoolTensor<B>,\n value: FloatElem<B>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct MaskFill;\n\n impl<B: Backend> Backward<B, 1> for MaskFill {\n type State = BoolTensor<B>;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_mask_fill(grad, ops.state, 0.elem())\n });\n }\n }\n\n match MaskFill\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n mask.clone(),\n B::float_mask_fill(tensor.primitive, mask, value),\n ),\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_mask_fill(tensor.primitive, mask, value))\n }\n }\n }\n\n fn float_equal(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {\n B::float_equal(lhs.primitive, rhs.primitive)\n }\n\n fn float_equal_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {\n B::float_equal_elem(lhs.primitive, rhs)\n }\n\n fn float_greater(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {\n B::float_greater(lhs.primitive, rhs.primitive)\n }\n\n fn float_greater_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {\n B::float_greater_elem(lhs.primitive, rhs)\n }\n\n fn float_greater_equal(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {\n B::float_greater_equal(lhs.primitive, rhs.primitive)\n }\n\n fn float_greater_equal_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {\n B::float_greater_equal_elem(lhs.primitive, rhs)\n }\n\n fn float_lower(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {\n B::float_lower(lhs.primitive, rhs.primitive)\n }\n\n fn float_lower_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {\n B::float_lower_elem(lhs.primitive, rhs)\n }\n\n fn float_lower_equal(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {\n B::float_lower_equal(lhs.primitive, rhs.primitive)\n 
}\n\n fn float_lower_equal_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {\n B::float_lower_equal_elem(lhs.primitive, rhs)\n }\n\n fn float_detach(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n // When we detach a tensor, we remove it from the graph, but we still want to keep the\n // `require_grad` setting.\n let is_require_grad = Self::float_is_require_grad(&tensor);\n let tensor = AutodiffTensor::new(tensor.primitive);\n\n match is_require_grad {\n true => tensor.require_grad(),\n false => tensor,\n }\n }\n\n fn float_set_require_grad(tensor: FloatTensor<Self>, require_grad: bool) -> FloatTensor<Self> {\n if require_grad {\n return tensor.require_grad();\n }\n\n AutodiffTensor::new(tensor.primitive)\n }\n\n fn float_is_require_grad(tensor: &FloatTensor<Self>) -> bool {\n matches!(tensor.node.requirement, Requirement::Grad)\n }\n\n fn float_mean(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Mean;\n\n impl<B: Backend> Backward<B, 1> for Mean {\n type State = Shape;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let shape = ops.state;\n let val = 1_f64 / shape.num_elements() as f64;\n let ones = B::float_ones(shape, &B::float_device(&grad));\n let val = B::float_mul_scalar(ones, val.elem());\n\n let grad = unsqueeze_like::<B>(grad, val.shape());\n B::float_mul(val, grad)\n });\n }\n }\n\n match Mean.prepare::<C>([tensor.node]).compute_bound().stateful() {\n OpsKind::Tracked(prep) => {\n prep.finish(tensor.primitive.shape(), B::float_mean(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_mean(tensor.primitive)),\n }\n }\n\n fn float_sum(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sum;\n\n impl<B: Backend> Backward<B, 1> for Sum {\n type State = Shape;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut 
Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let val = B::float_ones(ops.state, &B::float_device(&grad));\n\n let grad = unsqueeze_like::<B>(grad, val.shape());\n B::float_mul(val, grad)\n });\n }\n }\n\n match Sum.prepare::<C>([tensor.node]).compute_bound().stateful() {\n OpsKind::Tracked(prep) => {\n prep.finish(tensor.primitive.shape(), B::float_sum(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_sum(tensor.primitive)),\n }\n }\n\n fn float_mean_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct MeanDim;\n\n impl<B: Backend> Backward<B, 1> for MeanDim {\n type State = (Shape, usize);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape, dim) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let val = 1_f64 / shape.dims[dim] as f64;\n let ones = B::float_ones(shape, &B::float_device(&grad));\n let val = B::float_mul_scalar(ones, B::FloatElem::from_elem(val));\n\n let grad = B::float_sum_dim(grad, dim);\n B::float_mul(val, grad)\n });\n }\n }\n\n match MeanDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (tensor.primitive.shape(), dim),\n B::float_mean_dim(tensor.primitive, dim),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_mean_dim(tensor.primitive, dim)),\n }\n }\n\n fn float_sum_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct SumDim;\n\n impl<B: Backend> Backward<B, 1> for SumDim {\n type State = (Shape, usize);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape, dim) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let ones = B::float_ones(shape, &B::float_device(&grad));\n let grad = 
B::float_sum_dim(grad, dim);\n\n B::float_mul(ones, grad)\n });\n }\n }\n\n match SumDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (tensor.primitive.shape(), dim),\n B::float_sum_dim(tensor.primitive, dim),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_sum_dim(tensor.primitive, dim)),\n }\n }\n\n fn float_argmax(tensor: FloatTensor<Self>, dim: usize) -> IntTensor<B> {\n B::float_argmax(tensor.primitive, dim)\n }\n\n fn float_argmin(tensor: FloatTensor<Self>, dim: usize) -> IntTensor<B> {\n B::float_argmin(tensor.primitive, dim)\n }\n\n fn float_exp(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Exp;\n\n retro_unary!(RetroExp, B::float_exp);\n\n impl<B: Backend> Backward<B, 1> for Exp {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n let output = B::float_exp(input);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_mul(grad, output)\n });\n }\n }\n\n match Exp\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroExp::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_exp(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_exp(tensor.primitive)),\n }\n }\n\n fn float_log(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Log;\n\n retro_unary!(RetroLog, B::float_log);\n\n impl<B: Backend> Backward<B, 1> for Log {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = 
B::float_powf_scalar(input, -1.0);\n B::float_mul(grad, value)\n });\n }\n }\n\n match Log\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroLog::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_log(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_log(tensor.primitive)),\n }\n }\n\n fn float_log1p(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Log1P;\n\n retro_unary!(RetroLog1P, B::float_log1p);\n\n impl<B: Backend> Backward<B, 1> for Log1P {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_add_scalar(input, 1.elem());\n let value = B::float_powf_scalar(value, -1.0);\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Log1P\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroLog1P::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_log1p(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_log1p(tensor.primitive)),\n }\n }\n\n fn float_powf_scalar(tensor: FloatTensor<Self>, value: f32) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct PowfScalar;\n\n #[derive(new, Debug)]\n struct RetroPowfScalar<B: Backend> {\n lhs_id: NodeID,\n rhs: f32,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroPowfScalar<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let lhs = states.get_state::<B::FloatTensorPrimitive>(&self.lhs_id);\n let out = B::float_powf_scalar(lhs, self.rhs);\n states.save(out_node, out)\n }\n }\n\n impl<B: 
Backend> Backward<B, 1> for PowfScalar {\n type State = (NodeID, f32);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (tensor_id, value) = ops.state;\n let tensor = checkpointer.retrieve_node_output(tensor_id);\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let tmp = B::float_powf_scalar(tensor, value - 1.0);\n let value = B::float_mul_scalar(tmp, value.elem());\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match PowfScalar\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroPowfScalar::<B>::new(tensor.node.id, value))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = (prep.checkpoint(&tensor), value);\n prep.finish(state, B::float_powf_scalar(tensor.primitive, value))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_powf_scalar(tensor.primitive, value)),\n }\n }\n\n fn float_sqrt(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sqrt;\n\n retro_unary!(RetroSqrt, B::float_sqrt);\n\n impl<B: Backend> Backward<B, 1> for Sqrt {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_div_scalar(B::float_powf_scalar(input, -0.5), 2.elem());\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Sqrt\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSqrt::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_sqrt(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_sqrt(tensor.primitive)),\n }\n }\n\n fn float_abs(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Abs;\n\n 
retro_unary!(RetroAbs, B::float_abs);\n\n impl<B: Backend> Backward<B, 1> for Abs {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let tensor: B::FloatTensorPrimitive = checkpointer.retrieve_node_output(ops.state);\n let state = B::float_sign(tensor);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_mul(grad, state)\n });\n }\n }\n\n match Abs\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroAbs::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_abs(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_abs(tensor.primitive)),\n }\n }\n\n fn float_cos(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Cos;\n\n retro_unary!(RetroCos, B::float_cos);\n\n impl<B: Backend> Backward<B, 1> for Cos {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_neg(B::float_sin(input));\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Cos\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroCos::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_cos(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_cos(tensor.primitive)),\n }\n }\n\n fn float_sin(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sin;\n\n retro_unary!(RetroSin, B::float_sin);\n\n impl<B: Backend> Backward<B, 1> for Sin {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 
1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let state = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_cos(state);\n B::float_mul(grad, value)\n });\n }\n }\n\n match Sin\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSin::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_sin(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_sin(tensor.primitive)),\n }\n }\n\n fn float_tanh(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Tanh;\n\n retro_unary!(RetroTanh, B::float_tanh);\n\n impl<B: Backend> Backward<B, 1> for Tanh {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n let state = B::float_tanh(input);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_add_scalar(\n B::float_neg(B::float_powf_scalar(state, 2.0)),\n 1.elem(),\n );\n B::float_mul(grad, value)\n });\n }\n }\n\n match Tanh\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroTanh::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_tanh(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_tanh(tensor.primitive)),\n }\n }\n\n fn float_round(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Round;\n retro_unary!(RetroRound, B::float_round);\n\n impl<B: Backend> Backward<B, 1> for Round {\n type State = (Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n 
) {\n let (shape, device) = ops.state;\n unary::<B, _>(ops.parents, ops.node, grads, |_grad| {\n B::float_zeros(shape, &device)\n })\n }\n }\n\n match Round\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroRound::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (tensor.primitive.shape(), B::float_device(&tensor.primitive)),\n B::float_round(tensor.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_round(tensor.primitive)),\n }\n }\n\n fn float_floor(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Floor;\n retro_unary!(RetroFloor, B::float_floor);\n\n impl<B: Backend> Backward<B, 1> for Floor {\n type State = (Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape, device) = ops.state;\n unary::<B, _>(ops.parents, ops.node, grads, |_grad| {\n B::float_zeros(shape, &device)\n })\n }\n }\n\n match Floor\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroFloor::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (tensor.primitive.shape(), B::float_device(&tensor.primitive)),\n B::float_floor(tensor.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_floor(tensor.primitive)),\n }\n }\n\n fn float_ceil(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Ceil;\n retro_unary!(RetroCeil, B::float_ceil);\n\n impl<B: Backend> Backward<B, 1> for Ceil {\n type State = (Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape, device) = ops.state;\n unary::<B, _>(ops.parents, ops.node, grads, |_grad| {\n B::float_zeros(shape, &device)\n })\n }\n }\n\n match Ceil\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n 
.retro_forward(RetroCeil::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (tensor.primitive.shape(), B::float_device(&tensor.primitive)),\n B::float_floor(tensor.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_floor(tensor.primitive)),\n }\n }\n\n fn float_erf(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Erf;\n\n retro_unary!(RetroErf, B::float_erf);\n\n impl<B: Backend> Backward<B, 1> for Erf {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let ops = checkpointer.retrieve_node_output(ops.state);\n let exponent = B::float_neg(B::float_powf_scalar(ops, 2.0));\n let numerator = B::float_mul_scalar(B::float_exp(exponent), 2.0.elem());\n let denominator = core::f64::consts::PI.sqrt().elem();\n let value = B::float_div_scalar(numerator, denominator);\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Erf\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroErf::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_erf(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_erf(tensor.primitive)),\n }\n }\n\n fn float_cat(tensors: Vec<FloatTensor<Self>>, dim: usize) -> FloatTensor<Self> {\n #[derive(new, Debug)]\n struct CatStep<B: Backend> {\n nodes: Vec<Option<NodeRef>>,\n // The dimension of each tensor along the dim dimension.\n // This indicates the number of dimension concatenated for each tensor.\n dim_sizes: Vec<usize>,\n output: NodeRef,\n phantom: PhantomData<B>,\n dim: usize,\n }\n\n impl<B: Backend> Step for CatStep<B> {\n fn step(self: Box<Self>, grads: &mut Gradients, _checkpointer: &mut Checkpointer) {\n let grad = 
grads.consume::<B>(&self.output);\n let ranges: Vec<_> = grad.shape().dims.iter().map(|v| 0..*v).collect();\n\n let mut current_index = 0;\n\n self.nodes\n .into_iter()\n .zip(self.dim_sizes)\n .filter_map(|(node, dim_size)| node.map(|node| (node, dim_size)))\n .for_each(|(node, dim_size)| {\n let mut ranges = ranges.clone();\n ranges[self.dim] = current_index..dim_size + current_index;\n current_index += dim_size;\n grads.register::<B>(node.id, B::float_slice(grad.clone(), &ranges));\n });\n }\n\n fn node(&self) -> NodeID {\n self.output.id\n }\n\n fn parents(&self) -> Vec<NodeID> {\n self.nodes\n .iter()\n .filter_map(|node| node.clone())\n .map(|node| node.id)\n .collect()\n }\n fn depth(&self) -> usize {\n self.output.order\n }\n }\n\n let mut nodes = Vec::with_capacity(tensors.len());\n let mut primitives = Vec::with_capacity(tensors.len());\n let mut dim_sizes = Vec::with_capacity(tensors.len());\n\n tensors.into_iter().for_each(|tensor| {\n dim_sizes.push(tensor.primitive.shape().dims[dim]);\n nodes.push(tensor.node);\n primitives.push(tensor.primitive);\n });\n\n let requirement = Requirement::from_nodes(&nodes);\n\n // For simplicity, this operation does not checkpoint anything\n let cat_computing_property = ComputingProperty::Ambiguous;\n let checkpointer_builder = CheckpointerBuilder::default();\n\n let output = B::float_cat(primitives, dim);\n if requirement.is_none() {\n return AutodiffTensor::from_parents(\n output,\n &nodes,\n requirement,\n cat_computing_property,\n );\n }\n\n let output =\n AutodiffTensor::from_parents(output, &nodes, requirement, cat_computing_property);\n let nodes = nodes\n .into_iter()\n .map(|node| node.clone_if_require_grad())\n .collect::<Vec<_>>();\n\n let ops = CatStep::<B>::new(nodes, dim_sizes, output.node.clone(), dim);\n output.register_step(ops, checkpointer_builder)\n }\n\n fn float_max_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {\n match MaxMinDim\n .prepare::<C>([tensor.node])\n 
.compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let shape = tensor.primitive.shape();\n let (tensor, index) = B::float_max_dim_with_indices(tensor.primitive, dim);\n prep.finish((index, shape), tensor)\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_max_dim(tensor.primitive, dim)),\n }\n }\n fn float_max_dim_with_indices(\n tensor: FloatTensor<Self>,\n dim: usize,\n ) -> (FloatTensor<Self>, IntTensor<B>) {\n match MaxMinDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let shape = tensor.primitive.shape();\n let (tensor, index) = B::float_max_dim_with_indices(tensor.primitive, dim);\n let tensor = prep.finish((index.clone(), shape), tensor);\n\n (tensor, index)\n }\n OpsKind::UnTracked(prep) => {\n let (tensor, index) = B::float_max_dim_with_indices(tensor.primitive, dim);\n let tensor = prep.finish(tensor);\n\n (tensor, index)\n }\n }\n }\n fn float_min_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {\n match MaxMinDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let shape = tensor.primitive.shape();\n let (tensor, index) = B::float_min_dim_with_indices(tensor.primitive, dim);\n prep.finish((index, shape), tensor)\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_min_dim(tensor.primitive, dim)),\n }\n }\n fn float_min_dim_with_indices(\n tensor: FloatTensor<Self>,\n dim: usize,\n ) -> (FloatTensor<Self>, IntTensor<B>) {\n match MaxMinDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let shape = tensor.primitive.shape();\n let (tensor, index) = B::float_min_dim_with_indices(tensor.primitive, dim);\n let tensor = prep.finish((index.clone(), shape), tensor);\n\n (tensor, index)\n }\n OpsKind::UnTracked(prep) => {\n let (tensor, index) = B::float_min_dim_with_indices(tensor.primitive, dim);\n let tensor = prep.finish(tensor);\n\n (tensor, index)\n }\n }\n }\n\n 
fn float_into_int(tensor: FloatTensor<Self>) -> <Autodiff<B> as Backend>::IntTensorPrimitive {\n B::float_into_int(tensor.primitive)\n }\n\n fn float_powf(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct PowF;\n\n retro_binary!(RetroPowf, B::float_powf);\n\n impl<B: Backend> Backward<B, 2> for PowF {\n type State = (NodeID, NodeID, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (lhs_id, rhs_id, broadcast) = ops.state;\n let lhs: B::FloatTensorPrimitive = checkpointer.retrieve_node_output(lhs_id);\n let rhs: B::FloatTensorPrimitive = checkpointer.retrieve_node_output(rhs_id);\n\n // Both lhs and rhs are needed for both lhs and rhs gradients, but we clone them\n // the number of times required by the parents specification.\n let [rhs_4lhs, rhs_4rhs] = duplicate(&ops.parents, Some(rhs));\n let [lhs_4lhs, lhs_4rhs] = duplicate(&ops.parents, Some(lhs));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n //rhs*(lhs.val**(rhs-1))*grad\n let rhs1 = rhs_4lhs.unwrap();\n let rhs2 = rhs1.clone();\n let lhs = lhs_4lhs.unwrap();\n\n let tmp = B::float_powf(\n lhs,\n B::float_sub_scalar(rhs1, B::FloatElem::from_elem(1.0)),\n );\n let value = B::float_mul(tmp, rhs2);\n let grad = B::float_mul(grad, value);\n\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n //lhs**rhs * ln(lhs) * grad\n let rhs = rhs_4rhs.unwrap();\n let lhs1 = lhs_4rhs.unwrap();\n let lhs2 = lhs1.clone();\n let tmp = B::float_powf(lhs1, rhs);\n let value = B::float_mul(tmp, B::float_log(lhs2));\n let grad = B::float_mul(grad, value);\n\n broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match PowF\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroPowf::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, 
&rhs])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = prep.checkpoint(&lhs);\n let rhs_state = prep.checkpoint(&rhs);\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n B::float_powf(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_powf(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_sign(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sign;\n\n retro_unary!(RetroSign, B::float_sign);\n\n impl<B: Backend> Backward<B, 1> for Sign {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad|\n // Always return 0 because the derivative of the sign function\n // does not contribute to gradient updates in a meaningful way.\n B::float_mul_scalar(grad, 0.elem()));\n }\n }\n\n Sign.prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSign::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateless(B::float_sign(tensor.primitive))\n }\n\n fn float_expand(tensor: FloatTensor<Self>, shape: Shape) -> FloatTensor<Self> {\n // D1: tensor, D2: shape\n #[derive(Debug)]\n struct ExpandDim;\n\n #[derive(new, Debug)]\n struct RetroExpand<B: Backend> {\n input_id: NodeID,\n shape: Shape,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroExpand<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_expand(input, self.shape.clone());\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for ExpandDim {\n type State = (Shape, Shape);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape_in, shape_out) = ops.state;\n let ndims_in = shape_in.num_dims();\n let ndims_out = shape_out.num_dims();\n\n let 
mut shape_expanded = vec![1; ndims_out];\n\n debug_assert!(ndims_out >= ndims_in);\n\n for i in 0..ndims_in {\n shape_expanded[i + (ndims_out - ndims_in)] = shape_in.dims[i];\n }\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let shape_grad = grad.shape();\n let mut grad = grad;\n\n #[allow(clippy::needless_range_loop)]\n for i in 0..ndims_out {\n if shape_expanded[i] == 1 && shape_grad.dims[i] != 1 {\n grad = B::float_sum_dim(grad, i);\n }\n }\n\n B::float_reshape(grad, shape_in)\n });\n }\n }\n\n match ExpandDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroExpand::<B>::new(tensor.node.id, shape.clone()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (tensor.primitive.shape(), shape.clone()),\n B::float_expand(tensor.primitive, shape),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_expand(tensor.primitive, shape)),\n }\n }\n\n fn float_sort(tensor: FloatTensor<Self>, dim: usize, descending: bool) -> FloatTensor<Self> {\n match super::sort::SortDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let shape = tensor.primitive.shape();\n let (tensor, indices) =\n B::float_sort_with_indices(tensor.primitive, dim, descending);\n prep.finish((indices, shape), tensor)\n }\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_sort(tensor.primitive, dim, descending))\n }\n }\n }\n\n fn float_sort_with_indices(\n tensor: FloatTensor<Self>,\n dim: usize,\n descending: bool,\n ) -> (FloatTensor<Self>, IntTensor<B>) {\n match super::sort::SortDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let shape = tensor.primitive.shape();\n let (tensor, indices) =\n B::float_sort_with_indices(tensor.primitive, dim, descending);\n let tensor = prep.finish((indices.clone(), shape), tensor);\n\n (tensor, indices)\n }\n OpsKind::UnTracked(prep) => {\n let (tensor, indices) =\n 
B::float_sort_with_indices(tensor.primitive, dim, descending);\n let tensor = prep.finish(tensor);\n\n (tensor, indices)\n }\n }\n }\n\n fn float_argsort(tensor: FloatTensor<Self>, dim: usize, descending: bool) -> IntTensor<B> {\n B::float_argsort(tensor.primitive, dim, descending)\n }\n\n fn float_repeat_dim(tensor: FloatTensor<Self>, dim: usize, times: usize) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Repeat;\n\n #[derive(new, Debug)]\n struct RetroRepeat<B: Backend> {\n tensor_id: NodeID,\n dim: usize,\n times: usize,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroRepeat<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);\n let out = B::float_repeat_dim(tensor, self.dim, self.times);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for Repeat {\n type State = (usize, usize);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, times) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let mut dims = grad.shape().dims;\n let orig_dim_size = dims[dim] / times;\n if orig_dim_size > 1 {\n dims[dim] = orig_dim_size;\n let orig_dims = dims.clone();\n dims.insert(dim + 1, times); // shape [..., orig_dim_size, times, ...]\n let grad = B::float_reshape(grad, Shape::from(dims));\n let grad = B::float_sum_dim(grad, dim + 1); // sum over repeat times\n B::float_reshape(grad, Shape::from(orig_dims))\n } else {\n B::float_sum_dim(grad, dim)\n }\n });\n }\n }\n\n match Repeat\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroRepeat::<B>::new(tensor.node.id, dim, times))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (dim, times),\n B::float_repeat_dim(tensor.primitive, dim, times),\n ),\n OpsKind::UnTracked(prep) => {\n 
prep.finish(B::float_repeat_dim(tensor.primitive, dim, times))\n }\n }\n }\n\n fn float_cast(tensor: FloatTensor<Self>, dtype: burn_tensor::FloatDType) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_cast(tensor.primitive, dtype))\n }\n\n // TODO: Implement float_prod and float_sum\n // https://github.com/tracel-ai/burn/issues/1458\n}"
],
"name": "lhs",
"type": "FloatTensor<Self>"
},
{
"definitions": [
"impl<B: Backend, C: CheckpointStrategy> FloatTensorOps<Self> for Autodiff<B, C> {\n fn float_from_data(data: TensorData, device: &Device<Self>) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_from_data(data, device))\n }\n\n fn float_random(\n shape: Shape,\n distribution: burn_tensor::Distribution,\n device: &Device<Self>,\n ) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_random(shape, distribution, device))\n }\n\n fn float_zeros(shape: Shape, device: &Device<Self>) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_zeros(shape, device))\n }\n\n fn float_ones(shape: Shape, device: &Device<Self>) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_ones(shape, device))\n }\n\n async fn float_into_data(tensor: FloatTensor<Self>) -> TensorData {\n B::float_into_data(tensor.primitive).await\n }\n\n fn float_device(tensor: &FloatTensor<Self>) -> Device<Self> {\n B::float_device(&tensor.primitive)\n }\n\n fn float_to_device(tensor: FloatTensor<Self>, device: &Device<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct ToDevice;\n\n impl<B: Backend> Backward<B, 1> for ToDevice {\n type State = B::Device;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_to_device(grad, &ops.state)\n });\n }\n }\n\n match ToDevice\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let device_old = B::float_device(&tensor.primitive);\n prep.finish(device_old, B::float_to_device(tensor.primitive, device))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_to_device(tensor.primitive, device)),\n }\n }\n\n fn float_empty(shape: Shape, device: &Device<Self>) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_empty(shape, device))\n }\n\n fn float_add(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Add;\n\n retro_binary!(RetroAdd, 
B::float_add);\n\n impl<B: Backend> Backward<B, 2> for Add {\n type State = (Shape, Shape);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape_lhs, shape_rhs) = ops.state;\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| broadcast_shape::<B>(grad, &shape_lhs),\n |grad| broadcast_shape::<B>(grad, &shape_rhs),\n );\n }\n }\n\n match Add\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroAdd::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (lhs.primitive.shape(), rhs.primitive.shape()),\n B::float_add(lhs.primitive, rhs.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_add(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_add_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct AddScalar;\n\n retro_unary_scalar!(RetroAddScalar, B::float_add_scalar);\n\n impl<B: Backend> Backward<B, 1> for AddScalar {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| grad);\n }\n }\n\n AddScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroAddScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateless(B::float_add_scalar(lhs.primitive, rhs))\n }\n\n fn float_sub(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sub;\n\n retro_binary!(RetroSub, B::float_sub);\n\n impl<B: Backend> Backward<B, 2> for Sub {\n type State = (Shape, Shape);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape_lhs, shape_rhs) = ops.state;\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| 
broadcast_shape::<B>(grad, &shape_lhs),\n |grad| broadcast_shape::<B>(B::float_neg(grad), &shape_rhs),\n );\n }\n }\n\n match Sub\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroSub::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (lhs.primitive.shape(), rhs.primitive.shape()),\n B::float_sub(lhs.primitive, rhs.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_sub(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_sub_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct SubScalar;\n\n retro_unary_scalar!(RetroSubScalar, B::float_sub_scalar);\n\n impl<B: Backend> Backward<B, 1> for SubScalar {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| grad);\n }\n }\n\n SubScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroSubScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateless(B::float_sub_scalar(lhs.primitive, rhs))\n }\n\n fn float_mul(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Mul;\n\n retro_binary!(RetroMul, B::float_mul);\n\n impl<B: Backend> Backward<B, 2> for Mul {\n type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (lhs, rhs, broadcast) = ops.state;\n let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));\n let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let grad = B::float_mul(grad, rhs.unwrap());\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n let grad = B::float_mul(grad, lhs.unwrap());\n 
broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let lhs_tracked = lhs.is_tracked();\n let rhs_tracked = rhs.is_tracked();\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match Mul\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroMul::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));\n let rhs_state = lhs_tracked.then(|| prep.checkpoint(&rhs));\n\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n B::float_mul(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_mul(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_mul_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct MulScalar;\n\n retro_unary_scalar!(RetroMulScalar, B::float_mul_scalar);\n\n impl<B: Backend> Backward<B, 1> for MulScalar {\n type State = FloatElem<B>;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_mul_scalar(grad, ops.state)\n });\n }\n }\n\n match MulScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroMulScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(rhs, B::float_mul_scalar(lhs.primitive, rhs)),\n OpsKind::UnTracked(prep) => prep.finish(B::float_mul_scalar(lhs.primitive, rhs)),\n }\n }\n\n fn float_div(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Div;\n\n retro_binary!(RetroDiv, B::float_div);\n\n impl<B: Backend> Backward<B, 2> for Div {\n type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut 
Checkpointer,\n ) {\n let (lhs, rhs, broadcast) = ops.state;\n let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));\n let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));\n let [rhs_4lhs, rhs_4rhs] = duplicate(&ops.parents, rhs);\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let rhs = rhs_4lhs.unwrap();\n let value = B::float_powf_scalar(rhs, -1.0);\n let grad = B::float_mul(grad, value);\n\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n let rhs = rhs_4rhs.unwrap();\n let lhs = lhs.unwrap();\n let value = B::float_div(B::float_neg(lhs), B::float_powf_scalar(rhs, 2.0));\n let grad = B::float_mul(grad, value);\n\n broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let lhs_tracked = lhs.is_tracked();\n let rhs_tracked = rhs.is_tracked();\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match Div\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroDiv::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));\n let rhs_state = (lhs_tracked || rhs_tracked).then(|| prep.checkpoint(&rhs));\n\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n B::float_div(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_div(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_div_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct DivScalar;\n\n retro_unary_scalar!(RetroDivScalar, B::float_div_scalar);\n\n impl<B: Backend> Backward<B, 1> for DivScalar {\n type State = FloatElem<B>;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let tmp = 1.0 / ops.state.elem::<f32>();\n B::float_mul_scalar(grad, tmp.elem())\n });\n }\n 
}\n\n match DivScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroDivScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(rhs, B::float_div_scalar(lhs.primitive, rhs)),\n OpsKind::UnTracked(prep) => prep.finish(B::float_div_scalar(lhs.primitive, rhs)),\n }\n }\n\n fn float_remainder(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Rem;\n\n retro_binary!(RetroRem, B::float_remainder);\n\n impl<B: Backend> Backward<B, 2> for Rem {\n type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (lhs, rhs, broadcast) = ops.state;\n let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));\n let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n // remainder(x, y) = x - floor(x / y) * y\n // partial(x - floor(x / y) * y, x) = 1\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n // partial(x - floor(x / y) * y, y) = - floor(x / y)\n let rhs = rhs.unwrap();\n let lhs = lhs.unwrap();\n let value = B::float_neg(B::float_floor(B::float_div(lhs, rhs)));\n let grad = B::float_mul(grad, value);\n broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let lhs_tracked = lhs.is_tracked();\n let rhs_tracked = rhs.is_tracked();\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match Rem\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroRem::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));\n let rhs_state = (lhs_tracked || rhs_tracked).then(|| prep.checkpoint(&rhs));\n\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n 
B::float_remainder(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_remainder(lhs.primitive, rhs.primitive))\n }\n }\n }\n\n fn float_remainder_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct RemainderScalar;\n\n retro_unary_scalar!(RetroRemainderScalar, B::float_remainder_scalar);\n\n impl<B: Backend> Backward<B, 1> for RemainderScalar {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| grad);\n }\n }\n\n RemainderScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroRemainderScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateless(B::float_remainder_scalar(lhs.primitive, rhs))\n }\n\n fn float_matmul(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Matmul;\n\n impl<B: Backend> Backward<B, 2> for Matmul {\n type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (lhs, rhs, broadcast) = ops.state;\n let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));\n let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let rhs = B::float_transpose(rhs.unwrap());\n let grad = B::float_matmul(grad, rhs);\n\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n let lhs = B::float_transpose(lhs.unwrap());\n let grad = B::float_matmul(lhs, grad);\n\n broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let lhs_tracked = lhs.is_tracked();\n let rhs_tracked = rhs.is_tracked();\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match Matmul\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n 
.compute_bound()\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));\n let rhs_state = lhs_tracked.then(|| prep.checkpoint(&rhs));\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n B::float_matmul(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_matmul(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_neg(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Neg;\n\n retro_unary!(RetroNeg, B::float_neg);\n\n impl<B: Backend> Backward<B, 1> for Neg {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| B::float_neg(grad));\n }\n }\n\n Neg.prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroNeg::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateless(B::float_neg(tensor.primitive))\n }\n\n fn float_recip(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Recip;\n\n retro_unary!(RetroRecip, B::float_recip);\n\n impl<B: Backend> Backward<B, 1> for Recip {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let tensor = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let tmp = B::float_powf_scalar(tensor, -2.0);\n let value = B::float_neg(tmp);\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Recip\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroRecip::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_recip(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_recip(tensor.primitive)),\n }\n }\n\n fn float_swap_dims(tensor: 
FloatTensor<Self>, dim1: usize, dim2: usize) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct SwapDim;\n\n #[derive(new, Debug)]\n struct RetroSwapDims<B: Backend> {\n input_id: NodeID,\n dim1: usize,\n dim2: usize,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroSwapDims<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_swap_dims(input, self.dim1, self.dim2);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for SwapDim {\n type State = (usize, usize);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim1, dim2) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_swap_dims(grad, dim2, dim1)\n });\n }\n }\n\n match SwapDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSwapDims::<B>::new(tensor.node.id, dim1, dim2))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (dim1, dim2),\n B::float_swap_dims(tensor.primitive, dim1, dim2),\n ),\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_swap_dims(tensor.primitive, dim1, dim2))\n }\n }\n }\n\n fn float_permute(tensor: FloatTensor<Self>, axes: &[usize]) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct PermuteDim;\n\n #[derive(new, Debug)]\n struct RetroPermuteDims<B: Backend> {\n input_id: NodeID,\n axes: Vec<usize>,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroPermuteDims<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_permute(input, &self.axes);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for PermuteDim {\n type State = Vec<usize>;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: 
&mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let axes = ops.state;\n\n let mut inverse = vec![0usize; axes.len()];\n axes.iter()\n .enumerate()\n .for_each(|(i, &axis)| inverse[axis] = i);\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_permute(grad, &inverse)\n });\n }\n }\n\n match PermuteDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroPermuteDims::<B>::new(tensor.node.id, axes.to_vec()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n prep.finish(axes.to_vec(), B::float_permute(tensor.primitive, axes))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_permute(tensor.primitive, axes)),\n }\n }\n\n fn float_flip(tensor: FloatTensor<Self>, axes: &[usize]) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct FlipDim;\n\n #[derive(new, Debug)]\n struct RetroFlipDims<B: Backend> {\n input_id: NodeID,\n axes: Vec<usize>,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroFlipDims<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_flip(input, &self.axes);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for FlipDim {\n type State = Vec<usize>;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let axes = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_flip(grad, &axes)\n });\n }\n }\n\n match FlipDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroFlipDims::<B>::new(tensor.node.id, axes.to_vec()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n prep.finish(axes.to_vec(), B::float_flip(tensor.primitive, axes))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_flip(tensor.primitive, axes)),\n }\n }\n\n fn float_reshape(tensor: FloatTensor<Self>, shape: 
Shape) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct ReshapeDim;\n\n #[derive(new, Debug)]\n struct RetroReshape<B: Backend> {\n input_id: NodeID,\n shape: Shape,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroReshape<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_reshape(input, self.shape.clone());\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for ReshapeDim {\n type State = (Shape, Shape);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape_original, shape) = ops.state;\n let ndims_out = shape.num_dims();\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let shape_grad = grad.shape();\n let mut grad = grad;\n\n for i in 0..ndims_out {\n if shape.dims[i] == 1 && shape_grad.dims[i] != 1 {\n grad = B::float_sum_dim(grad, i);\n }\n }\n\n B::float_reshape(grad, shape_original)\n });\n }\n }\n\n match ReshapeDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroReshape::<B>::new(tensor.node.id, shape.clone()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (tensor.primitive.shape(), shape.clone()),\n B::float_reshape(tensor.primitive, shape),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_reshape(tensor.primitive, shape)),\n }\n }\n\n fn float_gather(\n dim: usize,\n tensor: FloatTensor<Self>,\n indices: IntTensor<B>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Gather;\n\n impl<B: Backend> Backward<B, 1> for Gather {\n type State = (usize, IntTensor<B>, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, indices, shape, device) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let zeros = 
B::float_zeros(shape, &device);\n B::float_scatter(dim, zeros, indices, grad)\n });\n }\n }\n\n match Gather\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n dim,\n indices.clone(),\n tensor.primitive.shape(),\n B::float_device(&tensor.primitive),\n ),\n B::float_gather(dim, tensor.primitive, indices),\n ),\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_gather(dim, tensor.primitive, indices))\n }\n }\n }\n\n fn float_scatter(\n dim: usize,\n tensor: FloatTensor<Self>,\n indices: IntTensor<B>,\n value: FloatTensor<Self>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Scatter;\n\n impl<B: Backend> Backward<B, 2> for Scatter {\n type State = (usize, IntTensor<B>, Shape, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, indices, shape_lhs, shape_rhs, device) = ops.state;\n let [indices_4lhs, indices_4rhs] = duplicate(&ops.parents, Some(indices));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let zeros = B::float_zeros(shape_lhs, &device);\n B::float_scatter(dim, grad, indices_4lhs.unwrap(), zeros)\n },\n |grad| {\n let zeros = B::float_zeros(shape_rhs, &device);\n B::float_scatter(dim, zeros, indices_4rhs.unwrap(), grad)\n },\n );\n }\n }\n\n match Scatter\n .prepare::<C>([tensor.node, value.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n dim,\n indices.clone(),\n tensor.primitive.shape(),\n value.primitive.shape(),\n B::float_device(&value.primitive),\n ),\n B::float_scatter(dim, tensor.primitive, indices, value.primitive),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_scatter(\n dim,\n tensor.primitive,\n indices,\n value.primitive,\n )),\n }\n }\n\n fn float_select(\n tensor: FloatTensor<Self>,\n dim: usize,\n indices: IntTensor<B>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Select;\n\n 
#[derive(new, Debug)]\n struct RetroSelect<B: Backend> {\n input_id: NodeID,\n dim: usize,\n indices: IntTensor<B>,\n }\n\n impl<B: Backend> RetroForward for RetroSelect<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_select(input, self.dim, self.indices.clone());\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for Select {\n type State = (usize, IntTensor<B>, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, indices, shape, device) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let zeros = B::float_zeros(shape, &device);\n B::float_select_assign(zeros, dim, indices, grad)\n });\n }\n }\n\n match Select\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSelect::<B>::new(tensor.node.id, dim, indices.clone()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n dim,\n indices.clone(),\n tensor.primitive.shape(),\n B::float_device(&tensor.primitive),\n ),\n B::float_select(tensor.primitive, dim, indices),\n ),\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_select(tensor.primitive, dim, indices))\n }\n }\n }\n\n fn float_select_assign(\n tensor: FloatTensor<Self>,\n dim: usize,\n indices: IntTensor<B>,\n value: FloatTensor<Self>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct IndexSelectDimAssign;\n\n #[derive(new, Debug)]\n struct RetroSelectAssign<B: Backend> {\n tensor_id: NodeID,\n dim: usize,\n indices: IntTensor<B>,\n value_id: NodeID,\n }\n\n impl<B: Backend> RetroForward for RetroSelectAssign<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);\n let value = states.get_state::<B::FloatTensorPrimitive>(&self.value_id);\n 
let out = B::float_select_assign(tensor, self.dim, self.indices.clone(), value);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 2> for IndexSelectDimAssign {\n type State = (usize, IntTensor<B>);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, indices) = ops.state;\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| grad,\n |grad| B::float_select(grad, dim, indices),\n );\n }\n }\n\n match IndexSelectDimAssign\n .prepare::<C>([tensor.node.clone(), value.node.clone()])\n .memory_bound()\n .retro_forward(RetroSelectAssign::<B>::new(\n tensor.node.id,\n dim,\n indices.clone(),\n value.node.id,\n ))\n .parents([&tensor, &value])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (dim, indices.clone()),\n B::float_select_assign(tensor.primitive, dim, indices, value.primitive),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_select_assign(\n tensor.primitive,\n dim,\n indices,\n value.primitive,\n )),\n }\n }\n\n fn float_slice(\n tensor: FloatTensor<Self>,\n ranges: &[core::ops::Range<usize>],\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Index;\n\n #[derive(new, Debug)]\n struct RetroSlice<B: Backend> {\n tensor_id: NodeID,\n ranges: Vec<core::ops::Range<usize>>,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroSlice<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);\n let out = B::float_slice(tensor, &self.ranges);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for Index {\n type State = (Vec<core::ops::Range<usize>>, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (ranges, shape, device) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let zeros = 
B::float_zeros(shape, &device);\n B::float_slice_assign(zeros, &ranges, grad)\n });\n }\n }\n\n match Index\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSlice::<B>::new(tensor.node.id, ranges.to_vec()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n ranges.to_vec(),\n tensor.primitive.shape(),\n B::float_device(&tensor.primitive),\n ),\n B::float_slice(tensor.primitive, ranges),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_slice(tensor.primitive, ranges)),\n }\n }\n\n fn float_slice_assign(\n tensor: FloatTensor<Self>,\n ranges: &[core::ops::Range<usize>],\n value: FloatTensor<Self>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct SliceAssign;\n\n #[derive(new, Debug)]\n struct RetroSliceAssign<B: Backend> {\n tensor_id: NodeID,\n ranges: Vec<core::ops::Range<usize>>,\n value_id: NodeID,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroSliceAssign<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);\n let value = states.get_state::<B::FloatTensorPrimitive>(&self.value_id);\n let out = B::float_slice_assign(tensor, &self.ranges, value);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 2> for SliceAssign {\n type State = (Vec<core::ops::Range<usize>>, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (ranges, shape_rhs, device) = ops.state;\n let [ranges_4lhs, ranges_4rhs] = duplicate(&ops.parents, Some(ranges));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let zeros = B::float_zeros(shape_rhs, &device);\n B::float_slice_assign(grad, &ranges_4lhs.unwrap(), zeros)\n },\n |grad| B::float_slice(grad, &ranges_4rhs.unwrap()),\n );\n }\n }\n\n match SliceAssign\n .prepare::<C>([tensor.node.clone(), value.node.clone()])\n 
.memory_bound()\n .retro_forward(RetroSliceAssign::<B>::new(\n tensor.node.id,\n ranges.to_vec(),\n value.node.id,\n ))\n .parents([&tensor, &value])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n ranges.to_vec(),\n value.primitive.shape(),\n B::float_device(&value.primitive),\n ),\n B::float_slice_assign(tensor.primitive, ranges, value.primitive),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_slice_assign(\n tensor.primitive,\n ranges,\n value.primitive,\n )),\n }\n }\n\n fn float_mask_where(\n tensor: FloatTensor<Self>,\n mask: BoolTensor<Self>,\n source: FloatTensor<Self>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct MaskWhere;\n\n impl<B: Backend> Backward<B, 2> for MaskWhere {\n type State = (BoolTensor<B>, Shape, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (mask, shape_lhs, shape_rhs, device) = ops.state;\n let [mask_4lhs, mask_4rhs] = duplicate(&ops.parents, Some(mask));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let zeros = B::float_zeros(shape_lhs.clone(), &device);\n let grad = B::float_mask_where(grad, mask_4lhs.unwrap(), zeros);\n\n broadcast_shape::<B>(grad, &shape_lhs)\n },\n |grad| {\n let zeros = B::float_zeros(shape_rhs.clone(), &device);\n let grad = B::float_mask_where(zeros, mask_4rhs.unwrap(), grad);\n\n broadcast_shape::<B>(grad, &shape_rhs)\n },\n );\n }\n }\n\n match MaskWhere\n .prepare::<C>([tensor.node, source.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n mask.clone(),\n tensor.primitive.shape(),\n source.primitive.shape(),\n B::float_device(&source.primitive),\n ),\n B::float_mask_where(tensor.primitive, mask, source.primitive),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_mask_where(\n tensor.primitive,\n mask,\n source.primitive,\n )),\n }\n }\n\n fn float_mask_fill(\n tensor: FloatTensor<Self>,\n mask: 
BoolTensor<B>,\n value: FloatElem<B>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct MaskFill;\n\n impl<B: Backend> Backward<B, 1> for MaskFill {\n type State = BoolTensor<B>;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_mask_fill(grad, ops.state, 0.elem())\n });\n }\n }\n\n match MaskFill\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n mask.clone(),\n B::float_mask_fill(tensor.primitive, mask, value),\n ),\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_mask_fill(tensor.primitive, mask, value))\n }\n }\n }\n\n fn float_equal(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {\n B::float_equal(lhs.primitive, rhs.primitive)\n }\n\n fn float_equal_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {\n B::float_equal_elem(lhs.primitive, rhs)\n }\n\n fn float_greater(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {\n B::float_greater(lhs.primitive, rhs.primitive)\n }\n\n fn float_greater_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {\n B::float_greater_elem(lhs.primitive, rhs)\n }\n\n fn float_greater_equal(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {\n B::float_greater_equal(lhs.primitive, rhs.primitive)\n }\n\n fn float_greater_equal_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {\n B::float_greater_equal_elem(lhs.primitive, rhs)\n }\n\n fn float_lower(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {\n B::float_lower(lhs.primitive, rhs.primitive)\n }\n\n fn float_lower_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {\n B::float_lower_elem(lhs.primitive, rhs)\n }\n\n fn float_lower_equal(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {\n B::float_lower_equal(lhs.primitive, rhs.primitive)\n 
}\n\n fn float_lower_equal_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {\n B::float_lower_equal_elem(lhs.primitive, rhs)\n }\n\n fn float_detach(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n // When we detach a tensor, we remove it from the graph, but we still want to keep the\n // `require_grad` setting.\n let is_require_grad = Self::float_is_require_grad(&tensor);\n let tensor = AutodiffTensor::new(tensor.primitive);\n\n match is_require_grad {\n true => tensor.require_grad(),\n false => tensor,\n }\n }\n\n fn float_set_require_grad(tensor: FloatTensor<Self>, require_grad: bool) -> FloatTensor<Self> {\n if require_grad {\n return tensor.require_grad();\n }\n\n AutodiffTensor::new(tensor.primitive)\n }\n\n fn float_is_require_grad(tensor: &FloatTensor<Self>) -> bool {\n matches!(tensor.node.requirement, Requirement::Grad)\n }\n\n fn float_mean(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Mean;\n\n impl<B: Backend> Backward<B, 1> for Mean {\n type State = Shape;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let shape = ops.state;\n let val = 1_f64 / shape.num_elements() as f64;\n let ones = B::float_ones(shape, &B::float_device(&grad));\n let val = B::float_mul_scalar(ones, val.elem());\n\n let grad = unsqueeze_like::<B>(grad, val.shape());\n B::float_mul(val, grad)\n });\n }\n }\n\n match Mean.prepare::<C>([tensor.node]).compute_bound().stateful() {\n OpsKind::Tracked(prep) => {\n prep.finish(tensor.primitive.shape(), B::float_mean(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_mean(tensor.primitive)),\n }\n }\n\n fn float_sum(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sum;\n\n impl<B: Backend> Backward<B, 1> for Sum {\n type State = Shape;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut 
Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let val = B::float_ones(ops.state, &B::float_device(&grad));\n\n let grad = unsqueeze_like::<B>(grad, val.shape());\n B::float_mul(val, grad)\n });\n }\n }\n\n match Sum.prepare::<C>([tensor.node]).compute_bound().stateful() {\n OpsKind::Tracked(prep) => {\n prep.finish(tensor.primitive.shape(), B::float_sum(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_sum(tensor.primitive)),\n }\n }\n\n fn float_mean_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct MeanDim;\n\n impl<B: Backend> Backward<B, 1> for MeanDim {\n type State = (Shape, usize);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape, dim) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let val = 1_f64 / shape.dims[dim] as f64;\n let ones = B::float_ones(shape, &B::float_device(&grad));\n let val = B::float_mul_scalar(ones, B::FloatElem::from_elem(val));\n\n let grad = B::float_sum_dim(grad, dim);\n B::float_mul(val, grad)\n });\n }\n }\n\n match MeanDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (tensor.primitive.shape(), dim),\n B::float_mean_dim(tensor.primitive, dim),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_mean_dim(tensor.primitive, dim)),\n }\n }\n\n fn float_sum_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct SumDim;\n\n impl<B: Backend> Backward<B, 1> for SumDim {\n type State = (Shape, usize);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape, dim) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let ones = B::float_ones(shape, &B::float_device(&grad));\n let grad = 
B::float_sum_dim(grad, dim);\n\n B::float_mul(ones, grad)\n });\n }\n }\n\n match SumDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (tensor.primitive.shape(), dim),\n B::float_sum_dim(tensor.primitive, dim),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_sum_dim(tensor.primitive, dim)),\n }\n }\n\n fn float_argmax(tensor: FloatTensor<Self>, dim: usize) -> IntTensor<B> {\n B::float_argmax(tensor.primitive, dim)\n }\n\n fn float_argmin(tensor: FloatTensor<Self>, dim: usize) -> IntTensor<B> {\n B::float_argmin(tensor.primitive, dim)\n }\n\n fn float_exp(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Exp;\n\n retro_unary!(RetroExp, B::float_exp);\n\n impl<B: Backend> Backward<B, 1> for Exp {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n let output = B::float_exp(input);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_mul(grad, output)\n });\n }\n }\n\n match Exp\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroExp::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_exp(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_exp(tensor.primitive)),\n }\n }\n\n fn float_log(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Log;\n\n retro_unary!(RetroLog, B::float_log);\n\n impl<B: Backend> Backward<B, 1> for Log {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = 
B::float_powf_scalar(input, -1.0);\n B::float_mul(grad, value)\n });\n }\n }\n\n match Log\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroLog::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_log(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_log(tensor.primitive)),\n }\n }\n\n fn float_log1p(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Log1P;\n\n retro_unary!(RetroLog1P, B::float_log1p);\n\n impl<B: Backend> Backward<B, 1> for Log1P {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_add_scalar(input, 1.elem());\n let value = B::float_powf_scalar(value, -1.0);\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Log1P\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroLog1P::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_log1p(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_log1p(tensor.primitive)),\n }\n }\n\n fn float_powf_scalar(tensor: FloatTensor<Self>, value: f32) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct PowfScalar;\n\n #[derive(new, Debug)]\n struct RetroPowfScalar<B: Backend> {\n lhs_id: NodeID,\n rhs: f32,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroPowfScalar<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let lhs = states.get_state::<B::FloatTensorPrimitive>(&self.lhs_id);\n let out = B::float_powf_scalar(lhs, self.rhs);\n states.save(out_node, out)\n }\n }\n\n impl<B: 
Backend> Backward<B, 1> for PowfScalar {\n type State = (NodeID, f32);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (tensor_id, value) = ops.state;\n let tensor = checkpointer.retrieve_node_output(tensor_id);\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let tmp = B::float_powf_scalar(tensor, value - 1.0);\n let value = B::float_mul_scalar(tmp, value.elem());\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match PowfScalar\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroPowfScalar::<B>::new(tensor.node.id, value))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = (prep.checkpoint(&tensor), value);\n prep.finish(state, B::float_powf_scalar(tensor.primitive, value))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_powf_scalar(tensor.primitive, value)),\n }\n }\n\n fn float_sqrt(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sqrt;\n\n retro_unary!(RetroSqrt, B::float_sqrt);\n\n impl<B: Backend> Backward<B, 1> for Sqrt {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_div_scalar(B::float_powf_scalar(input, -0.5), 2.elem());\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Sqrt\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSqrt::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_sqrt(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_sqrt(tensor.primitive)),\n }\n }\n\n fn float_abs(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Abs;\n\n 
retro_unary!(RetroAbs, B::float_abs);\n\n impl<B: Backend> Backward<B, 1> for Abs {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let tensor: B::FloatTensorPrimitive = checkpointer.retrieve_node_output(ops.state);\n let state = B::float_sign(tensor);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_mul(grad, state)\n });\n }\n }\n\n match Abs\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroAbs::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_abs(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_abs(tensor.primitive)),\n }\n }\n\n fn float_cos(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Cos;\n\n retro_unary!(RetroCos, B::float_cos);\n\n impl<B: Backend> Backward<B, 1> for Cos {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_neg(B::float_sin(input));\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Cos\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroCos::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_cos(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_cos(tensor.primitive)),\n }\n }\n\n fn float_sin(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sin;\n\n retro_unary!(RetroSin, B::float_sin);\n\n impl<B: Backend> Backward<B, 1> for Sin {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 
1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let state = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_cos(state);\n B::float_mul(grad, value)\n });\n }\n }\n\n match Sin\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSin::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_sin(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_sin(tensor.primitive)),\n }\n }\n\n fn float_tanh(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Tanh;\n\n retro_unary!(RetroTanh, B::float_tanh);\n\n impl<B: Backend> Backward<B, 1> for Tanh {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n let state = B::float_tanh(input);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_add_scalar(\n B::float_neg(B::float_powf_scalar(state, 2.0)),\n 1.elem(),\n );\n B::float_mul(grad, value)\n });\n }\n }\n\n match Tanh\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroTanh::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_tanh(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_tanh(tensor.primitive)),\n }\n }\n\n fn float_round(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Round;\n retro_unary!(RetroRound, B::float_round);\n\n impl<B: Backend> Backward<B, 1> for Round {\n type State = (Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n 
            ) {
                let (shape, device) = ops.state;
                // round() is piecewise constant, so its derivative is zero
                // (almost) everywhere: propagate a zeros tensor as the gradient.
                unary::<B, _>(ops.parents, ops.node, grads, |_grad| {
                    B::float_zeros(shape, &device)
                })
            }
        }

        match Round
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroRound::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(preps) => preps.finish(
                // State: input shape + device, needed to build the zero gradient.
                (tensor.primitive.shape(), B::float_device(&tensor.primitive)),
                B::float_round(tensor.primitive),
            ),
            OpsKind::UnTracked(preps) => preps.finish(B::float_round(tensor.primitive)),
        }
    }

    // Element-wise floor with autodiff support. Like `round`, floor is
    // piecewise constant, so the registered backward produces a zeros tensor.
    fn float_floor(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Floor;
        retro_unary!(RetroFloor, B::float_floor);

        impl<B: Backend> Backward<B, 1> for Floor {
            // (shape, device) of the input, used to materialize the zero gradient.
            type State = (Shape, B::Device);

            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (shape, device) = ops.state;
                unary::<B, _>(ops.parents, ops.node, grads, |_grad| {
                    B::float_zeros(shape, &device)
                })
            }
        }

        match Floor
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
            .retro_forward(RetroFloor::<B>::new(tensor.node.id))
            .parents([&tensor])
            .stateful()
        {
            OpsKind::Tracked(preps) => preps.finish(
                (tensor.primitive.shape(), B::float_device(&tensor.primitive)),
                B::float_floor(tensor.primitive),
            ),
            OpsKind::UnTracked(preps) => preps.finish(B::float_floor(tensor.primitive)),
        }
    }

    // Element-wise ceil with autodiff support; zero gradient like floor/round.
    fn float_ceil(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        #[derive(Debug)]
        struct Ceil;
        retro_unary!(RetroCeil, B::float_ceil);

        impl<B: Backend> Backward<B, 1> for Ceil {
            type State = (Shape, B::Device);

            fn backward(
                self,
                ops: Ops<Self::State, 1>,
                grads: &mut Gradients,
                _checkpointer: &mut Checkpointer,
            ) {
                let (shape, device) = ops.state;
                unary::<B, _>(ops.parents, ops.node, grads, |_grad| {
                    B::float_zeros(shape, &device)
                })
            }
        }

        match Ceil
            .prepare::<C>([tensor.node.clone()])
            .memory_bound()
.retro_forward(RetroCeil::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (tensor.primitive.shape(), B::float_device(&tensor.primitive)),\n B::float_floor(tensor.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_floor(tensor.primitive)),\n }\n }\n\n fn float_erf(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Erf;\n\n retro_unary!(RetroErf, B::float_erf);\n\n impl<B: Backend> Backward<B, 1> for Erf {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let ops = checkpointer.retrieve_node_output(ops.state);\n let exponent = B::float_neg(B::float_powf_scalar(ops, 2.0));\n let numerator = B::float_mul_scalar(B::float_exp(exponent), 2.0.elem());\n let denominator = core::f64::consts::PI.sqrt().elem();\n let value = B::float_div_scalar(numerator, denominator);\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Erf\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroErf::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_erf(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_erf(tensor.primitive)),\n }\n }\n\n fn float_cat(tensors: Vec<FloatTensor<Self>>, dim: usize) -> FloatTensor<Self> {\n #[derive(new, Debug)]\n struct CatStep<B: Backend> {\n nodes: Vec<Option<NodeRef>>,\n // The dimension of each tensor along the dim dimension.\n // This indicates the number of dimension concatenated for each tensor.\n dim_sizes: Vec<usize>,\n output: NodeRef,\n phantom: PhantomData<B>,\n dim: usize,\n }\n\n impl<B: Backend> Step for CatStep<B> {\n fn step(self: Box<Self>, grads: &mut Gradients, _checkpointer: &mut Checkpointer) {\n let grad = 
grads.consume::<B>(&self.output);\n let ranges: Vec<_> = grad.shape().dims.iter().map(|v| 0..*v).collect();\n\n let mut current_index = 0;\n\n self.nodes\n .into_iter()\n .zip(self.dim_sizes)\n .filter_map(|(node, dim_size)| node.map(|node| (node, dim_size)))\n .for_each(|(node, dim_size)| {\n let mut ranges = ranges.clone();\n ranges[self.dim] = current_index..dim_size + current_index;\n current_index += dim_size;\n grads.register::<B>(node.id, B::float_slice(grad.clone(), &ranges));\n });\n }\n\n fn node(&self) -> NodeID {\n self.output.id\n }\n\n fn parents(&self) -> Vec<NodeID> {\n self.nodes\n .iter()\n .filter_map(|node| node.clone())\n .map(|node| node.id)\n .collect()\n }\n fn depth(&self) -> usize {\n self.output.order\n }\n }\n\n let mut nodes = Vec::with_capacity(tensors.len());\n let mut primitives = Vec::with_capacity(tensors.len());\n let mut dim_sizes = Vec::with_capacity(tensors.len());\n\n tensors.into_iter().for_each(|tensor| {\n dim_sizes.push(tensor.primitive.shape().dims[dim]);\n nodes.push(tensor.node);\n primitives.push(tensor.primitive);\n });\n\n let requirement = Requirement::from_nodes(&nodes);\n\n // For simplicity, this operation does not checkpoint anything\n let cat_computing_property = ComputingProperty::Ambiguous;\n let checkpointer_builder = CheckpointerBuilder::default();\n\n let output = B::float_cat(primitives, dim);\n if requirement.is_none() {\n return AutodiffTensor::from_parents(\n output,\n &nodes,\n requirement,\n cat_computing_property,\n );\n }\n\n let output =\n AutodiffTensor::from_parents(output, &nodes, requirement, cat_computing_property);\n let nodes = nodes\n .into_iter()\n .map(|node| node.clone_if_require_grad())\n .collect::<Vec<_>>();\n\n let ops = CatStep::<B>::new(nodes, dim_sizes, output.node.clone(), dim);\n output.register_step(ops, checkpointer_builder)\n }\n\n fn float_max_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {\n match MaxMinDim\n .prepare::<C>([tensor.node])\n 
.compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let shape = tensor.primitive.shape();\n let (tensor, index) = B::float_max_dim_with_indices(tensor.primitive, dim);\n prep.finish((index, shape), tensor)\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_max_dim(tensor.primitive, dim)),\n }\n }\n fn float_max_dim_with_indices(\n tensor: FloatTensor<Self>,\n dim: usize,\n ) -> (FloatTensor<Self>, IntTensor<B>) {\n match MaxMinDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let shape = tensor.primitive.shape();\n let (tensor, index) = B::float_max_dim_with_indices(tensor.primitive, dim);\n let tensor = prep.finish((index.clone(), shape), tensor);\n\n (tensor, index)\n }\n OpsKind::UnTracked(prep) => {\n let (tensor, index) = B::float_max_dim_with_indices(tensor.primitive, dim);\n let tensor = prep.finish(tensor);\n\n (tensor, index)\n }\n }\n }\n fn float_min_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {\n match MaxMinDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let shape = tensor.primitive.shape();\n let (tensor, index) = B::float_min_dim_with_indices(tensor.primitive, dim);\n prep.finish((index, shape), tensor)\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_min_dim(tensor.primitive, dim)),\n }\n }\n fn float_min_dim_with_indices(\n tensor: FloatTensor<Self>,\n dim: usize,\n ) -> (FloatTensor<Self>, IntTensor<B>) {\n match MaxMinDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let shape = tensor.primitive.shape();\n let (tensor, index) = B::float_min_dim_with_indices(tensor.primitive, dim);\n let tensor = prep.finish((index.clone(), shape), tensor);\n\n (tensor, index)\n }\n OpsKind::UnTracked(prep) => {\n let (tensor, index) = B::float_min_dim_with_indices(tensor.primitive, dim);\n let tensor = prep.finish(tensor);\n\n (tensor, index)\n }\n }\n }\n\n 
fn float_into_int(tensor: FloatTensor<Self>) -> <Autodiff<B> as Backend>::IntTensorPrimitive {\n B::float_into_int(tensor.primitive)\n }\n\n fn float_powf(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct PowF;\n\n retro_binary!(RetroPowf, B::float_powf);\n\n impl<B: Backend> Backward<B, 2> for PowF {\n type State = (NodeID, NodeID, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (lhs_id, rhs_id, broadcast) = ops.state;\n let lhs: B::FloatTensorPrimitive = checkpointer.retrieve_node_output(lhs_id);\n let rhs: B::FloatTensorPrimitive = checkpointer.retrieve_node_output(rhs_id);\n\n // Both lhs and rhs are needed for both lhs and rhs gradients, but we clone them\n // the number of times required by the parents specification.\n let [rhs_4lhs, rhs_4rhs] = duplicate(&ops.parents, Some(rhs));\n let [lhs_4lhs, lhs_4rhs] = duplicate(&ops.parents, Some(lhs));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n //rhs*(lhs.val**(rhs-1))*grad\n let rhs1 = rhs_4lhs.unwrap();\n let rhs2 = rhs1.clone();\n let lhs = lhs_4lhs.unwrap();\n\n let tmp = B::float_powf(\n lhs,\n B::float_sub_scalar(rhs1, B::FloatElem::from_elem(1.0)),\n );\n let value = B::float_mul(tmp, rhs2);\n let grad = B::float_mul(grad, value);\n\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n //lhs**rhs * ln(lhs) * grad\n let rhs = rhs_4rhs.unwrap();\n let lhs1 = lhs_4rhs.unwrap();\n let lhs2 = lhs1.clone();\n let tmp = B::float_powf(lhs1, rhs);\n let value = B::float_mul(tmp, B::float_log(lhs2));\n let grad = B::float_mul(grad, value);\n\n broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match PowF\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroPowf::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, 
&rhs])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = prep.checkpoint(&lhs);\n let rhs_state = prep.checkpoint(&rhs);\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n B::float_powf(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_powf(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_sign(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sign;\n\n retro_unary!(RetroSign, B::float_sign);\n\n impl<B: Backend> Backward<B, 1> for Sign {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad|\n // Always return 0 because the derivative of the sign function\n // does not contribute to gradient updates in a meaningful way.\n B::float_mul_scalar(grad, 0.elem()));\n }\n }\n\n Sign.prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSign::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateless(B::float_sign(tensor.primitive))\n }\n\n fn float_expand(tensor: FloatTensor<Self>, shape: Shape) -> FloatTensor<Self> {\n // D1: tensor, D2: shape\n #[derive(Debug)]\n struct ExpandDim;\n\n #[derive(new, Debug)]\n struct RetroExpand<B: Backend> {\n input_id: NodeID,\n shape: Shape,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroExpand<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_expand(input, self.shape.clone());\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for ExpandDim {\n type State = (Shape, Shape);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape_in, shape_out) = ops.state;\n let ndims_in = shape_in.num_dims();\n let ndims_out = shape_out.num_dims();\n\n let 
mut shape_expanded = vec![1; ndims_out];\n\n debug_assert!(ndims_out >= ndims_in);\n\n for i in 0..ndims_in {\n shape_expanded[i + (ndims_out - ndims_in)] = shape_in.dims[i];\n }\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let shape_grad = grad.shape();\n let mut grad = grad;\n\n #[allow(clippy::needless_range_loop)]\n for i in 0..ndims_out {\n if shape_expanded[i] == 1 && shape_grad.dims[i] != 1 {\n grad = B::float_sum_dim(grad, i);\n }\n }\n\n B::float_reshape(grad, shape_in)\n });\n }\n }\n\n match ExpandDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroExpand::<B>::new(tensor.node.id, shape.clone()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (tensor.primitive.shape(), shape.clone()),\n B::float_expand(tensor.primitive, shape),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_expand(tensor.primitive, shape)),\n }\n }\n\n fn float_sort(tensor: FloatTensor<Self>, dim: usize, descending: bool) -> FloatTensor<Self> {\n match super::sort::SortDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let shape = tensor.primitive.shape();\n let (tensor, indices) =\n B::float_sort_with_indices(tensor.primitive, dim, descending);\n prep.finish((indices, shape), tensor)\n }\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_sort(tensor.primitive, dim, descending))\n }\n }\n }\n\n fn float_sort_with_indices(\n tensor: FloatTensor<Self>,\n dim: usize,\n descending: bool,\n ) -> (FloatTensor<Self>, IntTensor<B>) {\n match super::sort::SortDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let shape = tensor.primitive.shape();\n let (tensor, indices) =\n B::float_sort_with_indices(tensor.primitive, dim, descending);\n let tensor = prep.finish((indices.clone(), shape), tensor);\n\n (tensor, indices)\n }\n OpsKind::UnTracked(prep) => {\n let (tensor, indices) =\n 
B::float_sort_with_indices(tensor.primitive, dim, descending);\n let tensor = prep.finish(tensor);\n\n (tensor, indices)\n }\n }\n }\n\n fn float_argsort(tensor: FloatTensor<Self>, dim: usize, descending: bool) -> IntTensor<B> {\n B::float_argsort(tensor.primitive, dim, descending)\n }\n\n fn float_repeat_dim(tensor: FloatTensor<Self>, dim: usize, times: usize) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Repeat;\n\n #[derive(new, Debug)]\n struct RetroRepeat<B: Backend> {\n tensor_id: NodeID,\n dim: usize,\n times: usize,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroRepeat<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);\n let out = B::float_repeat_dim(tensor, self.dim, self.times);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for Repeat {\n type State = (usize, usize);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, times) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let mut dims = grad.shape().dims;\n let orig_dim_size = dims[dim] / times;\n if orig_dim_size > 1 {\n dims[dim] = orig_dim_size;\n let orig_dims = dims.clone();\n dims.insert(dim + 1, times); // shape [..., orig_dim_size, times, ...]\n let grad = B::float_reshape(grad, Shape::from(dims));\n let grad = B::float_sum_dim(grad, dim + 1); // sum over repeat times\n B::float_reshape(grad, Shape::from(orig_dims))\n } else {\n B::float_sum_dim(grad, dim)\n }\n });\n }\n }\n\n match Repeat\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroRepeat::<B>::new(tensor.node.id, dim, times))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (dim, times),\n B::float_repeat_dim(tensor.primitive, dim, times),\n ),\n OpsKind::UnTracked(prep) => {\n 
prep.finish(B::float_repeat_dim(tensor.primitive, dim, times))\n }\n }\n }\n\n fn float_cast(tensor: FloatTensor<Self>, dtype: burn_tensor::FloatDType) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_cast(tensor.primitive, dtype))\n }\n\n // TODO: Implement float_prod and float_sum\n // https://github.com/tracel-ai/burn/issues/1458\n}"
],
"name": "rhs",
"type": "FloatTensor<Self>"
}
],
"end_line": 602,
"name": "float_matmul",
"signature": "fn float_matmul(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self>",
"start_line": 546
} | {
"class_name": "impl<B: Backend, C: CheckpointStrategy> FloatTensorOps<Self> for Autodiff<B, C> {\n fn float_from_data(data: TensorData, device: &Device<Self>) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_from_data(data, device))\n }\n\n fn float_random(\n shape: Shape,\n distribution: burn_tensor::Distribution,\n device: &Device<Self>,\n ) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_random(shape, distribution, device))\n }\n\n fn float_zeros(shape: Shape, device: &Device<Self>) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_zeros(shape, device))\n }\n\n fn float_ones(shape: Shape, device: &Device<Self>) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_ones(shape, device))\n }\n\n async fn float_into_data(tensor: FloatTensor<Self>) -> TensorData {\n B::float_into_data(tensor.primitive).await\n }\n\n fn float_device(tensor: &FloatTensor<Self>) -> Device<Self> {\n B::float_device(&tensor.primitive)\n }\n\n fn float_to_device(tensor: FloatTensor<Self>, device: &Device<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct ToDevice;\n\n impl<B: Backend> Backward<B, 1> for ToDevice {\n type State = B::Device;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_to_device(grad, &ops.state)\n });\n }\n }\n\n match ToDevice\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let device_old = B::float_device(&tensor.primitive);\n prep.finish(device_old, B::float_to_device(tensor.primitive, device))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_to_device(tensor.primitive, device)),\n }\n }\n\n fn float_empty(shape: Shape, device: &Device<Self>) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_empty(shape, device))\n }\n\n fn float_add(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Add;\n\n 
retro_binary!(RetroAdd, B::float_add);\n\n impl<B: Backend> Backward<B, 2> for Add {\n type State = (Shape, Shape);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape_lhs, shape_rhs) = ops.state;\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| broadcast_shape::<B>(grad, &shape_lhs),\n |grad| broadcast_shape::<B>(grad, &shape_rhs),\n );\n }\n }\n\n match Add\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroAdd::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (lhs.primitive.shape(), rhs.primitive.shape()),\n B::float_add(lhs.primitive, rhs.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_add(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_add_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct AddScalar;\n\n retro_unary_scalar!(RetroAddScalar, B::float_add_scalar);\n\n impl<B: Backend> Backward<B, 1> for AddScalar {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| grad);\n }\n }\n\n AddScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroAddScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateless(B::float_add_scalar(lhs.primitive, rhs))\n }\n\n fn float_sub(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sub;\n\n retro_binary!(RetroSub, B::float_sub);\n\n impl<B: Backend> Backward<B, 2> for Sub {\n type State = (Shape, Shape);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape_lhs, shape_rhs) = ops.state;\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n 
grads,\n |grad| broadcast_shape::<B>(grad, &shape_lhs),\n |grad| broadcast_shape::<B>(B::float_neg(grad), &shape_rhs),\n );\n }\n }\n\n match Sub\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroSub::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (lhs.primitive.shape(), rhs.primitive.shape()),\n B::float_sub(lhs.primitive, rhs.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_sub(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_sub_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct SubScalar;\n\n retro_unary_scalar!(RetroSubScalar, B::float_sub_scalar);\n\n impl<B: Backend> Backward<B, 1> for SubScalar {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| grad);\n }\n }\n\n SubScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroSubScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateless(B::float_sub_scalar(lhs.primitive, rhs))\n }\n\n fn float_mul(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Mul;\n\n retro_binary!(RetroMul, B::float_mul);\n\n impl<B: Backend> Backward<B, 2> for Mul {\n type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (lhs, rhs, broadcast) = ops.state;\n let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));\n let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let grad = B::float_mul(grad, rhs.unwrap());\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n let grad = B::float_mul(grad, 
lhs.unwrap());\n broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let lhs_tracked = lhs.is_tracked();\n let rhs_tracked = rhs.is_tracked();\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match Mul\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroMul::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));\n let rhs_state = lhs_tracked.then(|| prep.checkpoint(&rhs));\n\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n B::float_mul(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_mul(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_mul_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct MulScalar;\n\n retro_unary_scalar!(RetroMulScalar, B::float_mul_scalar);\n\n impl<B: Backend> Backward<B, 1> for MulScalar {\n type State = FloatElem<B>;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_mul_scalar(grad, ops.state)\n });\n }\n }\n\n match MulScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroMulScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(rhs, B::float_mul_scalar(lhs.primitive, rhs)),\n OpsKind::UnTracked(prep) => prep.finish(B::float_mul_scalar(lhs.primitive, rhs)),\n }\n }\n\n fn float_div(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Div;\n\n retro_binary!(RetroDiv, B::float_div);\n\n impl<B: Backend> Backward<B, 2> for Div {\n type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n 
checkpointer: &mut Checkpointer,\n ) {\n let (lhs, rhs, broadcast) = ops.state;\n let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));\n let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));\n let [rhs_4lhs, rhs_4rhs] = duplicate(&ops.parents, rhs);\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let rhs = rhs_4lhs.unwrap();\n let value = B::float_powf_scalar(rhs, -1.0);\n let grad = B::float_mul(grad, value);\n\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n let rhs = rhs_4rhs.unwrap();\n let lhs = lhs.unwrap();\n let value = B::float_div(B::float_neg(lhs), B::float_powf_scalar(rhs, 2.0));\n let grad = B::float_mul(grad, value);\n\n broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let lhs_tracked = lhs.is_tracked();\n let rhs_tracked = rhs.is_tracked();\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match Div\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroDiv::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));\n let rhs_state = (lhs_tracked || rhs_tracked).then(|| prep.checkpoint(&rhs));\n\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n B::float_div(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_div(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_div_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct DivScalar;\n\n retro_unary_scalar!(RetroDivScalar, B::float_div_scalar);\n\n impl<B: Backend> Backward<B, 1> for DivScalar {\n type State = FloatElem<B>;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let tmp = 1.0 / ops.state.elem::<f32>();\n B::float_mul_scalar(grad, 
tmp.elem())\n });\n }\n }\n\n match DivScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroDivScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(rhs, B::float_div_scalar(lhs.primitive, rhs)),\n OpsKind::UnTracked(prep) => prep.finish(B::float_div_scalar(lhs.primitive, rhs)),\n }\n }\n\n fn float_remainder(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Rem;\n\n retro_binary!(RetroRem, B::float_remainder);\n\n impl<B: Backend> Backward<B, 2> for Rem {\n type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (lhs, rhs, broadcast) = ops.state;\n let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));\n let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n // remainder(x, y) = x - floor(x / y) * y\n // partial(x - floor(x / y) * y, x) = 1\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n // partial(x - floor(x / y) * y, y) = - floor(x / y)\n let rhs = rhs.unwrap();\n let lhs = lhs.unwrap();\n let value = B::float_neg(B::float_floor(B::float_div(lhs, rhs)));\n let grad = B::float_mul(grad, value);\n broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let lhs_tracked = lhs.is_tracked();\n let rhs_tracked = rhs.is_tracked();\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match Rem\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroRem::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));\n let rhs_state = (lhs_tracked || rhs_tracked).then(|| prep.checkpoint(&rhs));\n\n prep.finish(\n (lhs_state, 
rhs_state, broadcast),\n B::float_remainder(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_remainder(lhs.primitive, rhs.primitive))\n }\n }\n }\n\n fn float_remainder_scalar(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct RemainderScalar;\n\n retro_unary_scalar!(RetroRemainderScalar, B::float_remainder_scalar);\n\n impl<B: Backend> Backward<B, 1> for RemainderScalar {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| grad);\n }\n }\n\n RemainderScalar\n .prepare::<C>([lhs.node.clone()])\n .memory_bound()\n .retro_forward(RetroRemainderScalar::<B>::new(lhs.node.id, rhs))\n .parents([&lhs])\n .stateless(B::float_remainder_scalar(lhs.primitive, rhs))\n }\n\n fn float_matmul(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Matmul;\n\n impl<B: Backend> Backward<B, 2> for Matmul {\n type State = (Option<NodeID>, Option<NodeID>, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (lhs, rhs, broadcast) = ops.state;\n let lhs = lhs.map(|lhs| checkpointer.retrieve_node_output(lhs));\n let rhs = rhs.map(|rhs| checkpointer.retrieve_node_output(rhs));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let rhs = B::float_transpose(rhs.unwrap());\n let grad = B::float_matmul(grad, rhs);\n\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n let lhs = B::float_transpose(lhs.unwrap());\n let grad = B::float_matmul(lhs, grad);\n\n broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let lhs_tracked = lhs.is_tracked();\n let rhs_tracked = rhs.is_tracked();\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match Matmul\n .prepare::<C>([lhs.node.clone(), 
rhs.node.clone()])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = rhs_tracked.then(|| prep.checkpoint(&lhs));\n let rhs_state = lhs_tracked.then(|| prep.checkpoint(&rhs));\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n B::float_matmul(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_matmul(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_neg(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Neg;\n\n retro_unary!(RetroNeg, B::float_neg);\n\n impl<B: Backend> Backward<B, 1> for Neg {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| B::float_neg(grad));\n }\n }\n\n Neg.prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroNeg::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateless(B::float_neg(tensor.primitive))\n }\n\n fn float_recip(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Recip;\n\n retro_unary!(RetroRecip, B::float_recip);\n\n impl<B: Backend> Backward<B, 1> for Recip {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let tensor = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let tmp = B::float_powf_scalar(tensor, -2.0);\n let value = B::float_neg(tmp);\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Recip\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroRecip::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_recip(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_recip(tensor.primitive)),\n }\n }\n\n fn 
float_swap_dims(tensor: FloatTensor<Self>, dim1: usize, dim2: usize) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct SwapDim;\n\n #[derive(new, Debug)]\n struct RetroSwapDims<B: Backend> {\n input_id: NodeID,\n dim1: usize,\n dim2: usize,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroSwapDims<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_swap_dims(input, self.dim1, self.dim2);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for SwapDim {\n type State = (usize, usize);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim1, dim2) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_swap_dims(grad, dim2, dim1)\n });\n }\n }\n\n match SwapDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSwapDims::<B>::new(tensor.node.id, dim1, dim2))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (dim1, dim2),\n B::float_swap_dims(tensor.primitive, dim1, dim2),\n ),\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_swap_dims(tensor.primitive, dim1, dim2))\n }\n }\n }\n\n fn float_permute(tensor: FloatTensor<Self>, axes: &[usize]) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct PermuteDim;\n\n #[derive(new, Debug)]\n struct RetroPermuteDims<B: Backend> {\n input_id: NodeID,\n axes: Vec<usize>,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroPermuteDims<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_permute(input, &self.axes);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for PermuteDim {\n type State = Vec<usize>;\n\n fn backward(\n self,\n ops: 
Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let axes = ops.state;\n\n let mut inverse = vec![0usize; axes.len()];\n axes.iter()\n .enumerate()\n .for_each(|(i, &axis)| inverse[axis] = i);\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_permute(grad, &inverse)\n });\n }\n }\n\n match PermuteDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroPermuteDims::<B>::new(tensor.node.id, axes.to_vec()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n prep.finish(axes.to_vec(), B::float_permute(tensor.primitive, axes))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_permute(tensor.primitive, axes)),\n }\n }\n\n fn float_flip(tensor: FloatTensor<Self>, axes: &[usize]) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct FlipDim;\n\n #[derive(new, Debug)]\n struct RetroFlipDims<B: Backend> {\n input_id: NodeID,\n axes: Vec<usize>,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroFlipDims<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_flip(input, &self.axes);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for FlipDim {\n type State = Vec<usize>;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let axes = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_flip(grad, &axes)\n });\n }\n }\n\n match FlipDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroFlipDims::<B>::new(tensor.node.id, axes.to_vec()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n prep.finish(axes.to_vec(), B::float_flip(tensor.primitive, axes))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_flip(tensor.primitive, axes)),\n }\n }\n\n fn 
float_reshape(tensor: FloatTensor<Self>, shape: Shape) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct ReshapeDim;\n\n #[derive(new, Debug)]\n struct RetroReshape<B: Backend> {\n input_id: NodeID,\n shape: Shape,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroReshape<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_reshape(input, self.shape.clone());\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for ReshapeDim {\n type State = (Shape, Shape);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape_original, shape) = ops.state;\n let ndims_out = shape.num_dims();\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let shape_grad = grad.shape();\n let mut grad = grad;\n\n for i in 0..ndims_out {\n if shape.dims[i] == 1 && shape_grad.dims[i] != 1 {\n grad = B::float_sum_dim(grad, i);\n }\n }\n\n B::float_reshape(grad, shape_original)\n });\n }\n }\n\n match ReshapeDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroReshape::<B>::new(tensor.node.id, shape.clone()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (tensor.primitive.shape(), shape.clone()),\n B::float_reshape(tensor.primitive, shape),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_reshape(tensor.primitive, shape)),\n }\n }\n\n fn float_gather(\n dim: usize,\n tensor: FloatTensor<Self>,\n indices: IntTensor<B>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Gather;\n\n impl<B: Backend> Backward<B, 1> for Gather {\n type State = (usize, IntTensor<B>, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, indices, shape, device) = ops.state;\n\n unary::<B, _>(ops.parents, 
ops.node, grads, |grad| {\n let zeros = B::float_zeros(shape, &device);\n B::float_scatter(dim, zeros, indices, grad)\n });\n }\n }\n\n match Gather\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n dim,\n indices.clone(),\n tensor.primitive.shape(),\n B::float_device(&tensor.primitive),\n ),\n B::float_gather(dim, tensor.primitive, indices),\n ),\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_gather(dim, tensor.primitive, indices))\n }\n }\n }\n\n fn float_scatter(\n dim: usize,\n tensor: FloatTensor<Self>,\n indices: IntTensor<B>,\n value: FloatTensor<Self>,\n ) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Scatter;\n\n impl<B: Backend> Backward<B, 2> for Scatter {\n type State = (usize, IntTensor<B>, Shape, Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, indices, shape_lhs, shape_rhs, device) = ops.state;\n let [indices_4lhs, indices_4rhs] = duplicate(&ops.parents, Some(indices));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n let zeros = B::float_zeros(shape_lhs, &device);\n B::float_scatter(dim, grad, indices_4lhs.unwrap(), zeros)\n },\n |grad| {\n let zeros = B::float_zeros(shape_rhs, &device);\n B::float_scatter(dim, zeros, indices_4rhs.unwrap(), grad)\n },\n );\n }\n }\n\n match Scatter\n .prepare::<C>([tensor.node, value.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (\n dim,\n indices.clone(),\n tensor.primitive.shape(),\n value.primitive.shape(),\n B::float_device(&value.primitive),\n ),\n B::float_scatter(dim, tensor.primitive, indices, value.primitive),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_scatter(\n dim,\n tensor.primitive,\n indices,\n value.primitive,\n )),\n }\n }\n\n fn float_select(\n tensor: FloatTensor<Self>,\n dim: usize,\n indices: IntTensor<B>,\n ) -> FloatTensor<Self> {\n 
    // (continuation of `float_select` from the previous chunk)
    // Backward op marker for select.
    #[derive(Debug)]
    struct Select;

    // Recomputes the forward select lazily from checkpointed state.
    #[derive(new, Debug)]
    struct RetroSelect<B: Backend> {
        input_id: NodeID,
        dim: usize,
        indices: IntTensor<B>,
    }

    impl<B: Backend> RetroForward for RetroSelect<B> {
        fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
            let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);
            let out = B::float_select(input, self.dim, self.indices.clone());
            states.save(out_node, out)
        }
    }

    impl<B: Backend> Backward<B, 1> for Select {
        // (dim, indices, input shape, device)
        type State = (usize, IntTensor<B>, Shape, B::Device);

        fn backward(
            self,
            ops: Ops<Self::State, 1>,
            grads: &mut Gradients,
            _checkpointer: &mut Checkpointer,
        ) {
            let (dim, indices, shape, device) = ops.state;

            // Accumulate grad back into the selected positions of a zero
            // tensor shaped like the input.
            unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                let zeros = B::float_zeros(shape, &device);
                B::float_select_assign(zeros, dim, indices, grad)
            });
        }
    }

    match Select
        .prepare::<C>([tensor.node.clone()])
        .memory_bound()
        .retro_forward(RetroSelect::<B>::new(tensor.node.id, dim, indices.clone()))
        .parents([&tensor])
        .stateful()
    {
        OpsKind::Tracked(prep) => prep.finish(
            (
                dim,
                indices.clone(),
                tensor.primitive.shape(),
                B::float_device(&tensor.primitive),
            ),
            B::float_select(tensor.primitive, dim, indices),
        ),
        OpsKind::UnTracked(prep) => {
            prep.finish(B::float_select(tensor.primitive, dim, indices))
        }
    }
}

// Assign `value` into `tensor` at `indices` along `dim`, with autodiff for
// both operands.
fn float_select_assign(
    tensor: FloatTensor<Self>,
    dim: usize,
    indices: IntTensor<B>,
    value: FloatTensor<Self>,
) -> FloatTensor<Self> {
    #[derive(Debug)]
    struct IndexSelectDimAssign;

    // Recomputes the forward select_assign lazily from both parents' states.
    #[derive(new, Debug)]
    struct RetroSelectAssign<B: Backend> {
        tensor_id: NodeID,
        dim: usize,
        indices: IntTensor<B>,
        value_id: NodeID,
    }

    impl<B: Backend> RetroForward for RetroSelectAssign<B> {
        fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
            let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);
            let value = states.get_state::<B::FloatTensorPrimitive>(&self.value_id);
            let out = B::float_select_assign(tensor, self.dim, self.indices.clone(), value);
            states.save(out_node, out)
        }
    }

    impl<B: Backend> Backward<B, 2> for IndexSelectDimAssign {
        type State = (usize, IntTensor<B>);

        fn backward(
            self,
            ops: Ops<Self::State, 2>,
            grads: &mut Gradients,
            _checkpointer: &mut Checkpointer,
        ) {
            let (dim, indices) = ops.state;

            binary::<B, _, _>(
                ops.parents,
                ops.node,
                grads,
                // lhs grad: passes through unchanged.
                |grad| grad,
                // rhs grad: the slice of grad at the assigned indices.
                |grad| B::float_select(grad, dim, indices),
            );
        }
    }

    match IndexSelectDimAssign
        .prepare::<C>([tensor.node.clone(), value.node.clone()])
        .memory_bound()
        .retro_forward(RetroSelectAssign::<B>::new(
            tensor.node.id,
            dim,
            indices.clone(),
            value.node.id,
        ))
        .parents([&tensor, &value])
        .stateful()
    {
        OpsKind::Tracked(prep) => prep.finish(
            (dim, indices.clone()),
            B::float_select_assign(tensor.primitive, dim, indices, value.primitive),
        ),
        OpsKind::UnTracked(prep) => prep.finish(B::float_select_assign(
            tensor.primitive,
            dim,
            indices,
            value.primitive,
        )),
    }
}

// Slice `tensor` by `ranges`, with autodiff; backward writes the grad back
// into the sliced region of a zero tensor (backward body continues in the
// next chunk).
fn float_slice(
    tensor: FloatTensor<Self>,
    ranges: &[core::ops::Range<usize>],
) -> FloatTensor<Self> {
    #[derive(Debug)]
    struct Index;

    // Recomputes the forward slice lazily from checkpointed state.
    #[derive(new, Debug)]
    struct RetroSlice<B: Backend> {
        tensor_id: NodeID,
        ranges: Vec<core::ops::Range<usize>>,
        _backend: PhantomData<B>,
    }

    impl<B: Backend> RetroForward for RetroSlice<B> {
        fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
            let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);
            let out = B::float_slice(tensor, &self.ranges);
            states.save(out_node, out)
        }
    }

    impl<B: Backend> Backward<B, 1> for Index {
        // (ranges, input shape, device)
        type State = (Vec<core::ops::Range<usize>>, Shape, B::Device);

        fn backward(
            self,
            ops: Ops<Self::State, 1>,
            grads: &mut Gradients,
            _checkpointer: &mut Checkpointer,
        ) {
            let (ranges, shape, device) = ops.state;

            // (continuation of `float_slice`'s backward from the previous
            // chunk) Place grad into the sliced region of a zero tensor of
            // the input's full shape; everything outside the slice gets 0.
            unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                let zeros = B::float_zeros(shape, &device);
                B::float_slice_assign(zeros, &ranges, grad)
            });
        }
    }

    match Index
        .prepare::<C>([tensor.node.clone()])
        .memory_bound()
        .retro_forward(RetroSlice::<B>::new(tensor.node.id, ranges.to_vec()))
        .parents([&tensor])
        .stateful()
    {
        OpsKind::Tracked(prep) => prep.finish(
            (
                ranges.to_vec(),
                tensor.primitive.shape(),
                B::float_device(&tensor.primitive),
            ),
            B::float_slice(tensor.primitive, ranges),
        ),
        OpsKind::UnTracked(prep) => prep.finish(B::float_slice(tensor.primitive, ranges)),
    }
}

// Write `value` over the `ranges` region of `tensor`, with autodiff for
// both operands.
fn float_slice_assign(
    tensor: FloatTensor<Self>,
    ranges: &[core::ops::Range<usize>],
    value: FloatTensor<Self>,
) -> FloatTensor<Self> {
    #[derive(Debug)]
    struct SliceAssign;

    // Recomputes the forward slice_assign lazily from both parents' states.
    #[derive(new, Debug)]
    struct RetroSliceAssign<B: Backend> {
        tensor_id: NodeID,
        ranges: Vec<core::ops::Range<usize>>,
        value_id: NodeID,
        _backend: PhantomData<B>,
    }

    impl<B: Backend> RetroForward for RetroSliceAssign<B> {
        fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {
            let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);
            let value = states.get_state::<B::FloatTensorPrimitive>(&self.value_id);
            let out = B::float_slice_assign(tensor, &self.ranges, value);
            states.save(out_node, out)
        }
    }

    impl<B: Backend> Backward<B, 2> for SliceAssign {
        // (ranges, value shape, device)
        type State = (Vec<core::ops::Range<usize>>, Shape, B::Device);

        fn backward(
            self,
            ops: Ops<Self::State, 2>,
            grads: &mut Gradients,
            _checkpointer: &mut Checkpointer,
        ) {
            let (ranges, shape_rhs, device) = ops.state;
            // One copy of `ranges` per tracked parent; None for untracked.
            let [ranges_4lhs, ranges_4rhs] = duplicate(&ops.parents, Some(ranges));

            binary::<B, _, _>(
                ops.parents,
                ops.node,
                grads,
                // lhs grad: the overwritten region contributed nothing, so
                // zero it out inside grad.
                |grad| {
                    let zeros = B::float_zeros(shape_rhs, &device);
                    B::float_slice_assign(grad, &ranges_4lhs.unwrap(), zeros)
                },
                // rhs grad: just the grad over the assigned region.
                |grad| B::float_slice(grad, &ranges_4rhs.unwrap()),
            );
        }
    }

    match SliceAssign
        .prepare::<C>([tensor.node.clone(), value.node.clone()])
        .memory_bound()
        .retro_forward(RetroSliceAssign::<B>::new(
            tensor.node.id,
            ranges.to_vec(),
            value.node.id,
        ))
        .parents([&tensor, &value])
        .stateful()
    {
        OpsKind::Tracked(prep) => prep.finish(
            (
                ranges.to_vec(),
                value.primitive.shape(),
                B::float_device(&value.primitive),
            ),
            B::float_slice_assign(tensor.primitive, ranges, value.primitive),
        ),
        OpsKind::UnTracked(prep) => prep.finish(B::float_slice_assign(
            tensor.primitive,
            ranges,
            value.primitive,
        )),
    }
}

// Element-wise where(mask, source, tensor) with autodiff for both float
// operands; grads are masked complementarily and reduced back to each
// operand's shape (closing brace of this method is in the next chunk).
fn float_mask_where(
    tensor: FloatTensor<Self>,
    mask: BoolTensor<Self>,
    source: FloatTensor<Self>,
) -> FloatTensor<Self> {
    #[derive(Debug)]
    struct MaskWhere;

    impl<B: Backend> Backward<B, 2> for MaskWhere {
        // (mask, lhs shape, rhs shape, device)
        type State = (BoolTensor<B>, Shape, Shape, B::Device);

        fn backward(
            self,
            ops: Ops<Self::State, 2>,
            grads: &mut Gradients,
            _checkpointer: &mut Checkpointer,
        ) {
            let (mask, shape_lhs, shape_rhs, device) = ops.state;
            // One copy of the mask per tracked parent; None for untracked.
            let [mask_4lhs, mask_4rhs] = duplicate(&ops.parents, Some(mask));

            binary::<B, _, _>(
                ops.parents,
                ops.node,
                grads,
                // lhs grad: zero where the mask selected `source`, then
                // sum-reduce any broadcast dims back to lhs's shape.
                |grad| {
                    let zeros = B::float_zeros(shape_lhs.clone(), &device);
                    let grad = B::float_mask_where(grad, mask_4lhs.unwrap(), zeros);

                    broadcast_shape::<B>(grad, &shape_lhs)
                },
                // rhs grad: keep only where the mask selected `source`,
                // then reduce back to rhs's shape.
                |grad| {
                    let zeros = B::float_zeros(shape_rhs.clone(), &device);
                    let grad = B::float_mask_where(zeros, mask_4rhs.unwrap(), grad);

                    broadcast_shape::<B>(grad, &shape_rhs)
                },
            );
        }
    }

    match MaskWhere
        .prepare::<C>([tensor.node, source.node])
        .compute_bound()
        .stateful()
    {
        OpsKind::Tracked(prep) => prep.finish(
            (
                mask.clone(),
                tensor.primitive.shape(),
                source.primitive.shape(),
                B::float_device(&source.primitive),
            ),
            B::float_mask_where(tensor.primitive, mask, source.primitive),
        ),
        OpsKind::UnTracked(prep) => prep.finish(B::float_mask_where(
            tensor.primitive,
            mask,
            source.primitive,
        )),
    }
// (closing brace of `float_mask_where` from the previous chunk)
}

// Fill masked positions with a constant; their gradient is therefore zero.
fn float_mask_fill(
    tensor: FloatTensor<Self>,
    mask: BoolTensor<B>,
    value: FloatElem<B>,
) -> FloatTensor<Self> {
    #[derive(Debug)]
    struct MaskFill;

    impl<B: Backend> Backward<B, 1> for MaskFill {
        type State = BoolTensor<B>;

        fn backward(
            self,
            ops: Ops<Self::State, 1>,
            grads: &mut Gradients,
            _checkpointer: &mut Checkpointer,
        ) {
            // Filled positions took a constant in the forward pass, so they
            // receive zero gradient.
            unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                B::float_mask_fill(grad, ops.state, 0.elem())
            });
        }
    }

    match MaskFill
        .prepare::<C>([tensor.node])
        .compute_bound()
        .stateful()
    {
        OpsKind::Tracked(prep) => prep.finish(
            mask.clone(),
            B::float_mask_fill(tensor.primitive, mask, value),
        ),
        OpsKind::UnTracked(prep) => {
            prep.finish(B::float_mask_fill(tensor.primitive, mask, value))
        }
    }
}

// Comparison ops produce bool tensors and carry no gradient: delegate
// straight to the inner backend on the primitives.
fn float_equal(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {
    B::float_equal(lhs.primitive, rhs.primitive)
}

fn float_equal_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {
    B::float_equal_elem(lhs.primitive, rhs)
}

fn float_greater(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {
    B::float_greater(lhs.primitive, rhs.primitive)
}

fn float_greater_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {
    B::float_greater_elem(lhs.primitive, rhs)
}

fn float_greater_equal(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {
    B::float_greater_equal(lhs.primitive, rhs.primitive)
}

fn float_greater_equal_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {
    B::float_greater_equal_elem(lhs.primitive, rhs)
}

fn float_lower(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {
    B::float_lower(lhs.primitive, rhs.primitive)
}

fn float_lower_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {
    B::float_lower_elem(lhs.primitive, rhs)
}

fn float_lower_equal(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> BoolTensor<B> {
    B::float_lower_equal(lhs.primitive, rhs.primitive)
}

fn float_lower_equal_elem(lhs: FloatTensor<Self>, rhs: FloatElem<B>) -> BoolTensor<B> {
    B::float_lower_equal_elem(lhs.primitive, rhs)
}

fn float_detach(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
    // When we detach a tensor, we remove it from the graph, but we still want to keep the
    // `require_grad` setting.
    let is_require_grad = Self::float_is_require_grad(&tensor);
    let tensor = AutodiffTensor::new(tensor.primitive);

    match is_require_grad {
        true => tensor.require_grad(),
        false => tensor,
    }
}

// Toggle gradient tracking; disabling it rebuilds the tensor as a fresh
// graph leaf without the requirement.
fn float_set_require_grad(tensor: FloatTensor<Self>, require_grad: bool) -> FloatTensor<Self> {
    if require_grad {
        return tensor.require_grad();
    }

    AutodiffTensor::new(tensor.primitive)
}

fn float_is_require_grad(tensor: &FloatTensor<Self>) -> bool {
    matches!(tensor.node.requirement, Requirement::Grad)
}

// Full mean reduction; backward spreads grad / num_elements over the input.
fn float_mean(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
    #[derive(Debug)]
    struct Mean;

    impl<B: Backend> Backward<B, 1> for Mean {
        // Input shape, needed to rebuild a full-size gradient.
        type State = Shape;

        fn backward(
            self,
            ops: Ops<Self::State, 1>,
            grads: &mut Gradients,
            _checkpointer: &mut Checkpointer,
        ) {
            unary::<B, _>(ops.parents, ops.node, grads, |grad| {
                let shape = ops.state;
                // d(mean)/dx_i = 1/N for every element.
                let val = 1_f64 / shape.num_elements() as f64;
                let ones = B::float_ones(shape, &B::float_device(&grad));
                let val = B::float_mul_scalar(ones, val.elem());

                // Unsqueeze the scalar grad so it broadcasts over the
                // full-size 1/N tensor.
                let grad = unsqueeze_like::<B>(grad, val.shape());
                B::float_mul(val, grad)
            });
        }
    }

    match Mean.prepare::<C>([tensor.node]).compute_bound().stateful() {
        OpsKind::Tracked(prep) => {
            prep.finish(tensor.primitive.shape(), B::float_mean(tensor.primitive))
        }
        OpsKind::UnTracked(prep) => prep.finish(B::float_mean(tensor.primitive)),
    }
}

// Full sum reduction; backward broadcasts grad over the input shape
// (backward body continues in the next chunk).
fn float_sum(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
    #[derive(Debug)]
    struct Sum;

    impl<B: Backend> Backward<B, 1> for Sum {
        // Input shape, needed to rebuild a full-size gradient.
        type State = Shape;

fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let val = B::float_ones(ops.state, &B::float_device(&grad));\n\n let grad = unsqueeze_like::<B>(grad, val.shape());\n B::float_mul(val, grad)\n });\n }\n }\n\n match Sum.prepare::<C>([tensor.node]).compute_bound().stateful() {\n OpsKind::Tracked(prep) => {\n prep.finish(tensor.primitive.shape(), B::float_sum(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_sum(tensor.primitive)),\n }\n }\n\n fn float_mean_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct MeanDim;\n\n impl<B: Backend> Backward<B, 1> for MeanDim {\n type State = (Shape, usize);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape, dim) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let val = 1_f64 / shape.dims[dim] as f64;\n let ones = B::float_ones(shape, &B::float_device(&grad));\n let val = B::float_mul_scalar(ones, B::FloatElem::from_elem(val));\n\n let grad = B::float_sum_dim(grad, dim);\n B::float_mul(val, grad)\n });\n }\n }\n\n match MeanDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (tensor.primitive.shape(), dim),\n B::float_mean_dim(tensor.primitive, dim),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_mean_dim(tensor.primitive, dim)),\n }\n }\n\n fn float_sum_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct SumDim;\n\n impl<B: Backend> Backward<B, 1> for SumDim {\n type State = (Shape, usize);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape, dim) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let ones = 
B::float_ones(shape, &B::float_device(&grad));\n let grad = B::float_sum_dim(grad, dim);\n\n B::float_mul(ones, grad)\n });\n }\n }\n\n match SumDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (tensor.primitive.shape(), dim),\n B::float_sum_dim(tensor.primitive, dim),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_sum_dim(tensor.primitive, dim)),\n }\n }\n\n fn float_argmax(tensor: FloatTensor<Self>, dim: usize) -> IntTensor<B> {\n B::float_argmax(tensor.primitive, dim)\n }\n\n fn float_argmin(tensor: FloatTensor<Self>, dim: usize) -> IntTensor<B> {\n B::float_argmin(tensor.primitive, dim)\n }\n\n fn float_exp(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Exp;\n\n retro_unary!(RetroExp, B::float_exp);\n\n impl<B: Backend> Backward<B, 1> for Exp {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n let output = B::float_exp(input);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_mul(grad, output)\n });\n }\n }\n\n match Exp\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroExp::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_exp(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_exp(tensor.primitive)),\n }\n }\n\n fn float_log(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Log;\n\n retro_unary!(RetroLog, B::float_log);\n\n impl<B: Backend> Backward<B, 1> for Log {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, 
ops.node, grads, |grad| {\n let value = B::float_powf_scalar(input, -1.0);\n B::float_mul(grad, value)\n });\n }\n }\n\n match Log\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroLog::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_log(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_log(tensor.primitive)),\n }\n }\n\n fn float_log1p(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Log1P;\n\n retro_unary!(RetroLog1P, B::float_log1p);\n\n impl<B: Backend> Backward<B, 1> for Log1P {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_add_scalar(input, 1.elem());\n let value = B::float_powf_scalar(value, -1.0);\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Log1P\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroLog1P::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_log1p(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_log1p(tensor.primitive)),\n }\n }\n\n fn float_powf_scalar(tensor: FloatTensor<Self>, value: f32) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct PowfScalar;\n\n #[derive(new, Debug)]\n struct RetroPowfScalar<B: Backend> {\n lhs_id: NodeID,\n rhs: f32,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroPowfScalar<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let lhs = states.get_state::<B::FloatTensorPrimitive>(&self.lhs_id);\n let out = B::float_powf_scalar(lhs, self.rhs);\n 
states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for PowfScalar {\n type State = (NodeID, f32);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (tensor_id, value) = ops.state;\n let tensor = checkpointer.retrieve_node_output(tensor_id);\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let tmp = B::float_powf_scalar(tensor, value - 1.0);\n let value = B::float_mul_scalar(tmp, value.elem());\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match PowfScalar\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroPowfScalar::<B>::new(tensor.node.id, value))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = (prep.checkpoint(&tensor), value);\n prep.finish(state, B::float_powf_scalar(tensor.primitive, value))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_powf_scalar(tensor.primitive, value)),\n }\n }\n\n fn float_sqrt(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sqrt;\n\n retro_unary!(RetroSqrt, B::float_sqrt);\n\n impl<B: Backend> Backward<B, 1> for Sqrt {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_div_scalar(B::float_powf_scalar(input, -0.5), 2.elem());\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Sqrt\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSqrt::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_sqrt(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_sqrt(tensor.primitive)),\n }\n }\n\n fn float_abs(tensor: FloatTensor<Self>) -> 
FloatTensor<Self> {\n #[derive(Debug)]\n struct Abs;\n\n retro_unary!(RetroAbs, B::float_abs);\n\n impl<B: Backend> Backward<B, 1> for Abs {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let tensor: B::FloatTensorPrimitive = checkpointer.retrieve_node_output(ops.state);\n let state = B::float_sign(tensor);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n B::float_mul(grad, state)\n });\n }\n }\n\n match Abs\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroAbs::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_abs(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_abs(tensor.primitive)),\n }\n }\n\n fn float_cos(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Cos;\n\n retro_unary!(RetroCos, B::float_cos);\n\n impl<B: Backend> Backward<B, 1> for Cos {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_neg(B::float_sin(input));\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Cos\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroCos::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_cos(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_cos(tensor.primitive)),\n }\n }\n\n fn float_sin(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sin;\n\n retro_unary!(RetroSin, B::float_sin);\n\n impl<B: Backend> Backward<B, 1> for Sin {\n type State = 
NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let state = checkpointer.retrieve_node_output(ops.state);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_cos(state);\n B::float_mul(grad, value)\n });\n }\n }\n\n match Sin\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSin::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_sin(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_sin(tensor.primitive)),\n }\n }\n\n fn float_tanh(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Tanh;\n\n retro_unary!(RetroTanh, B::float_tanh);\n\n impl<B: Backend> Backward<B, 1> for Tanh {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let input = checkpointer.retrieve_node_output(ops.state);\n let state = B::float_tanh(input);\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let value = B::float_add_scalar(\n B::float_neg(B::float_powf_scalar(state, 2.0)),\n 1.elem(),\n );\n B::float_mul(grad, value)\n });\n }\n }\n\n match Tanh\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroTanh::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_tanh(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_tanh(tensor.primitive)),\n }\n }\n\n fn float_round(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Round;\n retro_unary!(RetroRound, B::float_round);\n\n impl<B: Backend> Backward<B, 1> for Round {\n type State = (Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n 
grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape, device) = ops.state;\n unary::<B, _>(ops.parents, ops.node, grads, |_grad| {\n B::float_zeros(shape, &device)\n })\n }\n }\n\n match Round\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroRound::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (tensor.primitive.shape(), B::float_device(&tensor.primitive)),\n B::float_round(tensor.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_round(tensor.primitive)),\n }\n }\n\n fn float_floor(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Floor;\n retro_unary!(RetroFloor, B::float_floor);\n\n impl<B: Backend> Backward<B, 1> for Floor {\n type State = (Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape, device) = ops.state;\n unary::<B, _>(ops.parents, ops.node, grads, |_grad| {\n B::float_zeros(shape, &device)\n })\n }\n }\n\n match Floor\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroFloor::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (tensor.primitive.shape(), B::float_device(&tensor.primitive)),\n B::float_floor(tensor.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_floor(tensor.primitive)),\n }\n }\n\n fn float_ceil(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Ceil;\n retro_unary!(RetroCeil, B::float_ceil);\n\n impl<B: Backend> Backward<B, 1> for Ceil {\n type State = (Shape, B::Device);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape, device) = ops.state;\n unary::<B, _>(ops.parents, ops.node, grads, |_grad| {\n B::float_zeros(shape, &device)\n })\n }\n }\n\n match Ceil\n 
.prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroCeil::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(preps) => preps.finish(\n (tensor.primitive.shape(), B::float_device(&tensor.primitive)),\n B::float_floor(tensor.primitive),\n ),\n OpsKind::UnTracked(preps) => preps.finish(B::float_floor(tensor.primitive)),\n }\n }\n\n fn float_erf(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Erf;\n\n retro_unary!(RetroErf, B::float_erf);\n\n impl<B: Backend> Backward<B, 1> for Erf {\n type State = NodeID;\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let ops = checkpointer.retrieve_node_output(ops.state);\n let exponent = B::float_neg(B::float_powf_scalar(ops, 2.0));\n let numerator = B::float_mul_scalar(B::float_exp(exponent), 2.0.elem());\n let denominator = core::f64::consts::PI.sqrt().elem();\n let value = B::float_div_scalar(numerator, denominator);\n\n B::float_mul(grad, value)\n });\n }\n }\n\n match Erf\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroErf::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let state = prep.checkpoint(&tensor);\n prep.finish(state, B::float_erf(tensor.primitive))\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_erf(tensor.primitive)),\n }\n }\n\n fn float_cat(tensors: Vec<FloatTensor<Self>>, dim: usize) -> FloatTensor<Self> {\n #[derive(new, Debug)]\n struct CatStep<B: Backend> {\n nodes: Vec<Option<NodeRef>>,\n // The dimension of each tensor along the dim dimension.\n // This indicates the number of dimension concatenated for each tensor.\n dim_sizes: Vec<usize>,\n output: NodeRef,\n phantom: PhantomData<B>,\n dim: usize,\n }\n\n impl<B: Backend> Step for CatStep<B> {\n fn step(self: Box<Self>, grads: &mut Gradients, 
_checkpointer: &mut Checkpointer) {\n let grad = grads.consume::<B>(&self.output);\n let ranges: Vec<_> = grad.shape().dims.iter().map(|v| 0..*v).collect();\n\n let mut current_index = 0;\n\n self.nodes\n .into_iter()\n .zip(self.dim_sizes)\n .filter_map(|(node, dim_size)| node.map(|node| (node, dim_size)))\n .for_each(|(node, dim_size)| {\n let mut ranges = ranges.clone();\n ranges[self.dim] = current_index..dim_size + current_index;\n current_index += dim_size;\n grads.register::<B>(node.id, B::float_slice(grad.clone(), &ranges));\n });\n }\n\n fn node(&self) -> NodeID {\n self.output.id\n }\n\n fn parents(&self) -> Vec<NodeID> {\n self.nodes\n .iter()\n .filter_map(|node| node.clone())\n .map(|node| node.id)\n .collect()\n }\n fn depth(&self) -> usize {\n self.output.order\n }\n }\n\n let mut nodes = Vec::with_capacity(tensors.len());\n let mut primitives = Vec::with_capacity(tensors.len());\n let mut dim_sizes = Vec::with_capacity(tensors.len());\n\n tensors.into_iter().for_each(|tensor| {\n dim_sizes.push(tensor.primitive.shape().dims[dim]);\n nodes.push(tensor.node);\n primitives.push(tensor.primitive);\n });\n\n let requirement = Requirement::from_nodes(&nodes);\n\n // For simplicity, this operation does not checkpoint anything\n let cat_computing_property = ComputingProperty::Ambiguous;\n let checkpointer_builder = CheckpointerBuilder::default();\n\n let output = B::float_cat(primitives, dim);\n if requirement.is_none() {\n return AutodiffTensor::from_parents(\n output,\n &nodes,\n requirement,\n cat_computing_property,\n );\n }\n\n let output =\n AutodiffTensor::from_parents(output, &nodes, requirement, cat_computing_property);\n let nodes = nodes\n .into_iter()\n .map(|node| node.clone_if_require_grad())\n .collect::<Vec<_>>();\n\n let ops = CatStep::<B>::new(nodes, dim_sizes, output.node.clone(), dim);\n output.register_step(ops, checkpointer_builder)\n }\n\n fn float_max_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {\n match 
MaxMinDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let shape = tensor.primitive.shape();\n let (tensor, index) = B::float_max_dim_with_indices(tensor.primitive, dim);\n prep.finish((index, shape), tensor)\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_max_dim(tensor.primitive, dim)),\n }\n }\n fn float_max_dim_with_indices(\n tensor: FloatTensor<Self>,\n dim: usize,\n ) -> (FloatTensor<Self>, IntTensor<B>) {\n match MaxMinDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let shape = tensor.primitive.shape();\n let (tensor, index) = B::float_max_dim_with_indices(tensor.primitive, dim);\n let tensor = prep.finish((index.clone(), shape), tensor);\n\n (tensor, index)\n }\n OpsKind::UnTracked(prep) => {\n let (tensor, index) = B::float_max_dim_with_indices(tensor.primitive, dim);\n let tensor = prep.finish(tensor);\n\n (tensor, index)\n }\n }\n }\n fn float_min_dim(tensor: FloatTensor<Self>, dim: usize) -> FloatTensor<Self> {\n match MaxMinDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let shape = tensor.primitive.shape();\n let (tensor, index) = B::float_min_dim_with_indices(tensor.primitive, dim);\n prep.finish((index, shape), tensor)\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_min_dim(tensor.primitive, dim)),\n }\n }\n fn float_min_dim_with_indices(\n tensor: FloatTensor<Self>,\n dim: usize,\n ) -> (FloatTensor<Self>, IntTensor<B>) {\n match MaxMinDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let shape = tensor.primitive.shape();\n let (tensor, index) = B::float_min_dim_with_indices(tensor.primitive, dim);\n let tensor = prep.finish((index.clone(), shape), tensor);\n\n (tensor, index)\n }\n OpsKind::UnTracked(prep) => {\n let (tensor, index) = B::float_min_dim_with_indices(tensor.primitive, dim);\n let tensor = 
prep.finish(tensor);\n\n (tensor, index)\n }\n }\n }\n\n fn float_into_int(tensor: FloatTensor<Self>) -> <Autodiff<B> as Backend>::IntTensorPrimitive {\n B::float_into_int(tensor.primitive)\n }\n\n fn float_powf(lhs: FloatTensor<Self>, rhs: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct PowF;\n\n retro_binary!(RetroPowf, B::float_powf);\n\n impl<B: Backend> Backward<B, 2> for PowF {\n type State = (NodeID, NodeID, BinaryOpsBroadcast);\n\n fn backward(\n self,\n ops: Ops<Self::State, 2>,\n grads: &mut Gradients,\n checkpointer: &mut Checkpointer,\n ) {\n let (lhs_id, rhs_id, broadcast) = ops.state;\n let lhs: B::FloatTensorPrimitive = checkpointer.retrieve_node_output(lhs_id);\n let rhs: B::FloatTensorPrimitive = checkpointer.retrieve_node_output(rhs_id);\n\n // Both lhs and rhs are needed for both lhs and rhs gradients, but we clone them\n // the number of times required by the parents specification.\n let [rhs_4lhs, rhs_4rhs] = duplicate(&ops.parents, Some(rhs));\n let [lhs_4lhs, lhs_4rhs] = duplicate(&ops.parents, Some(lhs));\n\n binary::<B, _, _>(\n ops.parents,\n ops.node,\n grads,\n |grad| {\n //rhs*(lhs.val**(rhs-1))*grad\n let rhs1 = rhs_4lhs.unwrap();\n let rhs2 = rhs1.clone();\n let lhs = lhs_4lhs.unwrap();\n\n let tmp = B::float_powf(\n lhs,\n B::float_sub_scalar(rhs1, B::FloatElem::from_elem(1.0)),\n );\n let value = B::float_mul(tmp, rhs2);\n let grad = B::float_mul(grad, value);\n\n broadcast.backward_lhs::<B>(grad)\n },\n |grad| {\n //lhs**rhs * ln(lhs) * grad\n let rhs = rhs_4rhs.unwrap();\n let lhs1 = lhs_4rhs.unwrap();\n let lhs2 = lhs1.clone();\n let tmp = B::float_powf(lhs1, rhs);\n let value = B::float_mul(tmp, B::float_log(lhs2));\n let grad = B::float_mul(grad, value);\n\n broadcast.backward_rhs::<B>(grad)\n },\n );\n }\n }\n\n let broadcast = BinaryOpsBroadcast::new::<B>(&lhs.primitive, &rhs.primitive);\n\n match PowF\n .prepare::<C>([lhs.node.clone(), rhs.node.clone()])\n .memory_bound()\n 
.retro_forward(RetroPowf::<B>::new(lhs.node.id, rhs.node.id))\n .parents([&lhs, &rhs])\n .stateful()\n {\n OpsKind::Tracked(mut prep) => {\n let lhs_state = prep.checkpoint(&lhs);\n let rhs_state = prep.checkpoint(&rhs);\n prep.finish(\n (lhs_state, rhs_state, broadcast),\n B::float_powf(lhs.primitive, rhs.primitive),\n )\n }\n OpsKind::UnTracked(prep) => prep.finish(B::float_powf(lhs.primitive, rhs.primitive)),\n }\n }\n\n fn float_sign(tensor: FloatTensor<Self>) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Sign;\n\n retro_unary!(RetroSign, B::float_sign);\n\n impl<B: Backend> Backward<B, 1> for Sign {\n type State = ();\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n unary::<B, _>(ops.parents, ops.node, grads, |grad|\n // Always return 0 because the derivative of the sign function\n // does not contribute to gradient updates in a meaningful way.\n B::float_mul_scalar(grad, 0.elem()));\n }\n }\n\n Sign.prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroSign::<B>::new(tensor.node.id))\n .parents([&tensor])\n .stateless(B::float_sign(tensor.primitive))\n }\n\n fn float_expand(tensor: FloatTensor<Self>, shape: Shape) -> FloatTensor<Self> {\n // D1: tensor, D2: shape\n #[derive(Debug)]\n struct ExpandDim;\n\n #[derive(new, Debug)]\n struct RetroExpand<B: Backend> {\n input_id: NodeID,\n shape: Shape,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroExpand<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let input = states.get_state::<B::FloatTensorPrimitive>(&self.input_id);\n let out = B::float_expand(input, self.shape.clone());\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for ExpandDim {\n type State = (Shape, Shape);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (shape_in, shape_out) = ops.state;\n let 
ndims_in = shape_in.num_dims();\n let ndims_out = shape_out.num_dims();\n\n let mut shape_expanded = vec![1; ndims_out];\n\n debug_assert!(ndims_out >= ndims_in);\n\n for i in 0..ndims_in {\n shape_expanded[i + (ndims_out - ndims_in)] = shape_in.dims[i];\n }\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let shape_grad = grad.shape();\n let mut grad = grad;\n\n #[allow(clippy::needless_range_loop)]\n for i in 0..ndims_out {\n if shape_expanded[i] == 1 && shape_grad.dims[i] != 1 {\n grad = B::float_sum_dim(grad, i);\n }\n }\n\n B::float_reshape(grad, shape_in)\n });\n }\n }\n\n match ExpandDim\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroExpand::<B>::new(tensor.node.id, shape.clone()))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (tensor.primitive.shape(), shape.clone()),\n B::float_expand(tensor.primitive, shape),\n ),\n OpsKind::UnTracked(prep) => prep.finish(B::float_expand(tensor.primitive, shape)),\n }\n }\n\n fn float_sort(tensor: FloatTensor<Self>, dim: usize, descending: bool) -> FloatTensor<Self> {\n match super::sort::SortDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let shape = tensor.primitive.shape();\n let (tensor, indices) =\n B::float_sort_with_indices(tensor.primitive, dim, descending);\n prep.finish((indices, shape), tensor)\n }\n OpsKind::UnTracked(prep) => {\n prep.finish(B::float_sort(tensor.primitive, dim, descending))\n }\n }\n }\n\n fn float_sort_with_indices(\n tensor: FloatTensor<Self>,\n dim: usize,\n descending: bool,\n ) -> (FloatTensor<Self>, IntTensor<B>) {\n match super::sort::SortDim\n .prepare::<C>([tensor.node])\n .compute_bound()\n .stateful()\n {\n OpsKind::Tracked(prep) => {\n let shape = tensor.primitive.shape();\n let (tensor, indices) =\n B::float_sort_with_indices(tensor.primitive, dim, descending);\n let tensor = prep.finish((indices.clone(), shape), tensor);\n\n (tensor, indices)\n 
}\n OpsKind::UnTracked(prep) => {\n let (tensor, indices) =\n B::float_sort_with_indices(tensor.primitive, dim, descending);\n let tensor = prep.finish(tensor);\n\n (tensor, indices)\n }\n }\n }\n\n fn float_argsort(tensor: FloatTensor<Self>, dim: usize, descending: bool) -> IntTensor<B> {\n B::float_argsort(tensor.primitive, dim, descending)\n }\n\n fn float_repeat_dim(tensor: FloatTensor<Self>, dim: usize, times: usize) -> FloatTensor<Self> {\n #[derive(Debug)]\n struct Repeat;\n\n #[derive(new, Debug)]\n struct RetroRepeat<B: Backend> {\n tensor_id: NodeID,\n dim: usize,\n times: usize,\n _backend: PhantomData<B>,\n }\n\n impl<B: Backend> RetroForward for RetroRepeat<B> {\n fn forward(&self, states: &mut BackwardStates, out_node: NodeID) {\n let tensor = states.get_state::<B::FloatTensorPrimitive>(&self.tensor_id);\n let out = B::float_repeat_dim(tensor, self.dim, self.times);\n states.save(out_node, out)\n }\n }\n\n impl<B: Backend> Backward<B, 1> for Repeat {\n type State = (usize, usize);\n\n fn backward(\n self,\n ops: Ops<Self::State, 1>,\n grads: &mut Gradients,\n _checkpointer: &mut Checkpointer,\n ) {\n let (dim, times) = ops.state;\n\n unary::<B, _>(ops.parents, ops.node, grads, |grad| {\n let mut dims = grad.shape().dims;\n let orig_dim_size = dims[dim] / times;\n if orig_dim_size > 1 {\n dims[dim] = orig_dim_size;\n let orig_dims = dims.clone();\n dims.insert(dim + 1, times); // shape [..., orig_dim_size, times, ...]\n let grad = B::float_reshape(grad, Shape::from(dims));\n let grad = B::float_sum_dim(grad, dim + 1); // sum over repeat times\n B::float_reshape(grad, Shape::from(orig_dims))\n } else {\n B::float_sum_dim(grad, dim)\n }\n });\n }\n }\n\n match Repeat\n .prepare::<C>([tensor.node.clone()])\n .memory_bound()\n .retro_forward(RetroRepeat::<B>::new(tensor.node.id, dim, times))\n .parents([&tensor])\n .stateful()\n {\n OpsKind::Tracked(prep) => prep.finish(\n (dim, times),\n B::float_repeat_dim(tensor.primitive, dim, times),\n ),\n 
OpsKind::UnTracked(prep) => {\n prep.finish(B::float_repeat_dim(tensor.primitive, dim, times))\n }\n }\n }\n\n fn float_cast(tensor: FloatTensor<Self>, dtype: burn_tensor::FloatDType) -> FloatTensor<Self> {\n AutodiffTensor::new(B::float_cast(tensor.primitive, dtype))\n }\n\n // TODO: Implement float_prod and float_sum\n // https://github.com/tracel-ai/burn/issues/1458\n}",
"class_signature": "impl<B: Backend, C: CheckpointStrategy> FloatTensorOps<Self> for Autodiff<B, C>"
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.