Mirror of https://github.com/cwinfo/matterbridge.git, synced 2025-06-26 08:39:24 +00:00
Add dependencies/vendor (whatsapp)
27 vendor/filippo.io/edwards25519/LICENSE (generated, vendored, Normal file)
@@ -0,0 +1,27 @@
Copyright (c) 2009 The Go Authors. All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

   * Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
   * Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
   * Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
14 vendor/filippo.io/edwards25519/README.md (generated, vendored, Normal file)
@@ -0,0 +1,14 @@
# filippo.io/edwards25519

```
import "filippo.io/edwards25519"
```

This library implements the edwards25519 elliptic curve, exposing the necessary APIs to build a wide array of higher-level primitives.
Read the docs at [pkg.go.dev/filippo.io/edwards25519](https://pkg.go.dev/filippo.io/edwards25519).

The code is originally derived from Adam Langley's internal implementation in the Go standard library, and includes George Tankersley's [performance improvements](https://golang.org/cl/71950). It was then further developed by Henry de Valence for use in ristretto255.

Most users don't need this package, and should instead use `crypto/ed25519` for signatures, `golang.org/x/crypto/curve25519` for Diffie-Hellman, or `github.com/gtank/ristretto255` for prime order group logic. However, for anyone currently using a fork of `crypto/ed25519/internal/edwards25519` or `github.com/agl/edwards25519`, this package should be a safer, faster, and more powerful alternative.

Since this package is meant to curb proliferation of edwards25519 implementations in the Go ecosystem, it welcomes requests for new APIs or reviewable performance improvements.
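As a quick illustration of the API the README describes, a minimal sketch (not part of the vendored files, and assuming the vendored module resolves through its canonical import path) might look like this:

```go
package main

import (
	"fmt"

	"filippo.io/edwards25519"
)

func main() {
	// Start from the canonical generator and the identity element.
	B := edwards25519.NewGeneratorPoint()
	I := edwards25519.NewIdentityPoint()

	// B + identity = B, so the sum must compare equal to the generator.
	sum := new(edwards25519.Point).Add(B, I)
	fmt.Println(sum.Equal(B) == 1)  // true
	fmt.Printf("%x\n", sum.Bytes()) // 32-byte RFC 8032 encoding of B

	// Decoding the encoding back must succeed and compare equal.
	P, err := new(edwards25519.Point).SetBytes(sum.Bytes())
	if err != nil {
		panic(err)
	}
	fmt.Println(P.Equal(B) == 1) // true
}
```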
20 vendor/filippo.io/edwards25519/doc.go (generated, vendored, Normal file)
@@ -0,0 +1,20 @@
// Copyright (c) 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package edwards25519 implements group logic for the twisted Edwards curve
//
//     -x^2 + y^2 = 1 + -(121665/121666)*x^2*y^2
//
// This is better known as the Edwards curve equivalent to Curve25519, and is
// the curve used by the Ed25519 signature scheme.
//
// Most users don't need this package, and should instead use crypto/ed25519 for
// signatures, golang.org/x/crypto/curve25519 for Diffie-Hellman, or
// github.com/gtank/ristretto255 for prime order group logic.
//
// However, developers who do need to interact with low-level edwards25519
// operations can use this package, which is an extended version of
// crypto/ed25519/internal/edwards25519 from the standard library repackaged as
// an importable module.
package edwards25519
428 vendor/filippo.io/edwards25519/edwards25519.go (generated, vendored, Normal file)
@@ -0,0 +1,428 @@
|
||||
// Copyright (c) 2017 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package edwards25519
|
||||
|
||||
import (
|
||||
"errors"
|
||||
|
||||
"filippo.io/edwards25519/field"
|
||||
)
|
||||
|
||||
// Point types.
|
||||
|
||||
type projP1xP1 struct {
|
||||
X, Y, Z, T field.Element
|
||||
}
|
||||
|
||||
type projP2 struct {
|
||||
X, Y, Z field.Element
|
||||
}
|
||||
|
||||
// Point represents a point on the edwards25519 curve.
|
||||
//
|
||||
// This type works similarly to math/big.Int, and all arguments and receivers
|
||||
// are allowed to alias.
|
||||
//
|
||||
// The zero value is NOT valid, and it may be used only as a receiver.
|
||||
type Point struct {
|
||||
// The point is internally represented in extended coordinates (X, Y, Z, T)
|
||||
// where x = X/Z, y = Y/Z, and xy = T/Z per https://eprint.iacr.org/2008/522.
|
||||
x, y, z, t field.Element
|
||||
|
||||
// Make the type not comparable (i.e. used with == or as a map key), as
|
||||
// equivalent points can be represented by different Go values.
|
||||
_ incomparable
|
||||
}
|
||||
|
||||
type incomparable [0]func()
|
||||
|
||||
func checkInitialized(points ...*Point) {
|
||||
for _, p := range points {
|
||||
if p.x == (field.Element{}) && p.y == (field.Element{}) {
|
||||
panic("edwards25519: use of uninitialized Point")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type projCached struct {
|
||||
YplusX, YminusX, Z, T2d field.Element
|
||||
}
|
||||
|
||||
type affineCached struct {
|
||||
YplusX, YminusX, T2d field.Element
|
||||
}
|
||||
|
||||
// Constructors.
|
||||
|
||||
func (v *projP2) Zero() *projP2 {
|
||||
v.X.Zero()
|
||||
v.Y.One()
|
||||
v.Z.One()
|
||||
return v
|
||||
}
|
||||
|
||||
// identity is the point at infinity.
|
||||
var identity, _ = new(Point).SetBytes([]byte{
|
||||
1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
|
||||
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0})
|
||||
|
||||
// NewIdentityPoint returns a new Point set to the identity.
|
||||
func NewIdentityPoint() *Point {
|
||||
return new(Point).Set(identity)
|
||||
}
|
||||
|
||||
// generator is the canonical curve basepoint. See TestGenerator for the
|
||||
// correspondence of this encoding with the values in RFC 8032.
|
||||
var generator, _ = new(Point).SetBytes([]byte{
|
||||
0x58, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66,
|
||||
0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66,
|
||||
0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66,
|
||||
0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66})
|
||||
|
||||
// NewGeneratorPoint returns a new Point set to the canonical generator.
|
||||
func NewGeneratorPoint() *Point {
|
||||
return new(Point).Set(generator)
|
||||
}
|
||||
|
||||
func (v *projCached) Zero() *projCached {
|
||||
v.YplusX.One()
|
||||
v.YminusX.One()
|
||||
v.Z.One()
|
||||
v.T2d.Zero()
|
||||
return v
|
||||
}
|
||||
|
||||
func (v *affineCached) Zero() *affineCached {
|
||||
v.YplusX.One()
|
||||
v.YminusX.One()
|
||||
v.T2d.Zero()
|
||||
return v
|
||||
}
|
||||
|
||||
// Assignments.
|
||||
|
||||
// Set sets v = u, and returns v.
|
||||
func (v *Point) Set(u *Point) *Point {
|
||||
*v = *u
|
||||
return v
|
||||
}
|
||||
|
||||
// Encoding.
|
||||
|
||||
// Bytes returns the canonical 32-byte encoding of v, according to RFC 8032,
|
||||
// Section 5.1.2.
|
||||
func (v *Point) Bytes() []byte {
|
||||
// This function is outlined to make the allocations inline in the caller
|
||||
// rather than happen on the heap.
|
||||
var buf [32]byte
|
||||
return v.bytes(&buf)
|
||||
}
|
||||
|
||||
func (v *Point) bytes(buf *[32]byte) []byte {
|
||||
checkInitialized(v)
|
||||
|
||||
var zInv, x, y field.Element
|
||||
zInv.Invert(&v.z) // zInv = 1 / Z
|
||||
x.Multiply(&v.x, &zInv) // x = X / Z
|
||||
y.Multiply(&v.y, &zInv) // y = Y / Z
|
||||
|
||||
out := copyFieldElement(buf, &y)
|
||||
out[31] |= byte(x.IsNegative() << 7)
|
||||
return out
|
||||
}
|
||||
|
||||
var feOne = new(field.Element).One()
|
||||
|
||||
// SetBytes sets v = x, where x is a 32-byte encoding of v. If x does not
|
||||
// represent a valid point on the curve, SetBytes returns nil and an error and
|
||||
// the receiver is unchanged. Otherwise, SetBytes returns v.
|
||||
//
|
||||
// Note that SetBytes accepts all non-canonical encodings of valid points.
|
||||
// That is, it follows decoding rules that match most implementations in
|
||||
// the ecosystem rather than RFC 8032.
|
||||
func (v *Point) SetBytes(x []byte) (*Point, error) {
|
||||
// Specifically, the non-canonical encodings that are accepted are
|
||||
// 1) the ones where the field element is not reduced (see the
|
||||
// (*field.Element).SetBytes docs) and
|
||||
// 2) the ones where the x-coordinate is zero and the sign bit is set.
|
||||
//
|
||||
// This is consistent with crypto/ed25519/internal/edwards25519. Read more
|
||||
// at https://hdevalence.ca/blog/2020-10-04-its-25519am, specifically the
|
||||
// "Canonical A, R" section.
|
||||
|
||||
y, err := new(field.Element).SetBytes(x)
|
||||
if err != nil {
|
||||
return nil, errors.New("edwards25519: invalid point encoding length")
|
||||
}
|
||||
|
||||
// -x² + y² = 1 + dx²y²
|
||||
// x² + dx²y² = x²(dy² + 1) = y² - 1
|
||||
// x² = (y² - 1) / (dy² + 1)
|
||||
|
||||
// u = y² - 1
|
||||
y2 := new(field.Element).Square(y)
|
||||
u := new(field.Element).Subtract(y2, feOne)
|
||||
|
||||
// v = dy² + 1
|
||||
vv := new(field.Element).Multiply(y2, d)
|
||||
vv = vv.Add(vv, feOne)
|
||||
|
||||
// x = +√(u/v)
|
||||
xx, wasSquare := new(field.Element).SqrtRatio(u, vv)
|
||||
if wasSquare == 0 {
|
||||
return nil, errors.New("edwards25519: invalid point encoding")
|
||||
}
|
||||
|
||||
// Select the negative square root if the sign bit is set.
|
||||
xxNeg := new(field.Element).Negate(xx)
|
||||
xx = xx.Select(xxNeg, xx, int(x[31]>>7))
|
||||
|
||||
v.x.Set(xx)
|
||||
v.y.Set(y)
|
||||
v.z.One()
|
||||
v.t.Multiply(xx, y) // xy = T / Z
|
||||
|
||||
return v, nil
|
||||
}
|
||||
|
||||
func copyFieldElement(buf *[32]byte, v *field.Element) []byte {
|
||||
copy(buf[:], v.Bytes())
|
||||
return buf[:]
|
||||
}
|
||||
|
||||
// Conversions.
|
||||
|
||||
func (v *projP2) FromP1xP1(p *projP1xP1) *projP2 {
|
||||
v.X.Multiply(&p.X, &p.T)
|
||||
v.Y.Multiply(&p.Y, &p.Z)
|
||||
v.Z.Multiply(&p.Z, &p.T)
|
||||
return v
|
||||
}
|
||||
|
||||
func (v *projP2) FromP3(p *Point) *projP2 {
|
||||
v.X.Set(&p.x)
|
||||
v.Y.Set(&p.y)
|
||||
v.Z.Set(&p.z)
|
||||
return v
|
||||
}
|
||||
|
||||
func (v *Point) fromP1xP1(p *projP1xP1) *Point {
|
||||
v.x.Multiply(&p.X, &p.T)
|
||||
v.y.Multiply(&p.Y, &p.Z)
|
||||
v.z.Multiply(&p.Z, &p.T)
|
||||
v.t.Multiply(&p.X, &p.Y)
|
||||
return v
|
||||
}
|
||||
|
||||
func (v *Point) fromP2(p *projP2) *Point {
|
||||
v.x.Multiply(&p.X, &p.Z)
|
||||
v.y.Multiply(&p.Y, &p.Z)
|
||||
v.z.Square(&p.Z)
|
||||
v.t.Multiply(&p.X, &p.Y)
|
||||
return v
|
||||
}
|
||||
|
||||
// d is a constant in the curve equation.
|
||||
var d, _ = new(field.Element).SetBytes([]byte{
|
||||
0xa3, 0x78, 0x59, 0x13, 0xca, 0x4d, 0xeb, 0x75,
|
||||
0xab, 0xd8, 0x41, 0x41, 0x4d, 0x0a, 0x70, 0x00,
|
||||
0x98, 0xe8, 0x79, 0x77, 0x79, 0x40, 0xc7, 0x8c,
|
||||
0x73, 0xfe, 0x6f, 0x2b, 0xee, 0x6c, 0x03, 0x52})
|
||||
var d2 = new(field.Element).Add(d, d)
|
||||
|
||||
func (v *projCached) FromP3(p *Point) *projCached {
|
||||
v.YplusX.Add(&p.y, &p.x)
|
||||
v.YminusX.Subtract(&p.y, &p.x)
|
||||
v.Z.Set(&p.z)
|
||||
v.T2d.Multiply(&p.t, d2)
|
||||
return v
|
||||
}
|
||||
|
||||
func (v *affineCached) FromP3(p *Point) *affineCached {
|
||||
v.YplusX.Add(&p.y, &p.x)
|
||||
v.YminusX.Subtract(&p.y, &p.x)
|
||||
v.T2d.Multiply(&p.t, d2)
|
||||
|
||||
var invZ field.Element
|
||||
invZ.Invert(&p.z)
|
||||
v.YplusX.Multiply(&v.YplusX, &invZ)
|
||||
v.YminusX.Multiply(&v.YminusX, &invZ)
|
||||
v.T2d.Multiply(&v.T2d, &invZ)
|
||||
return v
|
||||
}
|
||||
|
||||
// (Re)addition and subtraction.
|
||||
|
||||
// Add sets v = p + q, and returns v.
|
||||
func (v *Point) Add(p, q *Point) *Point {
|
||||
checkInitialized(p, q)
|
||||
qCached := new(projCached).FromP3(q)
|
||||
result := new(projP1xP1).Add(p, qCached)
|
||||
return v.fromP1xP1(result)
|
||||
}
|
||||
|
||||
// Subtract sets v = p - q, and returns v.
|
||||
func (v *Point) Subtract(p, q *Point) *Point {
|
||||
checkInitialized(p, q)
|
||||
qCached := new(projCached).FromP3(q)
|
||||
result := new(projP1xP1).Sub(p, qCached)
|
||||
return v.fromP1xP1(result)
|
||||
}
|
||||
|
||||
func (v *projP1xP1) Add(p *Point, q *projCached) *projP1xP1 {
|
||||
var YplusX, YminusX, PP, MM, TT2d, ZZ2 field.Element
|
||||
|
||||
YplusX.Add(&p.y, &p.x)
|
||||
YminusX.Subtract(&p.y, &p.x)
|
||||
|
||||
PP.Multiply(&YplusX, &q.YplusX)
|
||||
MM.Multiply(&YminusX, &q.YminusX)
|
||||
TT2d.Multiply(&p.t, &q.T2d)
|
||||
ZZ2.Multiply(&p.z, &q.Z)
|
||||
|
||||
ZZ2.Add(&ZZ2, &ZZ2)
|
||||
|
||||
v.X.Subtract(&PP, &MM)
|
||||
v.Y.Add(&PP, &MM)
|
||||
v.Z.Add(&ZZ2, &TT2d)
|
||||
v.T.Subtract(&ZZ2, &TT2d)
|
||||
return v
|
||||
}
|
||||
|
||||
func (v *projP1xP1) Sub(p *Point, q *projCached) *projP1xP1 {
|
||||
var YplusX, YminusX, PP, MM, TT2d, ZZ2 field.Element
|
||||
|
||||
YplusX.Add(&p.y, &p.x)
|
||||
YminusX.Subtract(&p.y, &p.x)
|
||||
|
||||
PP.Multiply(&YplusX, &q.YminusX) // flipped sign
|
||||
MM.Multiply(&YminusX, &q.YplusX) // flipped sign
|
||||
TT2d.Multiply(&p.t, &q.T2d)
|
||||
ZZ2.Multiply(&p.z, &q.Z)
|
||||
|
||||
ZZ2.Add(&ZZ2, &ZZ2)
|
||||
|
||||
v.X.Subtract(&PP, &MM)
|
||||
v.Y.Add(&PP, &MM)
|
||||
v.Z.Subtract(&ZZ2, &TT2d) // flipped sign
|
||||
v.T.Add(&ZZ2, &TT2d) // flipped sign
|
||||
return v
|
||||
}
|
||||
|
||||
func (v *projP1xP1) AddAffine(p *Point, q *affineCached) *projP1xP1 {
|
||||
var YplusX, YminusX, PP, MM, TT2d, Z2 field.Element
|
||||
|
||||
YplusX.Add(&p.y, &p.x)
|
||||
YminusX.Subtract(&p.y, &p.x)
|
||||
|
||||
PP.Multiply(&YplusX, &q.YplusX)
|
||||
MM.Multiply(&YminusX, &q.YminusX)
|
||||
TT2d.Multiply(&p.t, &q.T2d)
|
||||
|
||||
Z2.Add(&p.z, &p.z)
|
||||
|
||||
v.X.Subtract(&PP, &MM)
|
||||
v.Y.Add(&PP, &MM)
|
||||
v.Z.Add(&Z2, &TT2d)
|
||||
v.T.Subtract(&Z2, &TT2d)
|
||||
return v
|
||||
}
|
||||
|
||||
func (v *projP1xP1) SubAffine(p *Point, q *affineCached) *projP1xP1 {
|
||||
var YplusX, YminusX, PP, MM, TT2d, Z2 field.Element
|
||||
|
||||
YplusX.Add(&p.y, &p.x)
|
||||
YminusX.Subtract(&p.y, &p.x)
|
||||
|
||||
PP.Multiply(&YplusX, &q.YminusX) // flipped sign
|
||||
MM.Multiply(&YminusX, &q.YplusX) // flipped sign
|
||||
TT2d.Multiply(&p.t, &q.T2d)
|
||||
|
||||
Z2.Add(&p.z, &p.z)
|
||||
|
||||
v.X.Subtract(&PP, &MM)
|
||||
v.Y.Add(&PP, &MM)
|
||||
v.Z.Subtract(&Z2, &TT2d) // flipped sign
|
||||
v.T.Add(&Z2, &TT2d) // flipped sign
|
||||
return v
|
||||
}
|
||||
|
||||
// Doubling.
|
||||
|
||||
func (v *projP1xP1) Double(p *projP2) *projP1xP1 {
|
||||
var XX, YY, ZZ2, XplusYsq field.Element
|
||||
|
||||
XX.Square(&p.X)
|
||||
YY.Square(&p.Y)
|
||||
ZZ2.Square(&p.Z)
|
||||
ZZ2.Add(&ZZ2, &ZZ2)
|
||||
XplusYsq.Add(&p.X, &p.Y)
|
||||
XplusYsq.Square(&XplusYsq)
|
||||
|
||||
v.Y.Add(&YY, &XX)
|
||||
v.Z.Subtract(&YY, &XX)
|
||||
|
||||
v.X.Subtract(&XplusYsq, &v.Y)
|
||||
v.T.Subtract(&ZZ2, &v.Z)
|
||||
return v
|
||||
}
|
||||
|
||||
// Negation.
|
||||
|
||||
// Negate sets v = -p, and returns v.
|
||||
func (v *Point) Negate(p *Point) *Point {
|
||||
checkInitialized(p)
|
||||
v.x.Negate(&p.x)
|
||||
v.y.Set(&p.y)
|
||||
v.z.Set(&p.z)
|
||||
v.t.Negate(&p.t)
|
||||
return v
|
||||
}
|
||||
|
||||
// Equal returns 1 if v is equivalent to u, and 0 otherwise.
|
||||
func (v *Point) Equal(u *Point) int {
|
||||
checkInitialized(v, u)
|
||||
|
||||
var t1, t2, t3, t4 field.Element
|
||||
t1.Multiply(&v.x, &u.z)
|
||||
t2.Multiply(&u.x, &v.z)
|
||||
t3.Multiply(&v.y, &u.z)
|
||||
t4.Multiply(&u.y, &v.z)
|
||||
|
||||
return t1.Equal(&t2) & t3.Equal(&t4)
|
||||
}
|
||||
|
||||
// Constant-time operations
|
||||
|
||||
// Select sets v to a if cond == 1 and to b if cond == 0.
|
||||
func (v *projCached) Select(a, b *projCached, cond int) *projCached {
|
||||
v.YplusX.Select(&a.YplusX, &b.YplusX, cond)
|
||||
v.YminusX.Select(&a.YminusX, &b.YminusX, cond)
|
||||
v.Z.Select(&a.Z, &b.Z, cond)
|
||||
v.T2d.Select(&a.T2d, &b.T2d, cond)
|
||||
return v
|
||||
}
|
||||
|
||||
// Select sets v to a if cond == 1 and to b if cond == 0.
|
||||
func (v *affineCached) Select(a, b *affineCached, cond int) *affineCached {
|
||||
v.YplusX.Select(&a.YplusX, &b.YplusX, cond)
|
||||
v.YminusX.Select(&a.YminusX, &b.YminusX, cond)
|
||||
v.T2d.Select(&a.T2d, &b.T2d, cond)
|
||||
return v
|
||||
}
|
||||
|
||||
// CondNeg negates v if cond == 1 and leaves it unchanged if cond == 0.
|
||||
func (v *projCached) CondNeg(cond int) *projCached {
|
||||
v.YplusX.Swap(&v.YminusX, cond)
|
||||
v.T2d.Select(new(field.Element).Negate(&v.T2d), &v.T2d, cond)
|
||||
return v
|
||||
}
|
||||
|
||||
// CondNeg negates v if cond == 1 and leaves it unchanged if cond == 0.
|
||||
func (v *affineCached) CondNeg(cond int) *affineCached {
|
||||
v.YplusX.Swap(&v.YminusX, cond)
|
||||
v.T2d.Select(new(field.Element).Negate(&v.T2d), &v.T2d, cond)
|
||||
return v
|
||||
}
|
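The file above defines `Point` with `Add`, `Subtract`, `Negate` and `Equal`; the type is deliberately not comparable with `==`, since equivalent points can have different internal representations. A hedged sketch of how those methods compose (my example, not part of the vendored code):

```go
package main

import (
	"fmt"

	"filippo.io/edwards25519"
)

func main() {
	B := edwards25519.NewGeneratorPoint()
	I := edwards25519.NewIdentityPoint()

	// Both B - B and B + (-B) must equal the identity element.
	diff := new(edwards25519.Point).Subtract(B, B)
	negSum := new(edwards25519.Point).Add(B, new(edwards25519.Point).Negate(B))

	// Comparisons go through Equal, never ==.
	fmt.Println(diff.Equal(I) == 1)   // true
	fmt.Println(negSum.Equal(I) == 1) // true
}
```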
343 vendor/filippo.io/edwards25519/extra.go (generated, vendored, Normal file)
@@ -0,0 +1,343 @@
|
||||
// Copyright (c) 2021 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package edwards25519
|
||||
|
||||
// This file contains additional functionality that is not included in the
|
||||
// upstream crypto/ed25519/internal/edwards25519 package.
|
||||
|
||||
import (
|
||||
"errors"
|
||||
|
||||
"filippo.io/edwards25519/field"
|
||||
)
|
||||
|
||||
// ExtendedCoordinates returns v in extended coordinates (X:Y:Z:T) where
|
||||
// x = X/Z, y = Y/Z, and xy = T/Z as in https://eprint.iacr.org/2008/522.
|
||||
func (v *Point) ExtendedCoordinates() (X, Y, Z, T *field.Element) {
|
||||
// This function is outlined to make the allocations inline in the caller
|
||||
// rather than happen on the heap. Don't change the style without making
|
||||
// sure it doesn't increase the inliner cost.
|
||||
var e [4]field.Element
|
||||
X, Y, Z, T = v.extendedCoordinates(&e)
|
||||
return
|
||||
}
|
||||
|
||||
func (v *Point) extendedCoordinates(e *[4]field.Element) (X, Y, Z, T *field.Element) {
|
||||
checkInitialized(v)
|
||||
X = e[0].Set(&v.x)
|
||||
Y = e[1].Set(&v.y)
|
||||
Z = e[2].Set(&v.z)
|
||||
T = e[3].Set(&v.t)
|
||||
return
|
||||
}
|
||||
|
||||
// SetExtendedCoordinates sets v = (X:Y:Z:T) in extended coordinates where
|
||||
// x = X/Z, y = Y/Z, and xy = T/Z as in https://eprint.iacr.org/2008/522.
|
||||
//
|
||||
// If the coordinates are invalid or don't represent a valid point on the curve,
|
||||
// SetExtendedCoordinates returns nil and an error and the receiver is
|
||||
// unchanged. Otherwise, SetExtendedCoordinates returns v.
|
||||
func (v *Point) SetExtendedCoordinates(X, Y, Z, T *field.Element) (*Point, error) {
|
||||
if !isOnCurve(X, Y, Z, T) {
|
||||
return nil, errors.New("edwards25519: invalid point coordinates")
|
||||
}
|
||||
v.x.Set(X)
|
||||
v.y.Set(Y)
|
||||
v.z.Set(Z)
|
||||
v.t.Set(T)
|
||||
return v, nil
|
||||
}
|
||||
|
||||
func isOnCurve(X, Y, Z, T *field.Element) bool {
|
||||
var lhs, rhs field.Element
|
||||
XX := new(field.Element).Square(X)
|
||||
YY := new(field.Element).Square(Y)
|
||||
ZZ := new(field.Element).Square(Z)
|
||||
TT := new(field.Element).Square(T)
|
||||
// -x² + y² = 1 + dx²y²
|
||||
// -(X/Z)² + (Y/Z)² = 1 + d(T/Z)²
|
||||
// -X² + Y² = Z² + dT²
|
||||
lhs.Subtract(YY, XX)
|
||||
rhs.Multiply(d, TT).Add(&rhs, ZZ)
|
||||
if lhs.Equal(&rhs) != 1 {
|
||||
return false
|
||||
}
|
||||
// xy = T/Z
|
||||
// XY/Z² = T/Z
|
||||
// XY = TZ
|
||||
lhs.Multiply(X, Y)
|
||||
rhs.Multiply(T, Z)
|
||||
return lhs.Equal(&rhs) == 1
|
||||
}
|
||||
|
||||
// BytesMontgomery converts v to a point on the birationally-equivalent
|
||||
// Curve25519 Montgomery curve, and returns its canonical 32 bytes encoding
|
||||
// according to RFC 7748.
|
||||
//
|
||||
// Note that BytesMontgomery only encodes the u-coordinate, so v and -v encode
|
||||
// to the same value. If v is the identity point, BytesMontgomery returns 32
|
||||
// zero bytes, analogously to the X25519 function.
|
||||
func (v *Point) BytesMontgomery() []byte {
|
||||
// This function is outlined to make the allocations inline in the caller
|
||||
// rather than happen on the heap.
|
||||
var buf [32]byte
|
||||
return v.bytesMontgomery(&buf)
|
||||
}
|
||||
|
||||
func (v *Point) bytesMontgomery(buf *[32]byte) []byte {
|
||||
checkInitialized(v)
|
||||
|
||||
// RFC 7748, Section 4.1 provides the bilinear map to calculate the
|
||||
// Montgomery u-coordinate
|
||||
//
|
||||
// u = (1 + y) / (1 - y)
|
||||
//
|
||||
// where y = Y / Z.
|
||||
|
||||
var y, recip, u field.Element
|
||||
|
||||
y.Multiply(&v.y, y.Invert(&v.z)) // y = Y / Z
|
||||
recip.Invert(recip.Subtract(feOne, &y)) // r = 1/(1 - y)
|
||||
u.Multiply(u.Add(feOne, &y), &recip) // u = (1 + y)*r
|
||||
|
||||
return copyFieldElement(buf, &u)
|
||||
}
|
||||
|
||||
// MultByCofactor sets v = 8 * p, and returns v.
|
||||
func (v *Point) MultByCofactor(p *Point) *Point {
|
||||
checkInitialized(p)
|
||||
result := projP1xP1{}
|
||||
pp := (&projP2{}).FromP3(p)
|
||||
result.Double(pp)
|
||||
pp.FromP1xP1(&result)
|
||||
result.Double(pp)
|
||||
pp.FromP1xP1(&result)
|
||||
result.Double(pp)
|
||||
return v.fromP1xP1(&result)
|
||||
}
|
||||
|
||||
// Given k > 0, set s = s**(2*i).
|
||||
func (s *Scalar) pow2k(k int) {
|
||||
for i := 0; i < k; i++ {
|
||||
s.Multiply(s, s)
|
||||
}
|
||||
}
|
||||
|
||||
// Invert sets s to the inverse of a nonzero scalar v, and returns s.
|
||||
//
|
||||
// If t is zero, Invert returns zero.
|
||||
func (s *Scalar) Invert(t *Scalar) *Scalar {
|
||||
// Uses a hardcoded sliding window of width 4.
|
||||
var table [8]Scalar
|
||||
var tt Scalar
|
||||
tt.Multiply(t, t)
|
||||
table[0] = *t
|
||||
for i := 0; i < 7; i++ {
|
||||
table[i+1].Multiply(&table[i], &tt)
|
||||
}
|
||||
// Now table = [t**1, t**3, t**7, t**11, t**13, t**15]
|
||||
// so t**k = t[k/2] for odd k
|
||||
|
||||
// To compute the sliding window digits, use the following Sage script:
|
||||
|
||||
// sage: import itertools
|
||||
// sage: def sliding_window(w,k):
|
||||
// ....: digits = []
|
||||
// ....: while k > 0:
|
||||
// ....: if k % 2 == 1:
|
||||
// ....: kmod = k % (2**w)
|
||||
// ....: digits.append(kmod)
|
||||
// ....: k = k - kmod
|
||||
// ....: else:
|
||||
// ....: digits.append(0)
|
||||
// ....: k = k // 2
|
||||
// ....: return digits
|
||||
|
||||
// Now we can compute s roughly as follows:
|
||||
|
||||
// sage: s = 1
|
||||
// sage: for coeff in reversed(sliding_window(4,l-2)):
|
||||
// ....: s = s*s
|
||||
// ....: if coeff > 0 :
|
||||
// ....: s = s*t**coeff
|
||||
|
||||
// This works on one bit at a time, with many runs of zeros.
|
||||
// The digits can be collapsed into [(count, coeff)] as follows:
|
||||
|
||||
// sage: [(len(list(group)),d) for d,group in itertools.groupby(sliding_window(4,l-2))]
|
||||
|
||||
// Entries of the form (k, 0) turn into pow2k(k)
|
||||
// Entries of the form (1, coeff) turn into a squaring and then a table lookup.
|
||||
// We can fold the squaring into the previous pow2k(k) as pow2k(k+1).
|
||||
|
||||
*s = table[1/2]
|
||||
s.pow2k(127 + 1)
|
||||
s.Multiply(s, &table[1/2])
|
||||
s.pow2k(4 + 1)
|
||||
s.Multiply(s, &table[9/2])
|
||||
s.pow2k(3 + 1)
|
||||
s.Multiply(s, &table[11/2])
|
||||
s.pow2k(3 + 1)
|
||||
s.Multiply(s, &table[13/2])
|
||||
s.pow2k(3 + 1)
|
||||
s.Multiply(s, &table[15/2])
|
||||
s.pow2k(4 + 1)
|
||||
s.Multiply(s, &table[7/2])
|
||||
s.pow2k(4 + 1)
|
||||
s.Multiply(s, &table[15/2])
|
||||
s.pow2k(3 + 1)
|
||||
s.Multiply(s, &table[5/2])
|
||||
s.pow2k(3 + 1)
|
||||
s.Multiply(s, &table[1/2])
|
||||
s.pow2k(4 + 1)
|
||||
s.Multiply(s, &table[15/2])
|
||||
s.pow2k(4 + 1)
|
||||
s.Multiply(s, &table[15/2])
|
||||
s.pow2k(4 + 1)
|
||||
s.Multiply(s, &table[7/2])
|
||||
s.pow2k(3 + 1)
|
||||
s.Multiply(s, &table[3/2])
|
||||
s.pow2k(4 + 1)
|
||||
s.Multiply(s, &table[11/2])
|
||||
s.pow2k(5 + 1)
|
||||
s.Multiply(s, &table[11/2])
|
||||
s.pow2k(9 + 1)
|
||||
s.Multiply(s, &table[9/2])
|
||||
s.pow2k(3 + 1)
|
||||
s.Multiply(s, &table[3/2])
|
||||
s.pow2k(4 + 1)
|
||||
s.Multiply(s, &table[3/2])
|
||||
s.pow2k(4 + 1)
|
||||
s.Multiply(s, &table[3/2])
|
||||
s.pow2k(4 + 1)
|
||||
s.Multiply(s, &table[9/2])
|
||||
s.pow2k(3 + 1)
|
||||
s.Multiply(s, &table[7/2])
|
||||
s.pow2k(3 + 1)
|
||||
s.Multiply(s, &table[3/2])
|
||||
s.pow2k(3 + 1)
|
||||
s.Multiply(s, &table[13/2])
|
||||
s.pow2k(3 + 1)
|
||||
s.Multiply(s, &table[7/2])
|
||||
s.pow2k(4 + 1)
|
||||
s.Multiply(s, &table[9/2])
|
||||
s.pow2k(3 + 1)
|
||||
s.Multiply(s, &table[15/2])
|
||||
s.pow2k(4 + 1)
|
||||
s.Multiply(s, &table[11/2])
|
||||
|
||||
return s
|
||||
}
|
||||
|
||||
// MultiScalarMult sets v = sum(scalars[i] * points[i]), and returns v.
|
||||
//
|
||||
// Execution time depends only on the lengths of the two slices, which must match.
|
||||
func (v *Point) MultiScalarMult(scalars []*Scalar, points []*Point) *Point {
|
||||
if len(scalars) != len(points) {
|
||||
panic("edwards25519: called MultiScalarMult with different size inputs")
|
||||
}
|
||||
checkInitialized(points...)
|
||||
|
||||
// Proceed as in the single-base case, but share doublings
|
||||
// between each point in the multiscalar equation.
|
||||
|
||||
// Build lookup tables for each point
|
||||
tables := make([]projLookupTable, len(points))
|
||||
for i := range tables {
|
||||
tables[i].FromP3(points[i])
|
||||
}
|
||||
// Compute signed radix-16 digits for each scalar
|
||||
digits := make([][64]int8, len(scalars))
|
||||
for i := range digits {
|
||||
digits[i] = scalars[i].signedRadix16()
|
||||
}
|
||||
|
||||
// Unwrap first loop iteration to save computing 16*identity
|
||||
multiple := &projCached{}
|
||||
tmp1 := &projP1xP1{}
|
||||
tmp2 := &projP2{}
|
||||
// Lookup-and-add the appropriate multiple of each input point
|
||||
for j := range tables {
|
||||
tables[j].SelectInto(multiple, digits[j][63])
|
||||
tmp1.Add(v, multiple) // tmp1 = v + x_(j,63)*Q in P1xP1 coords
|
||||
v.fromP1xP1(tmp1) // update v
|
||||
}
|
||||
tmp2.FromP3(v) // set up tmp2 = v in P2 coords for next iteration
|
||||
for i := 62; i >= 0; i-- {
|
||||
tmp1.Double(tmp2) // tmp1 = 2*(prev) in P1xP1 coords
|
||||
tmp2.FromP1xP1(tmp1) // tmp2 = 2*(prev) in P2 coords
|
||||
tmp1.Double(tmp2) // tmp1 = 4*(prev) in P1xP1 coords
|
||||
tmp2.FromP1xP1(tmp1) // tmp2 = 4*(prev) in P2 coords
|
||||
tmp1.Double(tmp2) // tmp1 = 8*(prev) in P1xP1 coords
|
||||
tmp2.FromP1xP1(tmp1) // tmp2 = 8*(prev) in P2 coords
|
||||
tmp1.Double(tmp2) // tmp1 = 16*(prev) in P1xP1 coords
|
||||
v.fromP1xP1(tmp1) // v = 16*(prev) in P3 coords
|
||||
// Lookup-and-add the appropriate multiple of each input point
|
||||
for j := range tables {
|
||||
tables[j].SelectInto(multiple, digits[j][i])
|
||||
tmp1.Add(v, multiple) // tmp1 = v + x_(j,i)*Q in P1xP1 coords
|
||||
v.fromP1xP1(tmp1) // update v
|
||||
}
|
||||
tmp2.FromP3(v) // set up tmp2 = v in P2 coords for next iteration
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
// VarTimeMultiScalarMult sets v = sum(scalars[i] * points[i]), and returns v.
|
||||
//
|
||||
// Execution time depends on the inputs.
|
||||
func (v *Point) VarTimeMultiScalarMult(scalars []*Scalar, points []*Point) *Point {
|
||||
if len(scalars) != len(points) {
|
||||
panic("edwards25519: called VarTimeMultiScalarMult with different size inputs")
|
||||
}
|
||||
checkInitialized(points...)
|
||||
|
||||
// Generalize double-base NAF computation to arbitrary sizes.
|
||||
// Here all the points are dynamic, so we only use the smaller
|
||||
// tables.
|
||||
|
||||
// Build lookup tables for each point
|
||||
tables := make([]nafLookupTable5, len(points))
|
||||
for i := range tables {
|
||||
tables[i].FromP3(points[i])
|
||||
}
|
||||
// Compute a NAF for each scalar
|
||||
nafs := make([][256]int8, len(scalars))
|
||||
for i := range nafs {
|
||||
nafs[i] = scalars[i].nonAdjacentForm(5)
|
||||
}
|
||||
|
||||
multiple := &projCached{}
|
||||
tmp1 := &projP1xP1{}
|
||||
tmp2 := &projP2{}
|
||||
tmp2.Zero()
|
||||
|
||||
// Move from high to low bits, doubling the accumulator
|
||||
// at each iteration and checking whether there is a nonzero
|
||||
// coefficient to look up a multiple of.
|
||||
//
|
||||
// Skip trying to find the first nonzero coefficent, because
|
||||
// searching might be more work than a few extra doublings.
|
||||
for i := 255; i >= 0; i-- {
|
||||
tmp1.Double(tmp2)
|
||||
|
||||
for j := range nafs {
|
||||
if nafs[j][i] > 0 {
|
||||
v.fromP1xP1(tmp1)
|
||||
tables[j].SelectInto(multiple, nafs[j][i])
|
||||
tmp1.Add(v, multiple)
|
||||
} else if nafs[j][i] < 0 {
|
||||
v.fromP1xP1(tmp1)
|
||||
tables[j].SelectInto(multiple, -nafs[j][i])
|
||||
tmp1.Sub(v, multiple)
|
||||
}
|
||||
}
|
||||
|
||||
tmp2.FromP1xP1(tmp1)
|
||||
}
|
||||
|
||||
v.fromP2(tmp2)
|
||||
return v
|
||||
}
|
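Among the extras above, `BytesMontgomery` maps a point to the birationally-equivalent Curve25519 curve and encodes only the u-coordinate. The edwards25519 basepoint should map to the X25519 basepoint, whose u-coordinate is 9; a small hedged sketch (not vendored code):

```go
package main

import (
	"bytes"
	"fmt"

	"filippo.io/edwards25519"
)

func main() {
	// u-coordinate of the generator on the Montgomery curve, little-endian.
	u := edwards25519.NewGeneratorPoint().BytesMontgomery()

	want := make([]byte, 32) // the X25519 basepoint, u = 9
	want[0] = 9

	fmt.Println(bytes.Equal(u, want)) // true
}
```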
419 vendor/filippo.io/edwards25519/field/fe.go (generated, vendored, Normal file)
@@ -0,0 +1,419 @@
|
||||
// Copyright (c) 2017 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package field implements fast arithmetic modulo 2^255-19.
|
||||
package field
|
||||
|
||||
import (
|
||||
"crypto/subtle"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"math/bits"
|
||||
)
|
||||
|
||||
// Element represents an element of the field GF(2^255-19). Note that this
|
||||
// is not a cryptographically secure group, and should only be used to interact
|
||||
// with edwards25519.Point coordinates.
|
||||
//
|
||||
// This type works similarly to math/big.Int, and all arguments and receivers
|
||||
// are allowed to alias.
|
||||
//
|
||||
// The zero value is a valid zero element.
|
||||
type Element struct {
|
||||
// An element t represents the integer
|
||||
// t.l0 + t.l1*2^51 + t.l2*2^102 + t.l3*2^153 + t.l4*2^204
|
||||
//
|
||||
// Between operations, all limbs are expected to be lower than 2^52.
|
||||
l0 uint64
|
||||
l1 uint64
|
||||
l2 uint64
|
||||
l3 uint64
|
||||
l4 uint64
|
||||
}
|
||||
|
||||
const maskLow51Bits uint64 = (1 << 51) - 1
|
||||
|
||||
var feZero = &Element{0, 0, 0, 0, 0}
|
||||
|
||||
// Zero sets v = 0, and returns v.
|
||||
func (v *Element) Zero() *Element {
|
||||
*v = *feZero
|
||||
return v
|
||||
}
|
||||
|
||||
var feOne = &Element{1, 0, 0, 0, 0}
|
||||
|
||||
// One sets v = 1, and returns v.
|
||||
func (v *Element) One() *Element {
|
||||
*v = *feOne
|
||||
return v
|
||||
}
|
||||
|
||||
// reduce reduces v modulo 2^255 - 19 and returns it.
|
||||
func (v *Element) reduce() *Element {
|
||||
v.carryPropagate()
|
||||
|
||||
// After the light reduction we now have a field element representation
|
||||
// v < 2^255 + 2^13 * 19, but need v < 2^255 - 19.
|
||||
|
||||
// If v >= 2^255 - 19, then v + 19 >= 2^255, which would overflow 2^255 - 1,
|
||||
// generating a carry. That is, c will be 0 if v < 2^255 - 19, and 1 otherwise.
|
||||
c := (v.l0 + 19) >> 51
|
||||
c = (v.l1 + c) >> 51
|
||||
c = (v.l2 + c) >> 51
|
||||
c = (v.l3 + c) >> 51
|
||||
c = (v.l4 + c) >> 51
|
||||
|
||||
// If v < 2^255 - 19 and c = 0, this will be a no-op. Otherwise, it's
|
||||
// effectively applying the reduction identity to the carry.
|
||||
v.l0 += 19 * c
|
||||
|
||||
v.l1 += v.l0 >> 51
|
||||
v.l0 = v.l0 & maskLow51Bits
|
||||
v.l2 += v.l1 >> 51
|
||||
v.l1 = v.l1 & maskLow51Bits
|
||||
v.l3 += v.l2 >> 51
|
||||
v.l2 = v.l2 & maskLow51Bits
|
||||
v.l4 += v.l3 >> 51
|
||||
v.l3 = v.l3 & maskLow51Bits
|
||||
// no additional carry
|
||||
v.l4 = v.l4 & maskLow51Bits
|
||||
|
||||
return v
|
||||
}
|
||||
|
||||
// Add sets v = a + b, and returns v.
|
||||
func (v *Element) Add(a, b *Element) *Element {
|
||||
v.l0 = a.l0 + b.l0
|
||||
v.l1 = a.l1 + b.l1
|
||||
v.l2 = a.l2 + b.l2
|
||||
v.l3 = a.l3 + b.l3
|
||||
v.l4 = a.l4 + b.l4
|
||||
// Using the generic implementation here is actually faster than the
|
||||
// assembly. Probably because the body of this function is so simple that
|
||||
// the compiler can figure out better optimizations by inlining the carry
|
||||
// propagation.
|
||||
return v.carryPropagateGeneric()
|
||||
}
|
||||
|
||||
// Subtract sets v = a - b, and returns v.
|
||||
func (v *Element) Subtract(a, b *Element) *Element {
|
||||
// We first add 2 * p, to guarantee the subtraction won't underflow, and
|
||||
// then subtract b (which can be up to 2^255 + 2^13 * 19).
|
||||
v.l0 = (a.l0 + 0xFFFFFFFFFFFDA) - b.l0
|
||||
v.l1 = (a.l1 + 0xFFFFFFFFFFFFE) - b.l1
|
||||
v.l2 = (a.l2 + 0xFFFFFFFFFFFFE) - b.l2
|
||||
v.l3 = (a.l3 + 0xFFFFFFFFFFFFE) - b.l3
|
||||
v.l4 = (a.l4 + 0xFFFFFFFFFFFFE) - b.l4
|
||||
return v.carryPropagate()
|
||||
}
|
||||
|
||||
// Negate sets v = -a, and returns v.
|
||||
func (v *Element) Negate(a *Element) *Element {
|
||||
return v.Subtract(feZero, a)
|
||||
}
|
||||
|
||||
// Invert sets v = 1/z mod p, and returns v.
|
||||
//
|
||||
// If z == 0, Invert returns v = 0.
|
||||
func (v *Element) Invert(z *Element) *Element {
|
||||
// Inversion is implemented as exponentiation with exponent p − 2. It uses the
|
||||
// same sequence of 255 squarings and 11 multiplications as [Curve25519].
|
||||
var z2, z9, z11, z2_5_0, z2_10_0, z2_20_0, z2_50_0, z2_100_0, t Element
|
||||
|
||||
z2.Square(z) // 2
|
||||
t.Square(&z2) // 4
|
||||
t.Square(&t) // 8
|
||||
z9.Multiply(&t, z) // 9
|
||||
z11.Multiply(&z9, &z2) // 11
|
||||
t.Square(&z11) // 22
|
||||
z2_5_0.Multiply(&t, &z9) // 31 = 2^5 - 2^0
|
||||
|
||||
t.Square(&z2_5_0) // 2^6 - 2^1
|
||||
for i := 0; i < 4; i++ {
|
||||
t.Square(&t) // 2^10 - 2^5
|
||||
}
|
||||
z2_10_0.Multiply(&t, &z2_5_0) // 2^10 - 2^0
|
||||
|
||||
t.Square(&z2_10_0) // 2^11 - 2^1
|
||||
for i := 0; i < 9; i++ {
|
||||
t.Square(&t) // 2^20 - 2^10
|
||||
}
|
||||
z2_20_0.Multiply(&t, &z2_10_0) // 2^20 - 2^0
|
||||
|
||||
t.Square(&z2_20_0) // 2^21 - 2^1
|
||||
for i := 0; i < 19; i++ {
|
||||
t.Square(&t) // 2^40 - 2^20
|
||||
}
|
||||
t.Multiply(&t, &z2_20_0) // 2^40 - 2^0
|
||||
|
||||
t.Square(&t) // 2^41 - 2^1
|
||||
for i := 0; i < 9; i++ {
|
||||
t.Square(&t) // 2^50 - 2^10
|
||||
}
|
||||
z2_50_0.Multiply(&t, &z2_10_0) // 2^50 - 2^0
|
||||
|
||||
t.Square(&z2_50_0) // 2^51 - 2^1
|
||||
for i := 0; i < 49; i++ {
|
||||
t.Square(&t) // 2^100 - 2^50
|
||||
}
|
||||
z2_100_0.Multiply(&t, &z2_50_0) // 2^100 - 2^0
|
||||
|
||||
t.Square(&z2_100_0) // 2^101 - 2^1
|
||||
for i := 0; i < 99; i++ {
|
||||
t.Square(&t) // 2^200 - 2^100
|
||||
}
|
||||
t.Multiply(&t, &z2_100_0) // 2^200 - 2^0
|
||||
|
||||
t.Square(&t) // 2^201 - 2^1
|
||||
for i := 0; i < 49; i++ {
|
||||
t.Square(&t) // 2^250 - 2^50
|
||||
}
|
||||
t.Multiply(&t, &z2_50_0) // 2^250 - 2^0
|
||||
|
||||
t.Square(&t) // 2^251 - 2^1
|
||||
t.Square(&t) // 2^252 - 2^2
|
||||
t.Square(&t) // 2^253 - 2^3
|
||||
t.Square(&t) // 2^254 - 2^4
|
||||
t.Square(&t) // 2^255 - 2^5
|
||||
|
||||
return v.Multiply(&t, &z11) // 2^255 - 21
|
||||
}
|
||||
|
||||
// Set sets v = a, and returns v.
|
||||
func (v *Element) Set(a *Element) *Element {
|
||||
*v = *a
|
||||
return v
|
||||
}
|
||||
|
||||
// SetBytes sets v to x, where x is a 32-byte little-endian encoding. If x is
|
||||
// not of the right length, SetUniformBytes returns nil and an error, and the
|
||||
// receiver is unchanged.
|
||||
//
|
||||
// Consistent with RFC 7748, the most significant bit (the high bit of the
|
||||
// last byte) is ignored, and non-canonical values (2^255-19 through 2^255-1)
|
||||
// are accepted. Note that this is laxer than specified by RFC 8032.
|
||||
func (v *Element) SetBytes(x []byte) (*Element, error) {
|
||||
if len(x) != 32 {
|
||||
return nil, errors.New("edwards25519: invalid field element input size")
|
||||
}
|
||||
|
||||
// Bits 0:51 (bytes 0:8, bits 0:64, shift 0, mask 51).
|
||||
v.l0 = binary.LittleEndian.Uint64(x[0:8])
|
||||
v.l0 &= maskLow51Bits
|
||||
// Bits 51:102 (bytes 6:14, bits 48:112, shift 3, mask 51).
|
||||
v.l1 = binary.LittleEndian.Uint64(x[6:14]) >> 3
|
||||
v.l1 &= maskLow51Bits
|
||||
// Bits 102:153 (bytes 12:20, bits 96:160, shift 6, mask 51).
|
||||
v.l2 = binary.LittleEndian.Uint64(x[12:20]) >> 6
|
||||
v.l2 &= maskLow51Bits
|
||||
// Bits 153:204 (bytes 19:27, bits 152:216, shift 1, mask 51).
|
||||
v.l3 = binary.LittleEndian.Uint64(x[19:27]) >> 1
|
||||
v.l3 &= maskLow51Bits
|
||||
// Bits 204:251 (bytes 24:32, bits 192:256, shift 12, mask 51).
|
||||
// Note: not bytes 25:33, shift 4, to avoid overread.
|
||||
v.l4 = binary.LittleEndian.Uint64(x[24:32]) >> 12
|
||||
v.l4 &= maskLow51Bits
|
||||
|
||||
return v, nil
|
||||
}
|
||||
|
||||
// Bytes returns the canonical 32-byte little-endian encoding of v.
|
||||
func (v *Element) Bytes() []byte {
|
||||
// This function is outlined to make the allocations inline in the caller
|
||||
// rather than happen on the heap.
|
||||
var out [32]byte
|
||||
return v.bytes(&out)
|
||||
}
|
||||
|
||||
func (v *Element) bytes(out *[32]byte) []byte {
|
||||
t := *v
|
||||
t.reduce()
|
||||
|
||||
var buf [8]byte
|
||||
for i, l := range [5]uint64{t.l0, t.l1, t.l2, t.l3, t.l4} {
|
||||
bitsOffset := i * 51
|
||||
binary.LittleEndian.PutUint64(buf[:], l<<uint(bitsOffset%8))
|
||||
for i, bb := range buf {
|
||||
off := bitsOffset/8 + i
|
||||
if off >= len(out) {
|
||||
break
|
||||
}
|
||||
out[off] |= bb
|
||||
}
|
||||
}
|
||||
|
||||
return out[:]
|
||||
}
|
||||
|
||||
// Equal returns 1 if v and u are equal, and 0 otherwise.
|
||||
func (v *Element) Equal(u *Element) int {
|
||||
sa, sv := u.Bytes(), v.Bytes()
|
||||
return subtle.ConstantTimeCompare(sa, sv)
|
||||
}
|
||||
|
||||
// mask64Bits returns 0xffffffff if cond is 1, and 0 otherwise.
|
||||
func mask64Bits(cond int) uint64 { return ^(uint64(cond) - 1) }
|
||||
|
||||
// Select sets v to a if cond == 1, and to b if cond == 0.
|
||||
func (v *Element) Select(a, b *Element, cond int) *Element {
|
||||
m := mask64Bits(cond)
|
||||
v.l0 = (m & a.l0) | (^m & b.l0)
|
||||
v.l1 = (m & a.l1) | (^m & b.l1)
|
||||
v.l2 = (m & a.l2) | (^m & b.l2)
|
||||
v.l3 = (m & a.l3) | (^m & b.l3)
|
||||
v.l4 = (m & a.l4) | (^m & b.l4)
|
||||
return v
|
||||
}
|
||||
|
||||
// Swap swaps v and u if cond == 1 or leaves them unchanged if cond == 0, and returns v.
|
||||
func (v *Element) Swap(u *Element, cond int) {
|
||||
m := mask64Bits(cond)
|
||||
t := m & (v.l0 ^ u.l0)
|
||||
v.l0 ^= t
|
||||
u.l0 ^= t
|
||||
t = m & (v.l1 ^ u.l1)
|
||||
v.l1 ^= t
|
||||
u.l1 ^= t
|
||||
t = m & (v.l2 ^ u.l2)
|
||||
v.l2 ^= t
|
||||
u.l2 ^= t
|
||||
t = m & (v.l3 ^ u.l3)
|
||||
v.l3 ^= t
|
||||
u.l3 ^= t
|
||||
t = m & (v.l4 ^ u.l4)
|
||||
v.l4 ^= t
|
||||
u.l4 ^= t
|
||||
}
|
||||
|
||||
// IsNegative returns 1 if v is negative, and 0 otherwise.
|
||||
func (v *Element) IsNegative() int {
|
||||
return int(v.Bytes()[0] & 1)
|
||||
}
|
||||
|
||||
// Absolute sets v to |u|, and returns v.
|
||||
func (v *Element) Absolute(u *Element) *Element {
|
||||
return v.Select(new(Element).Negate(u), u, u.IsNegative())
|
||||
}
|
||||
|
||||
// Multiply sets v = x * y, and returns v.
|
||||
func (v *Element) Multiply(x, y *Element) *Element {
|
||||
feMul(v, x, y)
|
||||
return v
|
||||
}
|
||||
|
||||
// Square sets v = x * x, and returns v.
|
||||
func (v *Element) Square(x *Element) *Element {
|
||||
feSquare(v, x)
|
||||
return v
|
||||
}
|
||||
|
||||
// Mult32 sets v = x * y, and returns v.
|
||||
func (v *Element) Mult32(x *Element, y uint32) *Element {
|
||||
x0lo, x0hi := mul51(x.l0, y)
|
||||
x1lo, x1hi := mul51(x.l1, y)
|
||||
x2lo, x2hi := mul51(x.l2, y)
|
||||
x3lo, x3hi := mul51(x.l3, y)
|
||||
x4lo, x4hi := mul51(x.l4, y)
|
||||
v.l0 = x0lo + 19*x4hi // carried over per the reduction identity
|
||||
v.l1 = x1lo + x0hi
|
||||
v.l2 = x2lo + x1hi
|
||||
v.l3 = x3lo + x2hi
|
||||
v.l4 = x4lo + x3hi
|
||||
// The hi portions are going to be only 32 bits, plus any previous excess,
|
||||
// so we can skip the carry propagation.
|
||||
return v
|
||||
}
|
||||
|
||||
// mul51 returns lo + hi * 2⁵¹ = a * b.
|
||||
func mul51(a uint64, b uint32) (lo uint64, hi uint64) {
|
||||
mh, ml := bits.Mul64(a, uint64(b))
|
||||
lo = ml & maskLow51Bits
|
||||
hi = (mh << 13) | (ml >> 51)
|
||||
return
|
||||
}
|
||||
|
||||
// Pow22523 set v = x^((p-5)/8), and returns v. (p-5)/8 is 2^252-3.
|
||||
func (v *Element) Pow22523(x *Element) *Element {
|
||||
var t0, t1, t2 Element
|
||||
|
||||
t0.Square(x) // x^2
|
||||
t1.Square(&t0) // x^4
|
||||
t1.Square(&t1) // x^8
|
||||
t1.Multiply(x, &t1) // x^9
|
||||
t0.Multiply(&t0, &t1) // x^11
|
||||
t0.Square(&t0) // x^22
|
||||
t0.Multiply(&t1, &t0) // x^31
|
||||
t1.Square(&t0) // x^62
|
||||
for i := 1; i < 5; i++ { // x^992
|
||||
t1.Square(&t1)
|
||||
}
|
||||
t0.Multiply(&t1, &t0) // x^1023 -> 1023 = 2^10 - 1
|
||||
t1.Square(&t0) // 2^11 - 2
|
||||
for i := 1; i < 10; i++ { // 2^20 - 2^10
|
||||
t1.Square(&t1)
|
||||
}
|
||||
t1.Multiply(&t1, &t0) // 2^20 - 1
|
||||
t2.Square(&t1) // 2^21 - 2
|
||||
for i := 1; i < 20; i++ { // 2^40 - 2^20
|
||||
t2.Square(&t2)
|
||||
}
|
||||
t1.Multiply(&t2, &t1) // 2^40 - 1
|
||||
t1.Square(&t1) // 2^41 - 2
|
||||
for i := 1; i < 10; i++ { // 2^50 - 2^10
|
||||
t1.Square(&t1)
|
||||
}
|
||||
t0.Multiply(&t1, &t0) // 2^50 - 1
|
||||
t1.Square(&t0) // 2^51 - 2
|
||||
for i := 1; i < 50; i++ { // 2^100 - 2^50
|
||||
t1.Square(&t1)
|
||||
}
|
||||
t1.Multiply(&t1, &t0) // 2^100 - 1
|
||||
t2.Square(&t1) // 2^101 - 2
|
||||
for i := 1; i < 100; i++ { // 2^200 - 2^100
|
||||
t2.Square(&t2)
|
||||
}
|
||||
t1.Multiply(&t2, &t1) // 2^200 - 1
|
||||
t1.Square(&t1) // 2^201 - 2
|
||||
for i := 1; i < 50; i++ { // 2^250 - 2^50
|
||||
t1.Square(&t1)
|
||||
}
|
||||
t0.Multiply(&t1, &t0) // 2^250 - 1
|
||||
t0.Square(&t0) // 2^251 - 2
|
||||
t0.Square(&t0) // 2^252 - 4
|
||||
return v.Multiply(&t0, x) // 2^252 - 3 -> x^(2^252-3)
|
||||
}
|
||||
|
||||
// sqrtM1 is 2^((p-1)/4), which squared is equal to -1 by Euler's Criterion.
|
||||
var sqrtM1 = &Element{1718705420411056, 234908883556509,
|
||||
2233514472574048, 2117202627021982, 765476049583133}
|
||||
|
||||
// SqrtRatio sets r to the non-negative square root of the ratio of u and v.
|
||||
//
|
||||
// If u/v is square, SqrtRatio returns r and 1. If u/v is not square, SqrtRatio
|
||||
// sets r according to Section 4.3 of draft-irtf-cfrg-ristretto255-decaf448-00,
|
||||
// and returns r and 0.
|
||||
func (r *Element) SqrtRatio(u, v *Element) (rr *Element, wasSquare int) {
|
||||
var a, b Element
|
||||
|
||||
// r = (u * v3) * (u * v7)^((p-5)/8)
|
||||
v2 := a.Square(v)
|
||||
uv3 := b.Multiply(u, b.Multiply(v2, v))
|
||||
uv7 := a.Multiply(uv3, a.Square(v2))
|
||||
r.Multiply(uv3, r.Pow22523(uv7))
|
||||
|
||||
check := a.Multiply(v, a.Square(r)) // check = v * r^2
|
||||
|
||||
uNeg := b.Negate(u)
|
||||
correctSignSqrt := check.Equal(u)
|
||||
flippedSignSqrt := check.Equal(uNeg)
|
||||
flippedSignSqrtI := check.Equal(uNeg.Multiply(uNeg, sqrtM1))
|
||||
|
||||
rPrime := b.Multiply(r, sqrtM1) // r_prime = SQRT_M1 * r
|
||||
// r = CT_SELECT(r_prime IF flipped_sign_sqrt | flipped_sign_sqrt_i ELSE r)
|
||||
r.Select(rPrime, r, flippedSignSqrt|flippedSignSqrtI)
|
||||
|
||||
r.Absolute(r) // Choose the nonnegative square root.
|
||||
return r, correctSignSqrt | flippedSignSqrt
|
||||
}
|
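The `field.Element` API above can be exercised directly; here is a hedged sketch (my addition, not part of the vendored files) checking that `Square(x)` agrees with `Multiply(x, x)` and that `Bytes` returns the reduced canonical encoding:

```go
package main

import (
	"fmt"

	"filippo.io/edwards25519/field"
)

func main() {
	// 32 arbitrary little-endian bytes; per SetBytes the top bit is ignored
	// and non-canonical values are accepted.
	in := make([]byte, 32)
	for i := range in {
		in[i] = byte(i + 1)
	}

	x, err := new(field.Element).SetBytes(in)
	if err != nil {
		panic(err)
	}

	sq := new(field.Element).Square(x)
	mul := new(field.Element).Multiply(x, x)
	fmt.Println(sq.Equal(mul) == 1) // true

	fmt.Printf("%x\n", x.Bytes()) // canonical encoding, reduced mod 2^255-19
}
```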
13 vendor/filippo.io/edwards25519/field/fe_amd64.go (generated, vendored, Normal file)
@@ -0,0 +1,13 @@
// Code generated by command: go run fe_amd64_asm.go -out ../fe_amd64.s -stubs ../fe_amd64.go -pkg field. DO NOT EDIT.

// +build amd64,gc,!purego

package field

// feMul sets out = a * b. It works like feMulGeneric.
//go:noescape
func feMul(out *Element, a *Element, b *Element)

// feSquare sets out = a * a. It works like feSquareGeneric.
//go:noescape
func feSquare(out *Element, a *Element)
378 vendor/filippo.io/edwards25519/field/fe_amd64.s (generated, vendored, Normal file)
@@ -0,0 +1,378 @@
|
||||
// Code generated by command: go run fe_amd64_asm.go -out ../fe_amd64.s -stubs ../fe_amd64.go -pkg field. DO NOT EDIT.
|
||||
|
||||
// +build amd64,gc,!purego
|
||||
|
||||
#include "textflag.h"
|
||||
|
||||
// func feMul(out *Element, a *Element, b *Element)
|
||||
TEXT ·feMul(SB), NOSPLIT, $0-24
|
||||
MOVQ a+8(FP), CX
|
||||
MOVQ b+16(FP), BX
|
||||
|
||||
// r0 = a0×b0
|
||||
MOVQ (CX), AX
|
||||
MULQ (BX)
|
||||
MOVQ AX, DI
|
||||
MOVQ DX, SI
|
||||
|
||||
// r0 += 19×a1×b4
|
||||
MOVQ 8(CX), AX
|
||||
IMUL3Q $0x13, AX, AX
|
||||
MULQ 32(BX)
|
||||
ADDQ AX, DI
|
||||
ADCQ DX, SI
|
||||
|
||||
// r0 += 19×a2×b3
|
||||
MOVQ 16(CX), AX
|
||||
IMUL3Q $0x13, AX, AX
|
||||
MULQ 24(BX)
|
||||
ADDQ AX, DI
|
||||
ADCQ DX, SI
|
||||
|
||||
// r0 += 19×a3×b2
|
||||
MOVQ 24(CX), AX
|
||||
IMUL3Q $0x13, AX, AX
|
||||
MULQ 16(BX)
|
||||
ADDQ AX, DI
|
||||
ADCQ DX, SI
|
||||
|
||||
// r0 += 19×a4×b1
|
||||
MOVQ 32(CX), AX
|
||||
IMUL3Q $0x13, AX, AX
|
||||
MULQ 8(BX)
|
||||
ADDQ AX, DI
|
||||
ADCQ DX, SI
|
||||
|
||||
// r1 = a0×b1
|
||||
MOVQ (CX), AX
|
||||
MULQ 8(BX)
|
||||
MOVQ AX, R9
|
||||
MOVQ DX, R8
|
||||
|
||||
// r1 += a1×b0
|
||||
MOVQ 8(CX), AX
|
||||
MULQ (BX)
|
||||
ADDQ AX, R9
|
||||
ADCQ DX, R8
|
||||
|
||||
// r1 += 19×a2×b4
|
||||
MOVQ 16(CX), AX
|
||||
IMUL3Q $0x13, AX, AX
|
||||
MULQ 32(BX)
|
||||
ADDQ AX, R9
|
||||
ADCQ DX, R8
|
||||
|
||||
// r1 += 19×a3×b3
|
||||
MOVQ 24(CX), AX
|
||||
IMUL3Q $0x13, AX, AX
|
||||
MULQ 24(BX)
|
||||
ADDQ AX, R9
|
||||
ADCQ DX, R8
|
||||
|
||||
// r1 += 19×a4×b2
|
||||
MOVQ 32(CX), AX
|
||||
IMUL3Q $0x13, AX, AX
|
||||
MULQ 16(BX)
|
||||
ADDQ AX, R9
|
||||
ADCQ DX, R8
|
||||
|
||||
// r2 = a0×b2
|
||||
MOVQ (CX), AX
|
||||
MULQ 16(BX)
|
||||
MOVQ AX, R11
|
||||
MOVQ DX, R10
|
||||
|
||||
// r2 += a1×b1
|
||||
MOVQ 8(CX), AX
|
||||
MULQ 8(BX)
|
||||
ADDQ AX, R11
|
||||
ADCQ DX, R10
|
||||
|
||||
// r2 += a2×b0
|
||||
MOVQ 16(CX), AX
|
||||
MULQ (BX)
|
||||
ADDQ AX, R11
|
||||
ADCQ DX, R10
|
||||
|
||||
// r2 += 19×a3×b4
|
||||
MOVQ 24(CX), AX
|
||||
IMUL3Q $0x13, AX, AX
|
||||
MULQ 32(BX)
|
||||
ADDQ AX, R11
|
||||
ADCQ DX, R10
|
||||
|
||||
// r2 += 19×a4×b3
|
||||
MOVQ 32(CX), AX
|
||||
IMUL3Q $0x13, AX, AX
|
||||
MULQ 24(BX)
|
||||
ADDQ AX, R11
|
||||
ADCQ DX, R10
|
||||
|
||||
// r3 = a0×b3
|
||||
MOVQ (CX), AX
|
||||
MULQ 24(BX)
|
||||
MOVQ AX, R13
|
||||
MOVQ DX, R12
|
||||
|
||||
// r3 += a1×b2
|
||||
MOVQ 8(CX), AX
|
||||
MULQ 16(BX)
|
||||
ADDQ AX, R13
|
||||
ADCQ DX, R12
|
||||
|
||||
// r3 += a2×b1
|
||||
MOVQ 16(CX), AX
|
||||
MULQ 8(BX)
|
||||
ADDQ AX, R13
|
||||
ADCQ DX, R12
|
||||
|
||||
// r3 += a3×b0
|
||||
MOVQ 24(CX), AX
|
||||
MULQ (BX)
|
||||
ADDQ AX, R13
|
||||
ADCQ DX, R12
|
||||
|
||||
// r3 += 19×a4×b4
|
||||
MOVQ 32(CX), AX
|
||||
IMUL3Q $0x13, AX, AX
|
||||
MULQ 32(BX)
|
||||
ADDQ AX, R13
|
||||
ADCQ DX, R12
|
||||
|
||||
// r4 = a0×b4
|
||||
MOVQ (CX), AX
|
||||
MULQ 32(BX)
|
||||
MOVQ AX, R15
|
||||
MOVQ DX, R14
|
||||
|
||||
// r4 += a1×b3
|
||||
MOVQ 8(CX), AX
|
||||
MULQ 24(BX)
|
||||
ADDQ AX, R15
|
||||
ADCQ DX, R14
|
||||
|
||||
// r4 += a2×b2
|
||||
MOVQ 16(CX), AX
|
||||
MULQ 16(BX)
|
||||
ADDQ AX, R15
|
||||
ADCQ DX, R14
|
||||
|
||||
// r4 += a3×b1
|
||||
MOVQ 24(CX), AX
|
||||
MULQ 8(BX)
|
||||
ADDQ AX, R15
|
||||
ADCQ DX, R14
|
||||
|
||||
// r4 += a4×b0
|
||||
MOVQ 32(CX), AX
|
||||
MULQ (BX)
|
||||
ADDQ AX, R15
|
||||
ADCQ DX, R14
|
||||
|
||||
// First reduction chain
|
||||
MOVQ $0x0007ffffffffffff, AX
|
||||
SHLQ $0x0d, DI, SI
|
||||
SHLQ $0x0d, R9, R8
|
||||
SHLQ $0x0d, R11, R10
|
||||
SHLQ $0x0d, R13, R12
|
||||
SHLQ $0x0d, R15, R14
|
||||
ANDQ AX, DI
|
||||
IMUL3Q $0x13, R14, R14
|
||||
ADDQ R14, DI
|
||||
ANDQ AX, R9
|
||||
ADDQ SI, R9
|
||||
ANDQ AX, R11
|
||||
ADDQ R8, R11
|
||||
ANDQ AX, R13
|
||||
ADDQ R10, R13
|
||||
ANDQ AX, R15
|
||||
ADDQ R12, R15
|
||||
|
||||
// Second reduction chain (carryPropagate)
|
||||
MOVQ DI, SI
|
||||
SHRQ $0x33, SI
|
||||
MOVQ R9, R8
|
||||
SHRQ $0x33, R8
|
||||
MOVQ R11, R10
|
||||
SHRQ $0x33, R10
|
||||
MOVQ R13, R12
|
||||
SHRQ $0x33, R12
|
||||
MOVQ R15, R14
|
||||
SHRQ $0x33, R14
|
||||
ANDQ AX, DI
|
||||
IMUL3Q $0x13, R14, R14
|
||||
ADDQ R14, DI
|
||||
ANDQ AX, R9
|
||||
ADDQ SI, R9
|
||||
ANDQ AX, R11
|
||||
ADDQ R8, R11
|
||||
ANDQ AX, R13
|
||||
ADDQ R10, R13
|
||||
ANDQ AX, R15
|
||||
ADDQ R12, R15
|
||||
|
||||
// Store output
|
||||
MOVQ out+0(FP), AX
|
||||
MOVQ DI, (AX)
|
||||
MOVQ R9, 8(AX)
|
||||
MOVQ R11, 16(AX)
|
||||
MOVQ R13, 24(AX)
|
||||
MOVQ R15, 32(AX)
|
||||
RET
|
||||
|
||||
// func feSquare(out *Element, a *Element)
|
||||
TEXT ·feSquare(SB), NOSPLIT, $0-16
|
||||
MOVQ a+8(FP), CX
|
||||
|
||||
// r0 = l0×l0
|
||||
MOVQ (CX), AX
|
||||
MULQ (CX)
|
||||
MOVQ AX, SI
|
||||
MOVQ DX, BX
|
||||
|
||||
// r0 += 38×l1×l4
|
||||
MOVQ 8(CX), AX
|
||||
IMUL3Q $0x26, AX, AX
|
||||
MULQ 32(CX)
|
||||
ADDQ AX, SI
|
||||
ADCQ DX, BX
|
||||
|
||||
// r0 += 38×l2×l3
|
||||
MOVQ 16(CX), AX
|
||||
IMUL3Q $0x26, AX, AX
|
||||
MULQ 24(CX)
|
||||
ADDQ AX, SI
|
||||
ADCQ DX, BX
|
||||
|
||||
// r1 = 2×l0×l1
|
||||
MOVQ (CX), AX
|
||||
SHLQ $0x01, AX
|
||||
MULQ 8(CX)
|
||||
MOVQ AX, R8
|
||||
MOVQ DX, DI
|
||||
|
||||
// r1 += 38×l2×l4
|
||||
MOVQ 16(CX), AX
|
||||
IMUL3Q $0x26, AX, AX
|
||||
MULQ 32(CX)
|
||||
ADDQ AX, R8
|
||||
ADCQ DX, DI
|
||||
|
||||
// r1 += 19×l3×l3
|
||||
MOVQ 24(CX), AX
|
||||
IMUL3Q $0x13, AX, AX
|
||||
MULQ 24(CX)
|
||||
ADDQ AX, R8
|
||||
ADCQ DX, DI
|
||||
|
||||
// r2 = 2×l0×l2
|
||||
MOVQ (CX), AX
|
||||
SHLQ $0x01, AX
|
||||
MULQ 16(CX)
|
||||
MOVQ AX, R10
|
||||
MOVQ DX, R9
|
||||
|
||||
// r2 += l1×l1
|
||||
MOVQ 8(CX), AX
|
||||
MULQ 8(CX)
|
||||
ADDQ AX, R10
|
||||
ADCQ DX, R9
|
||||
|
||||
// r2 += 38×l3×l4
|
||||
MOVQ 24(CX), AX
|
||||
IMUL3Q $0x26, AX, AX
|
||||
MULQ 32(CX)
|
||||
ADDQ AX, R10
|
||||
ADCQ DX, R9
|
||||
|
||||
// r3 = 2×l0×l3
|
||||
MOVQ (CX), AX
|
||||
SHLQ $0x01, AX
|
||||
MULQ 24(CX)
|
||||
MOVQ AX, R12
|
||||
MOVQ DX, R11
|
||||
|
||||
// r3 += 2×l1×l2
|
||||
MOVQ 8(CX), AX
|
||||
IMUL3Q $0x02, AX, AX
|
||||
MULQ 16(CX)
|
||||
ADDQ AX, R12
|
||||
ADCQ DX, R11
|
||||
|
||||
// r3 += 19×l4×l4
|
||||
MOVQ 32(CX), AX
|
||||
IMUL3Q $0x13, AX, AX
|
||||
MULQ 32(CX)
|
||||
ADDQ AX, R12
|
||||
ADCQ DX, R11
|
||||
|
||||
// r4 = 2×l0×l4
|
||||
MOVQ (CX), AX
|
||||
SHLQ $0x01, AX
|
||||
MULQ 32(CX)
|
||||
MOVQ AX, R14
|
||||
MOVQ DX, R13
|
||||
|
||||
// r4 += 2×l1×l3
|
||||
MOVQ 8(CX), AX
|
||||
IMUL3Q $0x02, AX, AX
|
||||
MULQ 24(CX)
|
||||
ADDQ AX, R14
|
||||
ADCQ DX, R13
|
||||
|
||||
// r4 += l2×l2
|
||||
MOVQ 16(CX), AX
|
||||
MULQ 16(CX)
|
||||
ADDQ AX, R14
|
||||
ADCQ DX, R13
|
||||
|
||||
// First reduction chain
|
||||
MOVQ $0x0007ffffffffffff, AX
|
||||
SHLQ $0x0d, SI, BX
|
||||
SHLQ $0x0d, R8, DI
|
||||
SHLQ $0x0d, R10, R9
|
||||
SHLQ $0x0d, R12, R11
|
||||
SHLQ $0x0d, R14, R13
|
||||
ANDQ AX, SI
|
||||
IMUL3Q $0x13, R13, R13
|
||||
ADDQ R13, SI
|
||||
ANDQ AX, R8
|
||||
ADDQ BX, R8
|
||||
ANDQ AX, R10
|
||||
ADDQ DI, R10
|
||||
ANDQ AX, R12
|
||||
ADDQ R9, R12
|
||||
ANDQ AX, R14
|
||||
ADDQ R11, R14
|
||||
|
||||
// Second reduction chain (carryPropagate)
|
||||
MOVQ SI, BX
|
||||
SHRQ $0x33, BX
|
||||
MOVQ R8, DI
|
||||
SHRQ $0x33, DI
|
||||
MOVQ R10, R9
|
||||
SHRQ $0x33, R9
|
||||
MOVQ R12, R11
|
||||
SHRQ $0x33, R11
|
||||
MOVQ R14, R13
|
||||
SHRQ $0x33, R13
|
||||
ANDQ AX, SI
|
||||
IMUL3Q $0x13, R13, R13
|
||||
ADDQ R13, SI
|
||||
ANDQ AX, R8
|
||||
ADDQ BX, R8
|
||||
ANDQ AX, R10
|
||||
ADDQ DI, R10
|
||||
ANDQ AX, R12
|
||||
ADDQ R9, R12
|
||||
ANDQ AX, R14
|
||||
ADDQ R11, R14
|
||||
|
||||
// Store output
|
||||
MOVQ out+0(FP), AX
|
||||
MOVQ SI, (AX)
|
||||
MOVQ R8, 8(AX)
|
||||
MOVQ R10, 16(AX)
|
||||
MOVQ R12, 24(AX)
|
||||
MOVQ R14, 32(AX)
|
||||
RET
|
12 vendor/filippo.io/edwards25519/field/fe_amd64_noasm.go (generated, vendored, Normal file)
@@ -0,0 +1,12 @@
// Copyright (c) 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build !amd64 || !gc || purego
// +build !amd64 !gc purego

package field

func feMul(v, x, y *Element) { feMulGeneric(v, x, y) }

func feSquare(v, x *Element) { feSquareGeneric(v, x) }
16 vendor/filippo.io/edwards25519/field/fe_arm64.go (generated, vendored, Normal file)
@@ -0,0 +1,16 @@
// Copyright (c) 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build arm64 && gc && !purego
// +build arm64,gc,!purego

package field

//go:noescape
func carryPropagate(v *Element)

func (v *Element) carryPropagate() *Element {
	carryPropagate(v)
	return v
}
42 vendor/filippo.io/edwards25519/field/fe_arm64.s (generated, vendored, Normal file)
@@ -0,0 +1,42 @@
// Copyright (c) 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build arm64,gc,!purego

#include "textflag.h"

// carryPropagate works exactly like carryPropagateGeneric and uses the
// same AND, ADD, and LSR+MADD instructions emitted by the compiler, but
// avoids loading R0-R4 twice and uses LDP and STP.
//
// See https://golang.org/issues/43145 for the main compiler issue.
//
// func carryPropagate(v *Element)
TEXT ·carryPropagate(SB),NOFRAME|NOSPLIT,$0-8
	MOVD v+0(FP), R20

	LDP 0(R20), (R0, R1)
	LDP 16(R20), (R2, R3)
	MOVD 32(R20), R4

	AND $0x7ffffffffffff, R0, R10
	AND $0x7ffffffffffff, R1, R11
	AND $0x7ffffffffffff, R2, R12
	AND $0x7ffffffffffff, R3, R13
	AND $0x7ffffffffffff, R4, R14

	ADD R0>>51, R11, R11
	ADD R1>>51, R12, R12
	ADD R2>>51, R13, R13
	ADD R3>>51, R14, R14
	// R4>>51 * 19 + R10 -> R10
	LSR $51, R4, R21
	MOVD $19, R22
	MADD R22, R10, R21, R10

	STP (R10, R11), 0(R20)
	STP (R12, R13), 16(R20)
	MOVD R14, 32(R20)

	RET
12 vendor/filippo.io/edwards25519/field/fe_arm64_noasm.go (generated, vendored, Normal file)
@@ -0,0 +1,12 @@
// Copyright (c) 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build !arm64 || !gc || purego
// +build !arm64 !gc purego

package field

func (v *Element) carryPropagate() *Element {
	return v.carryPropagateGeneric()
}
264 vendor/filippo.io/edwards25519/field/fe_generic.go (generated, vendored, Normal file)
@@ -0,0 +1,264 @@
// Copyright (c) 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package field

import "math/bits"

// uint128 holds a 128-bit number as two 64-bit limbs, for use with the
// bits.Mul64 and bits.Add64 intrinsics.
type uint128 struct {
	lo, hi uint64
}

// mul64 returns a * b.
func mul64(a, b uint64) uint128 {
	hi, lo := bits.Mul64(a, b)
	return uint128{lo, hi}
}

// addMul64 returns v + a * b.
func addMul64(v uint128, a, b uint64) uint128 {
	hi, lo := bits.Mul64(a, b)
	lo, c := bits.Add64(lo, v.lo, 0)
	hi, _ = bits.Add64(hi, v.hi, c)
	return uint128{lo, hi}
}

// shiftRightBy51 returns a >> 51. a is assumed to be at most 115 bits.
func shiftRightBy51(a uint128) uint64 {
	return (a.hi << (64 - 51)) | (a.lo >> 51)
}
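
As a standalone illustration (not part of the vendored file), the sketch below re-implements these helpers under the same, purely local names and checks one accumulation of ~51-bit limb products, plus the 51-bit carry extraction, against math/big:

```go
package main

import (
	"fmt"
	"math/big"
	"math/bits"
)

type uint128 struct{ lo, hi uint64 }

func mul64(a, b uint64) uint128 {
	hi, lo := bits.Mul64(a, b)
	return uint128{lo, hi}
}

func addMul64(v uint128, a, b uint64) uint128 {
	hi, lo := bits.Mul64(a, b)
	lo, c := bits.Add64(lo, v.lo, 0)
	hi, _ = bits.Add64(hi, v.hi, c)
	return uint128{lo, hi}
}

func shiftRightBy51(a uint128) uint64 {
	return (a.hi << (64 - 51)) | (a.lo >> 51)
}

// toBig interprets a uint128 as hi*2^64 + lo for checking against math/big.
func toBig(a uint128) *big.Int {
	r := new(big.Int).SetUint64(a.hi)
	r.Lsh(r, 64)
	return r.Add(r, new(big.Int).SetUint64(a.lo))
}

func main() {
	// r = a0*b0 + a1*b1 with ~51-bit inputs, as in feMulGeneric.
	a0, b0 := uint64(1)<<51-1, uint64(1)<<51-19
	a1, b1 := uint64(1)<<50+12345, uint64(1)<<51-1
	r := mul64(a0, b0)
	r = addMul64(r, a1, b1)

	want := new(big.Int).Mul(new(big.Int).SetUint64(a0), new(big.Int).SetUint64(b0))
	want.Add(want, new(big.Int).Mul(new(big.Int).SetUint64(a1), new(big.Int).SetUint64(b1)))

	fmt.Println(toBig(r).Cmp(want) == 0)                                  // true: accumulation is exact
	fmt.Println(shiftRightBy51(r) == new(big.Int).Rsh(want, 51).Uint64()) // true: the carry fits in a uint64
}
```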

func feMulGeneric(v, a, b *Element) {
	a0 := a.l0
	a1 := a.l1
	a2 := a.l2
	a3 := a.l3
	a4 := a.l4

	b0 := b.l0
	b1 := b.l1
	b2 := b.l2
	b3 := b.l3
	b4 := b.l4

	// Limb multiplication works like pen-and-paper columnar multiplication, but
	// with 51-bit limbs instead of digits.
	//
	//                          a4   a3   a2   a1   a0  x
	//                          b4   b3   b2   b1   b0  =
	//                         ------------------------
	//                        a4b0 a3b0 a2b0 a1b0 a0b0  +
	//                   a4b1 a3b1 a2b1 a1b1 a0b1       +
	//              a4b2 a3b2 a2b2 a1b2 a0b2            +
	//         a4b3 a3b3 a2b3 a1b3 a0b3                 +
	//    a4b4 a3b4 a2b4 a1b4 a0b4                      =
	//   ----------------------------------------------
	//      r8   r7   r6   r5   r4   r3   r2   r1   r0
	//
	// We can then use the reduction identity (a * 2²⁵⁵ + b = a * 19 + b) to
	// reduce the limbs that would overflow 255 bits. r5 * 2²⁵⁵ becomes 19 * r5,
	// r6 * 2³⁰⁶ becomes 19 * r6 * 2⁵¹, etc.
	//
	// Reduction can be carried out simultaneously with multiplication. For
	// example, we do not compute r5: whenever the result of a multiplication
	// belongs to r5, like a1b4, we multiply it by 19 and add the result to r0.
	//
	//            a4b0    a3b0    a2b0    a1b0    a0b0  +
	//            a3b1    a2b1    a1b1    a0b1 19×a4b1  +
	//            a2b2    a1b2    a0b2 19×a4b2 19×a3b2  +
	//            a1b3    a0b3 19×a4b3 19×a3b3 19×a2b3  +
	//            a0b4 19×a4b4 19×a3b4 19×a2b4 19×a1b4  =
	//           --------------------------------------
	//              r4      r3      r2      r1      r0
	//
	// Finally we add up the columns into wide, overlapping limbs.

	a1_19 := a1 * 19
	a2_19 := a2 * 19
	a3_19 := a3 * 19
	a4_19 := a4 * 19

	// r0 = a0×b0 + 19×(a1×b4 + a2×b3 + a3×b2 + a4×b1)
	r0 := mul64(a0, b0)
	r0 = addMul64(r0, a1_19, b4)
	r0 = addMul64(r0, a2_19, b3)
	r0 = addMul64(r0, a3_19, b2)
	r0 = addMul64(r0, a4_19, b1)

	// r1 = a0×b1 + a1×b0 + 19×(a2×b4 + a3×b3 + a4×b2)
	r1 := mul64(a0, b1)
	r1 = addMul64(r1, a1, b0)
	r1 = addMul64(r1, a2_19, b4)
	r1 = addMul64(r1, a3_19, b3)
	r1 = addMul64(r1, a4_19, b2)

	// r2 = a0×b2 + a1×b1 + a2×b0 + 19×(a3×b4 + a4×b3)
	r2 := mul64(a0, b2)
	r2 = addMul64(r2, a1, b1)
	r2 = addMul64(r2, a2, b0)
	r2 = addMul64(r2, a3_19, b4)
	r2 = addMul64(r2, a4_19, b3)

	// r3 = a0×b3 + a1×b2 + a2×b1 + a3×b0 + 19×a4×b4
	r3 := mul64(a0, b3)
	r3 = addMul64(r3, a1, b2)
	r3 = addMul64(r3, a2, b1)
	r3 = addMul64(r3, a3, b0)
	r3 = addMul64(r3, a4_19, b4)

	// r4 = a0×b4 + a1×b3 + a2×b2 + a3×b1 + a4×b0
	r4 := mul64(a0, b4)
	r4 = addMul64(r4, a1, b3)
	r4 = addMul64(r4, a2, b2)
	r4 = addMul64(r4, a3, b1)
	r4 = addMul64(r4, a4, b0)

	// After the multiplication, we need to reduce (carry) the five coefficients
	// to obtain a result with limbs that are at most slightly larger than 2⁵¹,
	// to respect the Element invariant.
	//
	// Overall, the reduction works the same as carryPropagate, except with
	// wider inputs: we take the carry for each coefficient by shifting it right
	// by 51, and add it to the limb above it. The top carry is multiplied by 19
	// according to the reduction identity and added to the lowest limb.
	//
	// The largest coefficient (r0) will be at most 111 bits, which guarantees
	// that all carries are at most 111 - 51 = 60 bits, which fits in a uint64.
	//
	//     r0 = a0×b0 + 19×(a1×b4 + a2×b3 + a3×b2 + a4×b1)
	//     r0 < 2⁵²×2⁵² + 19×(2⁵²×2⁵² + 2⁵²×2⁵² + 2⁵²×2⁵² + 2⁵²×2⁵²)
	//     r0 < (1 + 19 × 4) × 2⁵² × 2⁵²
	//     r0 < 2⁷ × 2⁵² × 2⁵²
	//     r0 < 2¹¹¹
	//
	// Moreover, the top coefficient (r4) is at most 107 bits, so c4 is at most
	// 56 bits, and c4 * 19 is at most 61 bits, which again fits in a uint64 and
	// allows us to easily apply the reduction identity.
	//
	//     r4 = a0×b4 + a1×b3 + a2×b2 + a3×b1 + a4×b0
	//     r4 < 5 × 2⁵² × 2⁵²
	//     r4 < 2¹⁰⁷
	//

	c0 := shiftRightBy51(r0)
	c1 := shiftRightBy51(r1)
	c2 := shiftRightBy51(r2)
	c3 := shiftRightBy51(r3)
	c4 := shiftRightBy51(r4)

	rr0 := r0.lo&maskLow51Bits + c4*19
	rr1 := r1.lo&maskLow51Bits + c0
	rr2 := r2.lo&maskLow51Bits + c1
	rr3 := r3.lo&maskLow51Bits + c2
	rr4 := r4.lo&maskLow51Bits + c3

	// Now all coefficients fit into 64-bit registers but are still too large to
	// be passed around as an Element. We therefore do one last carry chain,
	// where the carries will be small enough to fit in the wiggle room above 2⁵¹.
	*v = Element{rr0, rr1, rr2, rr3, rr4}
	v.carryPropagate()
}
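
The reduction described in the comments above hinges on the identity a·2²⁵⁵ + b ≡ 19·a + b (mod p) for p = 2²⁵⁵ − 19. A small standalone check with math/big (not part of the vendored code) makes the identity concrete:

```go
package main

import (
	"fmt"
	"math/big"
)

func main() {
	// p = 2^255 - 19, the field order used throughout this package.
	p := new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 255), big.NewInt(19))

	a, _ := new(big.Int).SetString("123456789123456789123456789", 10)
	b, _ := new(big.Int).SetString("987654321987654321987654321", 10)

	// lhs = (a*2^255 + b) mod p
	lhs := new(big.Int).Lsh(a, 255)
	lhs.Add(lhs, b).Mod(lhs, p)

	// rhs = (19*a + b) mod p
	rhs := new(big.Int).Mul(a, big.NewInt(19))
	rhs.Add(rhs, b).Mod(rhs, p)

	fmt.Println(lhs.Cmp(rhs) == 0) // true: a*2^255 + b ≡ 19*a + b (mod p)
}
```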

func feSquareGeneric(v, a *Element) {
	l0 := a.l0
	l1 := a.l1
	l2 := a.l2
	l3 := a.l3
	l4 := a.l4

	// Squaring works precisely like multiplication above, but thanks to its
	// symmetry we get to group a few terms together.
	//
	//                          l4   l3   l2   l1   l0  x
	//                          l4   l3   l2   l1   l0  =
	//                         ------------------------
	//                        l4l0 l3l0 l2l0 l1l0 l0l0  +
	//                   l4l1 l3l1 l2l1 l1l1 l0l1       +
	//              l4l2 l3l2 l2l2 l1l2 l0l2            +
	//         l4l3 l3l3 l2l3 l1l3 l0l3                 +
	//    l4l4 l3l4 l2l4 l1l4 l0l4                      =
	//   ----------------------------------------------
	//      r8   r7   r6   r5   r4   r3   r2   r1   r0
	//
	//            l4l0    l3l0    l2l0    l1l0    l0l0  +
	//            l3l1    l2l1    l1l1    l0l1 19×l4l1  +
	//            l2l2    l1l2    l0l2 19×l4l2 19×l3l2  +
	//            l1l3    l0l3 19×l4l3 19×l3l3 19×l2l3  +
	//            l0l4 19×l4l4 19×l3l4 19×l2l4 19×l1l4  =
	//           --------------------------------------
	//              r4      r3      r2      r1      r0
	//
	// With precomputed 2×, 19×, and 2×19× terms, we can compute each limb with
	// only three Mul64 and four Add64, instead of five and eight.

	l0_2 := l0 * 2
	l1_2 := l1 * 2

	l1_38 := l1 * 38
	l2_38 := l2 * 38
	l3_38 := l3 * 38

	l3_19 := l3 * 19
	l4_19 := l4 * 19

	// r0 = l0×l0 + 19×(l1×l4 + l2×l3 + l3×l2 + l4×l1) = l0×l0 + 19×2×(l1×l4 + l2×l3)
	r0 := mul64(l0, l0)
	r0 = addMul64(r0, l1_38, l4)
	r0 = addMul64(r0, l2_38, l3)

	// r1 = l0×l1 + l1×l0 + 19×(l2×l4 + l3×l3 + l4×l2) = 2×l0×l1 + 19×2×l2×l4 + 19×l3×l3
	r1 := mul64(l0_2, l1)
	r1 = addMul64(r1, l2_38, l4)
	r1 = addMul64(r1, l3_19, l3)

	// r2 = l0×l2 + l1×l1 + l2×l0 + 19×(l3×l4 + l4×l3) = 2×l0×l2 + l1×l1 + 19×2×l3×l4
	r2 := mul64(l0_2, l2)
	r2 = addMul64(r2, l1, l1)
	r2 = addMul64(r2, l3_38, l4)

	// r3 = l0×l3 + l1×l2 + l2×l1 + l3×l0 + 19×l4×l4 = 2×l0×l3 + 2×l1×l2 + 19×l4×l4
	r3 := mul64(l0_2, l3)
	r3 = addMul64(r3, l1_2, l2)
	r3 = addMul64(r3, l4_19, l4)

	// r4 = l0×l4 + l1×l3 + l2×l2 + l3×l1 + l4×l0 = 2×l0×l4 + 2×l1×l3 + l2×l2
	r4 := mul64(l0_2, l4)
	r4 = addMul64(r4, l1_2, l3)
	r4 = addMul64(r4, l2, l2)

	c0 := shiftRightBy51(r0)
	c1 := shiftRightBy51(r1)
	c2 := shiftRightBy51(r2)
	c3 := shiftRightBy51(r3)
	c4 := shiftRightBy51(r4)

	rr0 := r0.lo&maskLow51Bits + c4*19
	rr1 := r1.lo&maskLow51Bits + c0
	rr2 := r2.lo&maskLow51Bits + c1
	rr3 := r3.lo&maskLow51Bits + c2
	rr4 := r4.lo&maskLow51Bits + c3

	*v = Element{rr0, rr1, rr2, rr3, rr4}
	v.carryPropagate()
}

// carryPropagate brings the limbs below 52 bits by applying the reduction
// identity (a * 2²⁵⁵ + b = a * 19 + b) to the l4 carry.
func (v *Element) carryPropagateGeneric() *Element {
	c0 := v.l0 >> 51
	c1 := v.l1 >> 51
	c2 := v.l2 >> 51
	c3 := v.l3 >> 51
	c4 := v.l4 >> 51

	v.l0 = v.l0&maskLow51Bits + c4*19
	v.l1 = v.l1&maskLow51Bits + c0
	v.l2 = v.l2&maskLow51Bits + c1
	v.l3 = v.l3&maskLow51Bits + c2
	v.l4 = v.l4&maskLow51Bits + c3

	return v
}
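
carryPropagateGeneric is the pure-Go counterpart of the arm64 assembly earlier in this commit. The standalone sketch below (plain uint64 limbs, no package internals) repeats the same carry chain and verifies with math/big that one pass leaves the encoded field element unchanged while bringing every limb back under roughly 2⁵¹:

```go
package main

import (
	"fmt"
	"math/big"
)

const maskLow51Bits = (1 << 51) - 1

// carryPropagate mirrors carryPropagateGeneric on a plain [5]uint64.
func carryPropagate(l [5]uint64) [5]uint64 {
	c0, c1, c2, c3, c4 := l[0]>>51, l[1]>>51, l[2]>>51, l[3]>>51, l[4]>>51
	return [5]uint64{
		l[0]&maskLow51Bits + c4*19, // top carry wraps around, since 2^255 ≡ 19 (mod p)
		l[1]&maskLow51Bits + c0,
		l[2]&maskLow51Bits + c1,
		l[3]&maskLow51Bits + c2,
		l[4]&maskLow51Bits + c3,
	}
}

// value interprets the limbs in radix 2^51 and reduces mod p = 2^255 - 19.
func value(l [5]uint64) *big.Int {
	p := new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 255), big.NewInt(19))
	v := new(big.Int)
	for i := 4; i >= 0; i-- {
		v.Lsh(v, 51)
		v.Add(v, new(big.Int).SetUint64(l[i]))
	}
	return v.Mod(v, p)
}

func main() {
	before := [5]uint64{1<<63 - 1, 1<<60 + 7, 1 << 55, 1<<52 + 3, 1<<62 - 5}
	after := carryPropagate(before)

	fmt.Println(value(before).Cmp(value(after)) == 0) // true: same field element
	for _, limb := range after {
		fmt.Println(limb < 1<<52) // true: limbs are back in the 2^51-plus-wiggle-room range
	}
}
```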
1027
vendor/filippo.io/edwards25519/scalar.go
generated
vendored
Normal file
File diff suppressed because it is too large
214
vendor/filippo.io/edwards25519/scalarmult.go
generated
vendored
Normal file
@ -0,0 +1,214 @@
// Copyright (c) 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package edwards25519

import "sync"

// basepointTable is a set of 32 affineLookupTables, where table i is generated
// from 256^i * basepoint. It is precomputed the first time it's used.
func basepointTable() *[32]affineLookupTable {
	basepointTablePrecomp.initOnce.Do(func() {
		p := NewGeneratorPoint()
		for i := 0; i < 32; i++ {
			basepointTablePrecomp.table[i].FromP3(p)
			for j := 0; j < 8; j++ {
				p.Add(p, p)
			}
		}
	})
	return &basepointTablePrecomp.table
}

var basepointTablePrecomp struct {
	table    [32]affineLookupTable
	initOnce sync.Once
}

// ScalarBaseMult sets v = x * B, where B is the canonical generator, and
// returns v.
//
// The scalar multiplication is done in constant time.
func (v *Point) ScalarBaseMult(x *Scalar) *Point {
	basepointTable := basepointTable()

	// Write x = sum(x_i * 16^i) so  x*B = sum( B*x_i*16^i )
	// as described in the Ed25519 paper
	//
	// Group even and odd coefficients
	// x*B     = x_0*16^0*B + x_2*16^2*B + ... + x_62*16^62*B
	//         + x_1*16^1*B + x_3*16^3*B + ... + x_63*16^63*B
	// x*B     = x_0*16^0*B + x_2*16^2*B + ... + x_62*16^62*B
	//    + 16*( x_1*16^0*B + x_3*16^2*B + ... + x_63*16^62*B)
	//
	// We use a lookup table for each i to get x_i*16^(2*i)*B
	// and do four doublings to multiply by 16.
	digits := x.signedRadix16()

	multiple := &affineCached{}
	tmp1 := &projP1xP1{}
	tmp2 := &projP2{}

	// Accumulate the odd components first
	v.Set(NewIdentityPoint())
	for i := 1; i < 64; i += 2 {
		basepointTable[i/2].SelectInto(multiple, digits[i])
		tmp1.AddAffine(v, multiple)
		v.fromP1xP1(tmp1)
	}

	// Multiply by 16
	tmp2.FromP3(v)       // tmp2 =    v in P2 coords
	tmp1.Double(tmp2)    // tmp1 =  2*v in P1xP1 coords
	tmp2.FromP1xP1(tmp1) // tmp2 =  2*v in P2 coords
	tmp1.Double(tmp2)    // tmp1 =  4*v in P1xP1 coords
	tmp2.FromP1xP1(tmp1) // tmp2 =  4*v in P2 coords
	tmp1.Double(tmp2)    // tmp1 =  8*v in P1xP1 coords
	tmp2.FromP1xP1(tmp1) // tmp2 =  8*v in P2 coords
	tmp1.Double(tmp2)    // tmp1 = 16*v in P1xP1 coords
	v.fromP1xP1(tmp1)    // now v = 16*(odd components)

	// Accumulate the even components
	for i := 0; i < 64; i += 2 {
		basepointTable[i/2].SelectInto(multiple, digits[i])
		tmp1.AddAffine(v, multiple)
		v.fromP1xP1(tmp1)
	}

	return v
}
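
As a usage sketch of the fixed-base path (relying on the package's public API as documented at pkg.go.dev/filippo.io/edwards25519; NewScalar, SetUniformBytes, NewGeneratorPoint, and Equal are taken from that documentation rather than from this diff), ScalarBaseMult should agree with ScalarMult against the generator:

```go
package main

import (
	"crypto/rand"
	"fmt"

	"filippo.io/edwards25519"
)

func main() {
	// Derive a scalar from 64 uniform random bytes.
	var buf [64]byte
	if _, err := rand.Read(buf[:]); err != nil {
		panic(err)
	}
	s, err := edwards25519.NewScalar().SetUniformBytes(buf[:])
	if err != nil {
		panic(err)
	}

	// Fixed-base path: v = s * B, using the precomputed affine tables.
	v := edwards25519.NewIdentityPoint().ScalarBaseMult(s)

	// Variable-base path against the generator should give the same point.
	w := edwards25519.NewIdentityPoint().ScalarMult(s, edwards25519.NewGeneratorPoint())

	fmt.Println(v.Equal(w) == 1) // true
}
```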

// ScalarMult sets v = x * q, and returns v.
//
// The scalar multiplication is done in constant time.
func (v *Point) ScalarMult(x *Scalar, q *Point) *Point {
	checkInitialized(q)

	var table projLookupTable
	table.FromP3(q)

	// Write x = sum(x_i * 16^i)
	// so  x*Q = sum( Q*x_i*16^i )
	//         = Q*x_0 + 16*(Q*x_1 + 16*( ... + Q*x_63) ... )
	//           <------compute inside out---------
	//
	// We use the lookup table to get the x_i*Q values
	// and do four doublings to compute 16*Q
	digits := x.signedRadix16()

	// Unwrap first loop iteration to save computing 16*identity
	multiple := &projCached{}
	tmp1 := &projP1xP1{}
	tmp2 := &projP2{}
	table.SelectInto(multiple, digits[63])

	v.Set(NewIdentityPoint())
	tmp1.Add(v, multiple) // tmp1 = x_63*Q in P1xP1 coords
	for i := 62; i >= 0; i-- {
		tmp2.FromP1xP1(tmp1) // tmp2 =    (prev) in P2 coords
		tmp1.Double(tmp2)    // tmp1 =  2*(prev) in P1xP1 coords
		tmp2.FromP1xP1(tmp1) // tmp2 =  2*(prev) in P2 coords
		tmp1.Double(tmp2)    // tmp1 =  4*(prev) in P1xP1 coords
		tmp2.FromP1xP1(tmp1) // tmp2 =  4*(prev) in P2 coords
		tmp1.Double(tmp2)    // tmp1 =  8*(prev) in P1xP1 coords
		tmp2.FromP1xP1(tmp1) // tmp2 =  8*(prev) in P2 coords
		tmp1.Double(tmp2)    // tmp1 = 16*(prev) in P1xP1 coords
		v.fromP1xP1(tmp1)    //    v = 16*(prev) in P3 coords
		table.SelectInto(multiple, digits[i])
		tmp1.Add(v, multiple) // tmp1 = x_i*Q + 16*(prev) in P1xP1 coords
	}
	v.fromP1xP1(tmp1)
	return v
}

// basepointNafTable is the nafLookupTable8 for the basepoint.
// It is precomputed the first time it's used.
func basepointNafTable() *nafLookupTable8 {
	basepointNafTablePrecomp.initOnce.Do(func() {
		basepointNafTablePrecomp.table.FromP3(NewGeneratorPoint())
	})
	return &basepointNafTablePrecomp.table
}

var basepointNafTablePrecomp struct {
	table    nafLookupTable8
	initOnce sync.Once
}

// VarTimeDoubleScalarBaseMult sets v = a * A + b * B, where B is the canonical
// generator, and returns v.
//
// Execution time depends on the inputs.
func (v *Point) VarTimeDoubleScalarBaseMult(a *Scalar, A *Point, b *Scalar) *Point {
	checkInitialized(A)

	// Similarly to the single variable-base approach, we compute
	// digits and use them with a lookup table. However, because
	// we are allowed to do variable-time operations, we don't
	// need constant-time lookups or constant-time digit
	// computations.
	//
	// So we use a non-adjacent form of some width w instead of
	// radix 16. This is like a binary representation (one digit
	// for each binary place) but we allow the digits to grow in
	// magnitude up to 2^{w-1} so that the nonzero digits are as
	// sparse as possible. Intuitively, this "condenses" the
	// "mass" of the scalar onto sparse coefficients (meaning
	// fewer additions).

	basepointNafTable := basepointNafTable()
	var aTable nafLookupTable5
	aTable.FromP3(A)
	// Because the basepoint is fixed, we can use a wider NAF
	// corresponding to a bigger table.
	aNaf := a.nonAdjacentForm(5)
	bNaf := b.nonAdjacentForm(8)

	// Find the first nonzero coefficient.
	i := 255
	for j := i; j >= 0; j-- {
		if aNaf[j] != 0 || bNaf[j] != 0 {
			break
		}
	}

	multA := &projCached{}
	multB := &affineCached{}
	tmp1 := &projP1xP1{}
	tmp2 := &projP2{}
	tmp2.Zero()

	// Move from high to low bits, doubling the accumulator
	// at each iteration and checking whether there is a nonzero
	// coefficient to look up a multiple of.
	for ; i >= 0; i-- {
		tmp1.Double(tmp2)

		// Only update v if we have a nonzero coeff to add in.
		if aNaf[i] > 0 {
			v.fromP1xP1(tmp1)
			aTable.SelectInto(multA, aNaf[i])
			tmp1.Add(v, multA)
		} else if aNaf[i] < 0 {
			v.fromP1xP1(tmp1)
			aTable.SelectInto(multA, -aNaf[i])
			tmp1.Sub(v, multA)
		}

		if bNaf[i] > 0 {
			v.fromP1xP1(tmp1)
			basepointNafTable.SelectInto(multB, bNaf[i])
			tmp1.AddAffine(v, multB)
		} else if bNaf[i] < 0 {
			v.fromP1xP1(tmp1)
			basepointNafTable.SelectInto(multB, -bNaf[i])
			tmp1.SubAffine(v, multB)
		}

		tmp2.FromP1xP1(tmp1)
	}

	v.fromP2(tmp2)
	return v
}
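
A similar sketch (same caveat: only the public API from pkg.go.dev is assumed, including Add and Negate-free composition via ScalarMult/ScalarBaseMult) cross-checks the variable-time double-base path against the constant-time operations:

```go
package main

import (
	"crypto/rand"
	"fmt"

	"filippo.io/edwards25519"
)

func randomScalar() *edwards25519.Scalar {
	var buf [64]byte
	if _, err := rand.Read(buf[:]); err != nil {
		panic(err)
	}
	s, err := edwards25519.NewScalar().SetUniformBytes(buf[:])
	if err != nil {
		panic(err)
	}
	return s
}

func main() {
	a, b := randomScalar(), randomScalar()

	// A is some non-generator point: A = r * B for a throwaway scalar r.
	A := edwards25519.NewIdentityPoint().ScalarBaseMult(randomScalar())

	// Variable-time path: v = a*A + b*B.
	v := edwards25519.NewIdentityPoint().VarTimeDoubleScalarBaseMult(a, A, b)

	// Constant-time reference: w = a*A + b*B via ScalarMult, ScalarBaseMult, and Add.
	aA := edwards25519.NewIdentityPoint().ScalarMult(a, A)
	bB := edwards25519.NewIdentityPoint().ScalarBaseMult(b)
	w := edwards25519.NewIdentityPoint().Add(aA, bB)

	fmt.Println(v.Equal(w) == 1) // true
}
```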
129
vendor/filippo.io/edwards25519/tables.go
generated
vendored
Normal file
@ -0,0 +1,129 @@
// Copyright (c) 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package edwards25519

import (
	"crypto/subtle"
)

// A dynamic lookup table for variable-base, constant-time scalar muls.
type projLookupTable struct {
	points [8]projCached
}

// A precomputed lookup table for fixed-base, constant-time scalar muls.
type affineLookupTable struct {
	points [8]affineCached
}

// A dynamic lookup table for variable-base, variable-time scalar muls.
type nafLookupTable5 struct {
	points [8]projCached
}

// A precomputed lookup table for fixed-base, variable-time scalar muls.
type nafLookupTable8 struct {
	points [64]affineCached
}

// Constructors.

// Builds a lookup table at runtime. Fast.
func (v *projLookupTable) FromP3(q *Point) {
	// Goal: v.points[i] = (i+1)*Q, i.e., Q, 2Q, ..., 8Q
	// This allows lookup of -8Q, ..., -Q, 0, Q, ..., 8Q
	v.points[0].FromP3(q)
	tmpP3 := Point{}
	tmpP1xP1 := projP1xP1{}
	for i := 0; i < 7; i++ {
		// Compute (i+1)*Q as Q + i*Q and convert to a ProjCached
		// This is needlessly complicated because the API has explicit
		// receivers instead of creating stack objects and relying on RVO
		v.points[i+1].FromP3(tmpP3.fromP1xP1(tmpP1xP1.Add(q, &v.points[i])))
	}
}

// This is not optimised for speed; fixed-base tables should be precomputed.
func (v *affineLookupTable) FromP3(q *Point) {
	// Goal: v.points[i] = (i+1)*Q, i.e., Q, 2Q, ..., 8Q
	// This allows lookup of -8Q, ..., -Q, 0, Q, ..., 8Q
	v.points[0].FromP3(q)
	tmpP3 := Point{}
	tmpP1xP1 := projP1xP1{}
	for i := 0; i < 7; i++ {
		// Compute (i+1)*Q as Q + i*Q and convert to AffineCached
		v.points[i+1].FromP3(tmpP3.fromP1xP1(tmpP1xP1.AddAffine(q, &v.points[i])))
	}
}

// Builds a lookup table at runtime. Fast.
func (v *nafLookupTable5) FromP3(q *Point) {
	// Goal: v.points[i] = (2*i+1)*Q, i.e., Q, 3Q, 5Q, ..., 15Q
	// This allows lookup of -15Q, ..., -3Q, -Q, 0, Q, 3Q, ..., 15Q
	v.points[0].FromP3(q)
	q2 := Point{}
	q2.Add(q, q)
	tmpP3 := Point{}
	tmpP1xP1 := projP1xP1{}
	for i := 0; i < 7; i++ {
		v.points[i+1].FromP3(tmpP3.fromP1xP1(tmpP1xP1.Add(&q2, &v.points[i])))
	}
}

// This is not optimised for speed; fixed-base tables should be precomputed.
func (v *nafLookupTable8) FromP3(q *Point) {
	v.points[0].FromP3(q)
	q2 := Point{}
	q2.Add(q, q)
	tmpP3 := Point{}
	tmpP1xP1 := projP1xP1{}
	for i := 0; i < 63; i++ {
		v.points[i+1].FromP3(tmpP3.fromP1xP1(tmpP1xP1.AddAffine(&q2, &v.points[i])))
	}
}

// Selectors.

// Set dest to x*Q, where -8 <= x <= 8, in constant time.
func (v *projLookupTable) SelectInto(dest *projCached, x int8) {
	// Compute xabs = |x|
	xmask := x >> 7
	xabs := uint8((x + xmask) ^ xmask)

	dest.Zero()
	for j := 1; j <= 8; j++ {
		// Set dest = j*Q if |x| = j
		cond := subtle.ConstantTimeByteEq(xabs, uint8(j))
		dest.Select(&v.points[j-1], dest, cond)
	}
	// Now dest = |x|*Q, conditionally negate to get x*Q
	dest.CondNeg(int(xmask & 1))
}

// Set dest to x*Q, where -8 <= x <= 8, in constant time.
func (v *affineLookupTable) SelectInto(dest *affineCached, x int8) {
	// Compute xabs = |x|
	xmask := x >> 7
	xabs := uint8((x + xmask) ^ xmask)

	dest.Zero()
	for j := 1; j <= 8; j++ {
		// Set dest = j*Q if |x| = j
		cond := subtle.ConstantTimeByteEq(xabs, uint8(j))
		dest.Select(&v.points[j-1], dest, cond)
	}
	// Now dest = |x|*Q, conditionally negate to get x*Q
	dest.CondNeg(int(xmask & 1))
}

// Given odd x with 0 < x < 2^4, return x*Q (in variable time).
func (v *nafLookupTable5) SelectInto(dest *projCached, x int8) {
	*dest = v.points[x/2]
}

// Given odd x with 0 < x < 2^7, return x*Q (in variable time).
func (v *nafLookupTable8) SelectInto(dest *affineCached, x int8) {
	*dest = v.points[x/2]
}
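
The constant-time selectors above combine a branch-free absolute value (xmask/xabs) with subtle.ConstantTimeByteEq so that the memory access pattern does not depend on the digit. A minimal standalone sketch of the same trick, on plain int8 digits and an integer table rather than the package's unexported point types (ConstantTimeSelect stands in for the Select method on cached points):

```go
package main

import (
	"crypto/subtle"
	"fmt"
)

// selectAbs returns table[|x|-1] for x in [-8, 8] (or zero for x == 0),
// scanning every entry so the access pattern is independent of x.
// The sign is returned separately (1 if x was negative) so the caller can
// conditionally negate, as SelectInto does with CondNeg.
func selectAbs(table *[8]int32, x int8) (val int32, negate int) {
	xmask := x >> 7                    // 0 for x >= 0, all ones for x < 0
	xabs := uint8((x + xmask) ^ xmask) // |x| without branching

	var dest int32
	for j := 1; j <= 8; j++ {
		// cond is 1 exactly when |x| == j, computed without branches.
		cond := subtle.ConstantTimeByteEq(xabs, uint8(j))
		dest = int32(subtle.ConstantTimeSelect(cond, int(table[j-1]), int(dest)))
	}
	return dest, int(xmask & 1)
}

func main() {
	table := [8]int32{10, 20, 30, 40, 50, 60, 70, 80} // stands in for Q, 2Q, ..., 8Q

	v, neg := selectAbs(&table, -3)
	fmt.Println(v, neg) // 30 1: |x| = 3 selects the third entry, sign handled separately

	v, neg = selectAbs(&table, 0)
	fmt.Println(v, neg) // 0 0: x = 0 leaves the zero value in place
}
```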