mirror of https://github.com/lightninglabs/loop
Merge pull request #634 from GeorgeTsagk/sweep-batcher
Loop Out Sweep Batcherupdate-to-v0.27.0-beta
commit
df2db8055b
@ -0,0 +1,317 @@
|
||||
// Code generated by sqlc. DO NOT EDIT.
|
||||
// versions:
|
||||
// sqlc v1.17.2
|
||||
// source: batch.sql
|
||||
|
||||
package sqlc
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"time"
|
||||
)
|
||||
|
||||
const confirmBatch = `-- name: ConfirmBatch :exec
UPDATE
    sweep_batches
SET
    confirmed = TRUE
WHERE
    id = $1
`

// ConfirmBatch marks the batch with the given id as confirmed. Updating a
// non-existent id is not an error: zero rows are affected and nil is
// returned. (Generated by sqlc — regenerate rather than hand-editing.)
func (q *Queries) ConfirmBatch(ctx context.Context, id int32) error {
	_, err := q.db.ExecContext(ctx, confirmBatch, id)
	return err
}
|
||||
|
||||
const getBatchSweeps = `-- name: GetBatchSweeps :many
SELECT
    sweeps.id, sweeps.swap_hash, sweeps.batch_id, sweeps.outpoint_txid, sweeps.outpoint_index, sweeps.amt, sweeps.completed,
    swaps.id, swaps.swap_hash, swaps.preimage, swaps.initiation_time, swaps.amount_requested, swaps.cltv_expiry, swaps.max_miner_fee, swaps.max_swap_fee, swaps.initiation_height, swaps.protocol_version, swaps.label,
    loopout_swaps.swap_hash, loopout_swaps.dest_address, loopout_swaps.swap_invoice, loopout_swaps.max_swap_routing_fee, loopout_swaps.sweep_conf_target, loopout_swaps.htlc_confirmations, loopout_swaps.outgoing_chan_set, loopout_swaps.prepay_invoice, loopout_swaps.max_prepay_routing_fee, loopout_swaps.publication_deadline, loopout_swaps.single_sweep,
    htlc_keys.swap_hash, htlc_keys.sender_script_pubkey, htlc_keys.receiver_script_pubkey, htlc_keys.sender_internal_pubkey, htlc_keys.receiver_internal_pubkey, htlc_keys.client_key_family, htlc_keys.client_key_index
FROM
    sweeps
JOIN
    swaps ON sweeps.swap_hash = swaps.swap_hash
JOIN
    loopout_swaps ON sweeps.swap_hash = loopout_swaps.swap_hash
JOIN
    htlc_keys ON sweeps.swap_hash = htlc_keys.swap_hash
WHERE
    sweeps.batch_id = $1
ORDER BY
    sweeps.id ASC
`

// GetBatchSweepsRow is the joined result row for GetBatchSweeps. Columns
// appearing in more than one joined table (id, swap_hash) are disambiguated
// by sqlc with _2/_3/_4 suffixes, in join order: sweeps, swaps,
// loopout_swaps, htlc_keys.
type GetBatchSweepsRow struct {
	ID                     int32
	SwapHash               []byte
	BatchID                int32
	OutpointTxid           []byte
	OutpointIndex          int32
	Amt                    int64
	Completed              bool
	ID_2                   int32
	SwapHash_2             []byte
	Preimage               []byte
	InitiationTime         time.Time
	AmountRequested        int64
	CltvExpiry             int32
	MaxMinerFee            int64
	MaxSwapFee             int64
	InitiationHeight       int32
	ProtocolVersion        int32
	Label                  string
	SwapHash_3             []byte
	DestAddress            string
	SwapInvoice            string
	MaxSwapRoutingFee      int64
	SweepConfTarget        int32
	HtlcConfirmations      int32
	OutgoingChanSet        string
	PrepayInvoice          string
	MaxPrepayRoutingFee    int64
	PublicationDeadline    time.Time
	SingleSweep            bool
	SwapHash_4             []byte
	SenderScriptPubkey     []byte
	ReceiverScriptPubkey   []byte
	SenderInternalPubkey   []byte
	ReceiverInternalPubkey []byte
	ClientKeyFamily        int32
	ClientKeyIndex         int32
}

// GetBatchSweeps returns every sweep belonging to the batch with the given
// id, joined with its parent swap, loop-out and HTLC key rows, ordered by
// sweep insertion order (sweeps.id ASC). The inner joins mean a sweep whose
// swap/loop-out/htlc-key row is missing is silently excluded.
// (Generated by sqlc — regenerate rather than hand-editing.)
func (q *Queries) GetBatchSweeps(ctx context.Context, batchID int32) ([]GetBatchSweepsRow, error) {
	rows, err := q.db.QueryContext(ctx, getBatchSweeps, batchID)
	if err != nil {
		return nil, err
	}
	// defer is a safety net; sqlc also closes explicitly below to surface
	// close errors.
	defer rows.Close()
	var items []GetBatchSweepsRow
	for rows.Next() {
		var i GetBatchSweepsRow
		if err := rows.Scan(
			&i.ID,
			&i.SwapHash,
			&i.BatchID,
			&i.OutpointTxid,
			&i.OutpointIndex,
			&i.Amt,
			&i.Completed,
			&i.ID_2,
			&i.SwapHash_2,
			&i.Preimage,
			&i.InitiationTime,
			&i.AmountRequested,
			&i.CltvExpiry,
			&i.MaxMinerFee,
			&i.MaxSwapFee,
			&i.InitiationHeight,
			&i.ProtocolVersion,
			&i.Label,
			&i.SwapHash_3,
			&i.DestAddress,
			&i.SwapInvoice,
			&i.MaxSwapRoutingFee,
			&i.SweepConfTarget,
			&i.HtlcConfirmations,
			&i.OutgoingChanSet,
			&i.PrepayInvoice,
			&i.MaxPrepayRoutingFee,
			&i.PublicationDeadline,
			&i.SingleSweep,
			&i.SwapHash_4,
			&i.SenderScriptPubkey,
			&i.ReceiverScriptPubkey,
			&i.SenderInternalPubkey,
			&i.ReceiverInternalPubkey,
			&i.ClientKeyFamily,
			&i.ClientKeyIndex,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
||||
|
||||
const getSweepStatus = `-- name: GetSweepStatus :one
SELECT
    COALESCE(s.completed, f.false_value) AS completed
FROM
    (SELECT false AS false_value) AS f
LEFT JOIN
    sweeps s ON s.swap_hash = $1
`

// GetSweepStatus returns whether the sweep for the given swap hash has
// completed. The LEFT JOIN against a constant one-row subquery makes the
// query return FALSE (rather than no row) when no sweep exists for the hash.
// (Generated by sqlc — regenerate rather than hand-editing.)
func (q *Queries) GetSweepStatus(ctx context.Context, swapHash []byte) (bool, error) {
	row := q.db.QueryRowContext(ctx, getSweepStatus, swapHash)
	var completed bool
	err := row.Scan(&completed)
	return completed, err
}
|
||||
|
||||
const getUnconfirmedBatches = `-- name: GetUnconfirmedBatches :many
SELECT
    id, confirmed, batch_tx_id, batch_pk_script, last_rbf_height, last_rbf_sat_per_kw, max_timeout_distance
FROM
    sweep_batches
WHERE
    confirmed = FALSE
`

// GetUnconfirmedBatches returns all sweep batches that have not yet been
// marked confirmed. Returns a nil slice (not an error) when there are none.
// (Generated by sqlc — regenerate rather than hand-editing.)
func (q *Queries) GetUnconfirmedBatches(ctx context.Context) ([]SweepBatch, error) {
	rows, err := q.db.QueryContext(ctx, getUnconfirmedBatches)
	if err != nil {
		return nil, err
	}
	// defer is a safety net; sqlc also closes explicitly below to surface
	// close errors.
	defer rows.Close()
	var items []SweepBatch
	for rows.Next() {
		var i SweepBatch
		if err := rows.Scan(
			&i.ID,
			&i.Confirmed,
			&i.BatchTxID,
			&i.BatchPkScript,
			&i.LastRbfHeight,
			&i.LastRbfSatPerKw,
			&i.MaxTimeoutDistance,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
||||
|
||||
const insertBatch = `-- name: InsertBatch :one
INSERT INTO sweep_batches (
    confirmed,
    batch_tx_id,
    batch_pk_script,
    last_rbf_height,
    last_rbf_sat_per_kw,
    max_timeout_distance
) VALUES (
    $1,
    $2,
    $3,
    $4,
    $5,
    $6
) RETURNING id
`

// InsertBatchParams carries the column values for InsertBatch. The sql.Null*
// fields correspond to nullable columns (batch_tx_id, last_rbf_height,
// last_rbf_sat_per_kw) of sweep_batches.
type InsertBatchParams struct {
	Confirmed          bool
	BatchTxID          sql.NullString
	BatchPkScript      []byte
	LastRbfHeight      sql.NullInt32
	LastRbfSatPerKw    sql.NullInt32
	MaxTimeoutDistance int32
}

// InsertBatch inserts a new sweep batch and returns the database-assigned
// autoincrementing id. (Generated by sqlc — regenerate rather than
// hand-editing.)
func (q *Queries) InsertBatch(ctx context.Context, arg InsertBatchParams) (int32, error) {
	row := q.db.QueryRowContext(ctx, insertBatch,
		arg.Confirmed,
		arg.BatchTxID,
		arg.BatchPkScript,
		arg.LastRbfHeight,
		arg.LastRbfSatPerKw,
		arg.MaxTimeoutDistance,
	)
	var id int32
	err := row.Scan(&id)
	return id, err
}
|
||||
|
||||
const updateBatch = `-- name: UpdateBatch :exec
UPDATE sweep_batches SET
    confirmed = $2,
    batch_tx_id = $3,
    batch_pk_script = $4,
    last_rbf_height = $5,
    last_rbf_sat_per_kw = $6
WHERE id = $1
`

// UpdateBatchParams carries the key (ID) and the new column values for
// UpdateBatch. Note max_timeout_distance is deliberately not updatable here.
type UpdateBatchParams struct {
	ID              int32
	Confirmed       bool
	BatchTxID       sql.NullString
	BatchPkScript   []byte
	LastRbfHeight   sql.NullInt32
	LastRbfSatPerKw sql.NullInt32
}

// UpdateBatch overwrites the mutable fields of the batch identified by
// arg.ID. Updating a non-existent id affects zero rows and returns nil.
// (Generated by sqlc — regenerate rather than hand-editing.)
func (q *Queries) UpdateBatch(ctx context.Context, arg UpdateBatchParams) error {
	_, err := q.db.ExecContext(ctx, updateBatch,
		arg.ID,
		arg.Confirmed,
		arg.BatchTxID,
		arg.BatchPkScript,
		arg.LastRbfHeight,
		arg.LastRbfSatPerKw,
	)
	return err
}
|
||||
|
||||
const upsertSweep = `-- name: UpsertSweep :exec
INSERT INTO sweeps (
    swap_hash,
    batch_id,
    outpoint_txid,
    outpoint_index,
    amt,
    completed
) VALUES (
    $1,
    $2,
    $3,
    $4,
    $5,
    $6
) ON CONFLICT (swap_hash) DO UPDATE SET
    batch_id = $2,
    outpoint_txid = $3,
    outpoint_index = $4,
    amt = $5,
    completed = $6
`

// UpsertSweepParams carries the column values for UpsertSweep. SwapHash is
// the conflict key (sweeps.swap_hash is UNIQUE).
type UpsertSweepParams struct {
	SwapHash      []byte
	BatchID       int32
	OutpointTxid  []byte
	OutpointIndex int32
	Amt           int64
	Completed     bool
}

// UpsertSweep inserts a sweep, or — when a sweep for the same swap_hash
// already exists — overwrites its batch assignment, outpoint, amount and
// completion flag. (Generated by sqlc — regenerate rather than
// hand-editing.)
func (q *Queries) UpsertSweep(ctx context.Context, arg UpsertSweepParams) error {
	_, err := q.db.ExecContext(ctx, upsertSweep,
		arg.SwapHash,
		arg.BatchID,
		arg.OutpointTxid,
		arg.OutpointIndex,
		arg.Amt,
		arg.Completed,
	)
	return err
}
|
@ -0,0 +1 @@
|
||||
-- Down migration: remove the single_sweep column added by the corresponding
-- up migration.
ALTER TABLE loopout_swaps DROP COLUMN single_sweep;
|
@ -0,0 +1,4 @@
|
||||
-- single_sweep indicates whether the swap should be swept on its own rather
-- than being grouped with other sweeps. The default value used is TRUE in
-- order to maintain the old behavior of pre-existing swaps.
-- NOTE(review): the previous comment here described an is_external_addr
-- column and did not match the column being added.
ALTER TABLE loopout_swaps ADD single_sweep BOOLEAN NOT NULL DEFAULT TRUE;
|
@ -0,0 +1,2 @@
|
||||
-- Down migration: drop the child table first. sweeps declares a foreign key
-- referencing sweep_batches(id), so with foreign-key enforcement enabled the
-- referenced table must be dropped last.
DROP TABLE sweeps;
DROP TABLE sweep_batches;
|
@ -0,0 +1,58 @@
|
||||
-- sweep_batches stores the on-going swaps that are batched together.
CREATE TABLE sweep_batches (
    -- id is the autoincrementing primary key of the batch.
    id INTEGER PRIMARY KEY,

    -- confirmed indicates whether this batch is confirmed.
    confirmed BOOLEAN NOT NULL DEFAULT FALSE,

    -- batch_tx_id is the transaction id of the batch transaction.
    -- NULL until a batch transaction has been published.
    batch_tx_id TEXT,

    -- batch_pk_script is the pkscript of the batch transaction's output.
    batch_pk_script BLOB,

    -- last_rbf_height was the last height at which we attempted to publish
    -- an rbf replacement transaction. NULL before the first attempt.
    last_rbf_height INTEGER,

    -- last_rbf_sat_per_kw was the last sat per kw fee rate we used for the
    -- last published transaction.
    last_rbf_sat_per_kw INTEGER,

    -- max_timeout_distance is the maximum distance the timeouts of the
    -- sweeps can have in the batch.
    max_timeout_distance INTEGER NOT NULL
);
|
||||
|
||||
-- sweeps stores the individual sweeps that are part of a batch.
CREATE TABLE sweeps (
    -- id is the autoincrementing primary key.
    id INTEGER PRIMARY KEY,

    -- swap_hash is the hash of the swap that is being swept. UNIQUE: a swap
    -- has at most one sweep row (see the UpsertSweep query).
    swap_hash BLOB NOT NULL UNIQUE,

    -- batch_id is the id of the batch this swap is part of.
    -- NOTE(review): GetBatchSweeps filters on this column; consider an index
    -- if batches grow large.
    batch_id INTEGER NOT NULL,

    -- outpoint_txid is the transaction id of the output being swept.
    outpoint_txid BLOB NOT NULL,

    -- outpoint_index is the index of the output being swept.
    outpoint_index INTEGER NOT NULL,

    -- amt is the amount of the output being swept.
    amt BIGINT NOT NULL,

    -- completed indicates whether the sweep has been completed.
    completed BOOLEAN NOT NULL DEFAULT FALSE,

    -- Foreign key constraint to ensure that we reference an existing batch
    -- id.
    FOREIGN KEY (batch_id) REFERENCES sweep_batches(id),

    -- Foreign key constraint to ensure that swap_hash references an
    -- existing swap.
    FOREIGN KEY (swap_hash) REFERENCES swaps(swap_hash)
);
|
@ -0,0 +1,91 @@
|
||||
-- GetUnconfirmedBatches fetches every batch not yet marked confirmed.
-- name: GetUnconfirmedBatches :many
SELECT
    *
FROM
    sweep_batches
WHERE
    confirmed = FALSE;

-- InsertBatch creates a new batch row and returns its generated id.
-- name: InsertBatch :one
INSERT INTO sweep_batches (
    confirmed,
    batch_tx_id,
    batch_pk_script,
    last_rbf_height,
    last_rbf_sat_per_kw,
    max_timeout_distance
) VALUES (
    $1,
    $2,
    $3,
    $4,
    $5,
    $6
) RETURNING id;

-- UpdateBatch overwrites the mutable columns of the batch with id = $1.
-- name: UpdateBatch :exec
UPDATE sweep_batches SET
    confirmed = $2,
    batch_tx_id = $3,
    batch_pk_script = $4,
    last_rbf_height = $5,
    last_rbf_sat_per_kw = $6
WHERE id = $1;

-- ConfirmBatch marks the batch with id = $1 as confirmed.
-- name: ConfirmBatch :exec
UPDATE
    sweep_batches
SET
    confirmed = TRUE
WHERE
    id = $1;

-- UpsertSweep inserts a sweep, or updates it in place when a row with the
-- same swap_hash (UNIQUE) already exists.
-- name: UpsertSweep :exec
INSERT INTO sweeps (
    swap_hash,
    batch_id,
    outpoint_txid,
    outpoint_index,
    amt,
    completed
) VALUES (
    $1,
    $2,
    $3,
    $4,
    $5,
    $6
) ON CONFLICT (swap_hash) DO UPDATE SET
    batch_id = $2,
    outpoint_txid = $3,
    outpoint_index = $4,
    amt = $5,
    completed = $6;

-- GetBatchSweeps returns the sweeps of batch $1 joined with their swap,
-- loop-out and htlc-key rows, in sweep insertion order.
-- name: GetBatchSweeps :many
SELECT
    sweeps.*,
    swaps.*,
    loopout_swaps.*,
    htlc_keys.*
FROM
    sweeps
JOIN
    swaps ON sweeps.swap_hash = swaps.swap_hash
JOIN
    loopout_swaps ON sweeps.swap_hash = loopout_swaps.swap_hash
JOIN
    htlc_keys ON sweeps.swap_hash = htlc_keys.swap_hash
WHERE
    sweeps.batch_id = $1
ORDER BY
    sweeps.id ASC;

-- GetSweepStatus returns the completed flag for the sweep of swap $1; the
-- LEFT JOIN against the constant one-row subquery yields FALSE instead of no
-- row when the sweep does not exist.
-- name: GetSweepStatus :one
SELECT
    COALESCE(s.completed, f.false_value) AS completed
FROM
    (SELECT false AS false_value) AS f
LEFT JOIN
    sweeps s ON s.swap_hash = $1;
|
@ -0,0 +1,339 @@
|
||||
package loopdb
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/lightninglabs/loop/test"
|
||||
"github.com/lightningnetwork/lnd/lntypes"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// StoreMock implements a mock client swap store.
|
||||
// StoreMock implements a mock client swap store.
type StoreMock struct {
	// LoopOutSwaps holds the stored loop-out contracts, keyed by swap hash.
	LoopOutSwaps map[lntypes.Hash]*LoopOutContract

	// LoopOutUpdates records the state updates applied to each loop-out
	// swap, in order of arrival.
	LoopOutUpdates map[lntypes.Hash][]SwapStateData

	// loopOutStoreChan signals each stored loop-out contract to assertion
	// helpers (buffered, capacity 1).
	loopOutStoreChan chan LoopOutContract

	// loopOutUpdateChan signals each loop-out state update to assertion
	// helpers (buffered, capacity 1).
	loopOutUpdateChan chan SwapStateData

	// LoopInSwaps holds the stored loop-in contracts, keyed by swap hash.
	LoopInSwaps map[lntypes.Hash]*LoopInContract

	// LoopInUpdates records the state updates applied to each loop-in
	// swap, in order of arrival.
	LoopInUpdates map[lntypes.Hash][]SwapStateData

	// loopInStoreChan signals each stored loop-in contract.
	loopInStoreChan chan LoopInContract

	// loopInUpdateChan signals each loop-in state update.
	loopInUpdateChan chan SwapStateData

	// t is the test this mock reports assertion failures against.
	t *testing.T
}
|
||||
|
||||
// NewStoreMock instantiates a new mock store.
|
||||
func NewStoreMock(t *testing.T) *StoreMock {
|
||||
return &StoreMock{
|
||||
loopOutStoreChan: make(chan LoopOutContract, 1),
|
||||
loopOutUpdateChan: make(chan SwapStateData, 1),
|
||||
LoopOutSwaps: make(map[lntypes.Hash]*LoopOutContract),
|
||||
LoopOutUpdates: make(map[lntypes.Hash][]SwapStateData),
|
||||
|
||||
loopInStoreChan: make(chan LoopInContract, 1),
|
||||
loopInUpdateChan: make(chan SwapStateData, 1),
|
||||
LoopInSwaps: make(map[lntypes.Hash]*LoopInContract),
|
||||
LoopInUpdates: make(map[lntypes.Hash][]SwapStateData),
|
||||
t: t,
|
||||
}
|
||||
}
|
||||
|
||||
// FetchLoopOutSwaps returns all swaps currently in the store. Each swap's
// recorded updates are converted to LoopEvents. Map iteration order is
// random, so the result order is unspecified.
//
// NOTE: Part of the SwapStore interface.
func (s *StoreMock) FetchLoopOutSwaps(ctx context.Context) ([]*LoopOut, error) {
	result := []*LoopOut{}

	for hash, contract := range s.LoopOutSwaps {
		updates := s.LoopOutUpdates[hash]
		events := make([]*LoopEvent, len(updates))
		for i, u := range updates {
			events[i] = &LoopEvent{
				SwapStateData: u,
			}
		}

		swap := &LoopOut{
			Loop: Loop{
				Hash:   hash,
				Events: events,
			},
			Contract: contract,
		}
		result = append(result, swap)
	}

	return result, nil
}
|
||||
|
||||
// FetchLoopOutSwap returns the single loop-out swap with the given hash, or
// an error if no such swap is stored.
//
// NOTE: Part of the SwapStore interface.
func (s *StoreMock) FetchLoopOutSwap(ctx context.Context,
	hash lntypes.Hash) (*LoopOut, error) {

	contract, ok := s.LoopOutSwaps[hash]
	if !ok {
		return nil, errors.New("swap not found")
	}

	// Convert the recorded state updates into the event log format the
	// caller expects.
	updates := s.LoopOutUpdates[hash]
	events := make([]*LoopEvent, len(updates))
	for i, u := range updates {
		events[i] = &LoopEvent{
			SwapStateData: u,
		}
	}

	swap := &LoopOut{
		Loop: Loop{
			Hash:   hash,
			Events: events,
		},
		Contract: contract,
	}

	return swap, nil
}
|
||||
|
||||
// CreateLoopOut adds an initiated swap to the store. It fails if a swap with
// the same hash already exists. The stored contract is also signalled on
// loopOutStoreChan; with the channel's buffer of one, a second unasserted
// store would block.
//
// NOTE: Part of the SwapStore interface.
func (s *StoreMock) CreateLoopOut(ctx context.Context, hash lntypes.Hash,
	swap *LoopOutContract) error {

	_, ok := s.LoopOutSwaps[hash]
	if ok {
		return errors.New("swap already exists")
	}

	s.LoopOutSwaps[hash] = swap
	s.LoopOutUpdates[hash] = []SwapStateData{}
	s.loopOutStoreChan <- *swap

	return nil
}
|
||||
|
||||
// FetchLoopInSwaps returns all in swaps currently in the store. Each swap's
// recorded updates are converted to LoopEvents. Map iteration order is
// random, so the result order is unspecified.
func (s *StoreMock) FetchLoopInSwaps(ctx context.Context) ([]*LoopIn,
	error) {

	result := []*LoopIn{}

	for hash, contract := range s.LoopInSwaps {
		updates := s.LoopInUpdates[hash]
		events := make([]*LoopEvent, len(updates))
		for i, u := range updates {
			events[i] = &LoopEvent{
				SwapStateData: u,
			}
		}

		swap := &LoopIn{
			Loop: Loop{
				Hash:   hash,
				Events: events,
			},
			Contract: contract,
		}
		result = append(result, swap)
	}

	return result, nil
}
|
||||
|
||||
// CreateLoopIn adds an initiated loop in swap to the store. It fails if a
// swap with the same hash already exists. The stored contract is also
// signalled on loopInStoreChan (buffer of one).
//
// NOTE: Part of the SwapStore interface.
func (s *StoreMock) CreateLoopIn(ctx context.Context, hash lntypes.Hash,
	swap *LoopInContract) error {

	_, ok := s.LoopInSwaps[hash]
	if ok {
		return errors.New("swap already exists")
	}

	s.LoopInSwaps[hash] = swap
	s.LoopInUpdates[hash] = []SwapStateData{}
	s.loopInStoreChan <- *swap

	return nil
}
|
||||
|
||||
// UpdateLoopOut stores a new event for a target loop out swap. This appends to
|
||||
// the event log for a particular swap as it goes through the various stages in
|
||||
// its lifetime.
|
||||
//
|
||||
// NOTE: Part of the SwapStore interface.
|
||||
func (s *StoreMock) UpdateLoopOut(ctx context.Context, hash lntypes.Hash,
|
||||
time time.Time, state SwapStateData) error {
|
||||
|
||||
updates, ok := s.LoopOutUpdates[hash]
|
||||
if !ok {
|
||||
return errors.New("swap does not exists")
|
||||
}
|
||||
|
||||
updates = append(updates, state)
|
||||
s.LoopOutUpdates[hash] = updates
|
||||
s.loopOutUpdateChan <- state
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// UpdateLoopIn stores a new event for a target loop in swap. This appends to
|
||||
// the event log for a particular swap as it goes through the various stages in
|
||||
// its lifetime.
|
||||
//
|
||||
// NOTE: Part of the SwapStore interface.
|
||||
func (s *StoreMock) UpdateLoopIn(ctx context.Context, hash lntypes.Hash,
|
||||
time time.Time, state SwapStateData) error {
|
||||
|
||||
updates, ok := s.LoopInUpdates[hash]
|
||||
if !ok {
|
||||
return errors.New("swap does not exists")
|
||||
}
|
||||
|
||||
updates = append(updates, state)
|
||||
s.LoopInUpdates[hash] = updates
|
||||
s.loopInUpdateChan <- state
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// PutLiquidityParams writes the serialized `manager.Parameters` bytes into
// the bucket. The mock discards the bytes and always succeeds.
//
// NOTE: Part of the SwapStore interface.
func (s *StoreMock) PutLiquidityParams(ctx context.Context,
	params []byte) error {

	return nil
}
|
||||
|
||||
// FetchLiquidityParams reads the serialized `manager.Parameters` bytes from
// the bucket. The mock always returns nil bytes and no error, mimicking an
// empty bucket.
//
// NOTE: Part of the SwapStore interface.
func (s *StoreMock) FetchLiquidityParams(ctx context.Context) ([]byte, error) {
	return nil, nil
}
|
||||
|
||||
// Close closes the store. The mock holds no resources, so this is a no-op.
func (s *StoreMock) Close() error {
	return nil
}
|
||||
|
||||
// IsDone asserts that the store mock has no pending operations.
//
// NOTE(review): only the loop-out channels are checked here; pending
// loop-in signals (loopInStoreChan/loopInUpdateChan) go undetected —
// confirm whether that is intentional.
func (s *StoreMock) IsDone() error {
	// Non-blocking receives: a buffered value means an unasserted store or
	// update is still pending.
	select {
	case <-s.loopOutStoreChan:
		return errors.New("storeChan not empty")
	default:
	}

	select {
	case <-s.loopOutUpdateChan:
		return errors.New("updateChan not empty")
	default:
	}
	return nil
}
|
||||
|
||||
// AssertLoopOutStored asserts that a swap is stored. It fails the test if no
// store signal arrives within test.Timeout.
func (s *StoreMock) AssertLoopOutStored() {
	s.t.Helper()

	select {
	case <-s.loopOutStoreChan:
	case <-time.After(test.Timeout):
		s.t.Fatalf("expected swap to be stored")
	}
}
|
||||
|
||||
// AssertLoopOutState asserts that a specified state transition is persisted
// to disk. It fails the test if no update arrives within test.Timeout or if
// the stored state differs from expectedState.
func (s *StoreMock) AssertLoopOutState(expectedState SwapState) {
	s.t.Helper()

	select {
	case state := <-s.loopOutUpdateChan:
		require.Equal(s.t, expectedState, state.State)
	case <-time.After(test.Timeout):
		s.t.Fatalf("expected swap state to be stored")
	}
}
|
||||
|
||||
// AssertLoopInStored asserts that a loop-in swap is stored. It fails the
// test if no store signal arrives within test.Timeout.
func (s *StoreMock) AssertLoopInStored() {
	s.t.Helper()

	select {
	case <-s.loopInStoreChan:
	case <-time.After(test.Timeout):
		s.t.Fatalf("expected swap to be stored")
	}
}
|
||||
|
||||
// assertLoopInState asserts that a specified state transition is persisted to
|
||||
// disk.
|
||||
func (s *StoreMock) AssertLoopInState(
|
||||
expectedState SwapState) SwapStateData {
|
||||
|
||||
s.t.Helper()
|
||||
|
||||
state := <-s.loopInUpdateChan
|
||||
require.Equal(s.t, expectedState, state.State)
|
||||
|
||||
return state
|
||||
}
|
||||
|
||||
// AssertStorePreimageReveal asserts that a swap is marked as preimage
// revealed. It fails the test if no update arrives within test.Timeout or if
// the stored state is not StatePreimageRevealed.
func (s *StoreMock) AssertStorePreimageReveal() {
	s.t.Helper()

	select {
	case state := <-s.loopOutUpdateChan:
		require.Equal(s.t, StatePreimageRevealed, state.State)

	case <-time.After(test.Timeout):
		s.t.Fatalf("expected swap to be marked as preimage revealed")
	}
}
|
||||
|
||||
// AssertStoreFinished asserts that a swap is marked as finished. It fails
// the test if no update arrives within test.Timeout or if the stored state
// differs from expectedResult.
func (s *StoreMock) AssertStoreFinished(expectedResult SwapState) {
	s.t.Helper()

	select {
	case state := <-s.loopOutUpdateChan:
		require.Equal(s.t, expectedResult, state.State)

	case <-time.After(test.Timeout):
		s.t.Fatalf("expected swap to be finished")
	}
}
|
||||
|
||||
// BatchCreateLoopOut creates many loop out swaps in a batch.
|
||||
func (b *StoreMock) BatchCreateLoopOut(ctx context.Context,
|
||||
swaps map[lntypes.Hash]*LoopOutContract) error {
|
||||
|
||||
return errors.New("not implemented")
|
||||
}
|
||||
|
||||
// BatchCreateLoopIn creates many loop in swaps in a batch.
|
||||
func (b *StoreMock) BatchCreateLoopIn(ctx context.Context,
|
||||
swaps map[lntypes.Hash]*LoopInContract) error {
|
||||
|
||||
return errors.New("not implemented")
|
||||
}
|
||||
|
||||
// BatchInsertUpdate inserts many updates for a swap in a batch.
|
||||
func (b *StoreMock) BatchInsertUpdate(ctx context.Context,
|
||||
updateData map[lntypes.Hash][]BatchInsertUpdateData) error {
|
||||
|
||||
return errors.New("not implemented")
|
||||
}
|
File diff suppressed because it is too large
Load Diff
@ -1,322 +0,0 @@
|
||||
package loop
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/lightninglabs/loop/loopdb"
|
||||
"github.com/lightninglabs/loop/test"
|
||||
"github.com/lightningnetwork/lnd/lntypes"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// storeMock implements a mock client swap store.
|
||||
type storeMock struct {
|
||||
loopOutSwaps map[lntypes.Hash]*loopdb.LoopOutContract
|
||||
loopOutUpdates map[lntypes.Hash][]loopdb.SwapStateData
|
||||
loopOutStoreChan chan loopdb.LoopOutContract
|
||||
loopOutUpdateChan chan loopdb.SwapStateData
|
||||
|
||||
loopInSwaps map[lntypes.Hash]*loopdb.LoopInContract
|
||||
loopInUpdates map[lntypes.Hash][]loopdb.SwapStateData
|
||||
loopInStoreChan chan loopdb.LoopInContract
|
||||
loopInUpdateChan chan loopdb.SwapStateData
|
||||
|
||||
t *testing.T
|
||||
}
|
||||
|
||||
// NewStoreMock instantiates a new mock store.
|
||||
func newStoreMock(t *testing.T) *storeMock {
|
||||
return &storeMock{
|
||||
loopOutStoreChan: make(chan loopdb.LoopOutContract, 1),
|
||||
loopOutUpdateChan: make(chan loopdb.SwapStateData, 1),
|
||||
loopOutSwaps: make(map[lntypes.Hash]*loopdb.LoopOutContract),
|
||||
loopOutUpdates: make(map[lntypes.Hash][]loopdb.SwapStateData),
|
||||
|
||||
loopInStoreChan: make(chan loopdb.LoopInContract, 1),
|
||||
loopInUpdateChan: make(chan loopdb.SwapStateData, 1),
|
||||
loopInSwaps: make(map[lntypes.Hash]*loopdb.LoopInContract),
|
||||
loopInUpdates: make(map[lntypes.Hash][]loopdb.SwapStateData),
|
||||
t: t,
|
||||
}
|
||||
}
|
||||
|
||||
// FetchLoopOutSwaps returns all swaps currently in the store.
|
||||
//
|
||||
// NOTE: Part of the loopdb.SwapStore interface.
|
||||
func (s *storeMock) FetchLoopOutSwaps(ctx context.Context) ([]*loopdb.LoopOut, error) {
|
||||
result := []*loopdb.LoopOut{}
|
||||
|
||||
for hash, contract := range s.loopOutSwaps {
|
||||
updates := s.loopOutUpdates[hash]
|
||||
events := make([]*loopdb.LoopEvent, len(updates))
|
||||
for i, u := range updates {
|
||||
events[i] = &loopdb.LoopEvent{
|
||||
SwapStateData: u,
|
||||
}
|
||||
}
|
||||
|
||||
swap := &loopdb.LoopOut{
|
||||
Loop: loopdb.Loop{
|
||||
Hash: hash,
|
||||
Events: events,
|
||||
},
|
||||
Contract: contract,
|
||||
}
|
||||
result = append(result, swap)
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// FetchLoopOutSwaps returns all swaps currently in the store.
|
||||
//
|
||||
// NOTE: Part of the loopdb.SwapStore interface.
|
||||
func (s *storeMock) FetchLoopOutSwap(ctx context.Context,
|
||||
hash lntypes.Hash) (*loopdb.LoopOut, error) {
|
||||
|
||||
contract, ok := s.loopOutSwaps[hash]
|
||||
if !ok {
|
||||
return nil, errors.New("swap not found")
|
||||
}
|
||||
|
||||
updates := s.loopOutUpdates[hash]
|
||||
events := make([]*loopdb.LoopEvent, len(updates))
|
||||
for i, u := range updates {
|
||||
events[i] = &loopdb.LoopEvent{
|
||||
SwapStateData: u,
|
||||
}
|
||||
}
|
||||
|
||||
swap := &loopdb.LoopOut{
|
||||
Loop: loopdb.Loop{
|
||||
Hash: hash,
|
||||
Events: events,
|
||||
},
|
||||
Contract: contract,
|
||||
}
|
||||
|
||||
return swap, nil
|
||||
}
|
||||
|
||||
// CreateLoopOut adds an initiated swap to the store.
|
||||
//
|
||||
// NOTE: Part of the loopdb.SwapStore interface.
|
||||
func (s *storeMock) CreateLoopOut(ctx context.Context, hash lntypes.Hash,
|
||||
swap *loopdb.LoopOutContract) error {
|
||||
|
||||
_, ok := s.loopOutSwaps[hash]
|
||||
if ok {
|
||||
return errors.New("swap already exists")
|
||||
}
|
||||
|
||||
s.loopOutSwaps[hash] = swap
|
||||
s.loopOutUpdates[hash] = []loopdb.SwapStateData{}
|
||||
s.loopOutStoreChan <- *swap
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// FetchLoopInSwaps returns all in swaps currently in the store.
|
||||
func (s *storeMock) FetchLoopInSwaps(ctx context.Context) ([]*loopdb.LoopIn,
|
||||
error) {
|
||||
|
||||
result := []*loopdb.LoopIn{}
|
||||
|
||||
for hash, contract := range s.loopInSwaps {
|
||||
updates := s.loopInUpdates[hash]
|
||||
events := make([]*loopdb.LoopEvent, len(updates))
|
||||
for i, u := range updates {
|
||||
events[i] = &loopdb.LoopEvent{
|
||||
SwapStateData: u,
|
||||
}
|
||||
}
|
||||
|
||||
swap := &loopdb.LoopIn{
|
||||
Loop: loopdb.Loop{
|
||||
Hash: hash,
|
||||
Events: events,
|
||||
},
|
||||
Contract: contract,
|
||||
}
|
||||
result = append(result, swap)
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// CreateLoopIn adds an initiated loop in swap to the store.
|
||||
//
|
||||
// NOTE: Part of the loopdb.SwapStore interface.
|
||||
func (s *storeMock) CreateLoopIn(ctx context.Context, hash lntypes.Hash,
|
||||
swap *loopdb.LoopInContract) error {
|
||||
|
||||
_, ok := s.loopInSwaps[hash]
|
||||
if ok {
|
||||
return errors.New("swap already exists")
|
||||
}
|
||||
|
||||
s.loopInSwaps[hash] = swap
|
||||
s.loopInUpdates[hash] = []loopdb.SwapStateData{}
|
||||
s.loopInStoreChan <- *swap
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// UpdateLoopOut stores a new event for a target loop out swap. This appends to
|
||||
// the event log for a particular swap as it goes through the various stages in
|
||||
// its lifetime.
|
||||
//
|
||||
// NOTE: Part of the loopdb.SwapStore interface.
|
||||
func (s *storeMock) UpdateLoopOut(ctx context.Context, hash lntypes.Hash,
|
||||
time time.Time, state loopdb.SwapStateData) error {
|
||||
|
||||
updates, ok := s.loopOutUpdates[hash]
|
||||
if !ok {
|
||||
return errors.New("swap does not exists")
|
||||
}
|
||||
|
||||
updates = append(updates, state)
|
||||
s.loopOutUpdates[hash] = updates
|
||||
s.loopOutUpdateChan <- state
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// UpdateLoopIn stores a new event for a target loop in swap. This appends to
|
||||
// the event log for a particular swap as it goes through the various stages in
|
||||
// its lifetime.
|
||||
//
|
||||
// NOTE: Part of the loopdb.SwapStore interface.
|
||||
func (s *storeMock) UpdateLoopIn(ctx context.Context, hash lntypes.Hash,
|
||||
time time.Time, state loopdb.SwapStateData) error {
|
||||
|
||||
updates, ok := s.loopInUpdates[hash]
|
||||
if !ok {
|
||||
return errors.New("swap does not exists")
|
||||
}
|
||||
|
||||
updates = append(updates, state)
|
||||
s.loopInUpdates[hash] = updates
|
||||
s.loopInUpdateChan <- state
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// PutLiquidityParams writes the serialized `manager.Parameters` bytes into the
|
||||
// bucket.
|
||||
//
|
||||
// NOTE: Part of the loopdb.SwapStore interface.
|
||||
func (s *storeMock) PutLiquidityParams(ctx context.Context,
|
||||
params []byte) error {
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// FetchLiquidityParams reads the serialized `manager.Parameters` bytes from
|
||||
// the bucket.
|
||||
//
|
||||
// NOTE: Part of the loopdb.SwapStore interface.
|
||||
func (s *storeMock) FetchLiquidityParams(ctx context.Context) ([]byte, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (s *storeMock) Close() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *storeMock) isDone() error {
|
||||
select {
|
||||
case <-s.loopOutStoreChan:
|
||||
return errors.New("storeChan not empty")
|
||||
default:
|
||||
}
|
||||
|
||||
select {
|
||||
case <-s.loopOutUpdateChan:
|
||||
return errors.New("updateChan not empty")
|
||||
default:
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *storeMock) assertLoopOutStored() {
|
||||
s.t.Helper()
|
||||
|
||||
select {
|
||||
case <-s.loopOutStoreChan:
|
||||
case <-time.After(test.Timeout):
|
||||
s.t.Fatalf("expected swap to be stored")
|
||||
}
|
||||
}
|
||||
|
||||
func (s *storeMock) assertLoopOutState(expectedState loopdb.SwapState) {
|
||||
s.t.Helper()
|
||||
|
||||
state := <-s.loopOutUpdateChan
|
||||
if state.State != expectedState {
|
||||
s.t.Fatalf("expected state %v, got %v", expectedState, state)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *storeMock) assertLoopInStored() {
|
||||
s.t.Helper()
|
||||
|
||||
<-s.loopInStoreChan
|
||||
}
|
||||
|
||||
// assertLoopInState asserts that a specified state transition is persisted to
// disk, and returns the full state data that was received so callers can
// inspect additional fields.
func (s *storeMock) assertLoopInState(
	expectedState loopdb.SwapState) loopdb.SwapStateData {

	s.t.Helper()

	// Block until the next loop in update arrives.
	state := <-s.loopInUpdateChan
	require.Equal(s.t, expectedState, state.State)

	return state
}
|
||||
|
||||
// assertStorePreimageReveal asserts that the next persisted loop out update
// marks the swap as preimage revealed, failing the test on timeout.
func (s *storeMock) assertStorePreimageReveal() {
	s.t.Helper()

	select {
	case state := <-s.loopOutUpdateChan:
		require.Equal(s.t, loopdb.StatePreimageRevealed, state.State)

	case <-time.After(test.Timeout):
		s.t.Fatalf("expected swap to be marked as preimage revealed")
	}
}
|
||||
|
||||
// assertStoreFinished asserts that the next persisted loop out update moves
// the swap into the expected final state, failing the test on timeout.
func (s *storeMock) assertStoreFinished(expectedResult loopdb.SwapState) {
	s.t.Helper()

	select {
	case state := <-s.loopOutUpdateChan:
		require.Equal(s.t, expectedResult, state.State)

	case <-time.After(test.Timeout):
		s.t.Fatalf("expected swap to be finished")
	}
}
|
||||
func (b *storeMock) BatchCreateLoopOut(ctx context.Context,
|
||||
swaps map[lntypes.Hash]*loopdb.LoopOutContract) error {
|
||||
|
||||
return errors.New("not implemented")
|
||||
}
|
||||
|
||||
func (b *storeMock) BatchCreateLoopIn(ctx context.Context,
|
||||
swaps map[lntypes.Hash]*loopdb.LoopInContract) error {
|
||||
|
||||
return errors.New("not implemented")
|
||||
}
|
||||
|
||||
func (b *storeMock) BatchInsertUpdate(ctx context.Context,
|
||||
updateData map[lntypes.Hash][]loopdb.BatchInsertUpdateData) error {
|
||||
|
||||
return errors.New("not implemented")
|
||||
}
|
@ -0,0 +1,30 @@
|
||||
package sweepbatcher
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/btcsuite/btclog"
|
||||
"github.com/lightningnetwork/lnd/build"
|
||||
)
|
||||
|
||||
// log is a logger that is initialized with no output filters. This means the
// package will not perform any logging by default until the caller requests
// it via UseLogger.
var log btclog.Logger

// init wires up the "SWEEP" sub logger with a nil backend, so the default
// amount of logging is none.
func init() {
	UseLogger(build.NewSubLogger("SWEEP", nil))
}
|
||||
|
||||
// batchPrefixLogger returns a logger that prefixes all log messages with the ID.
|
||||
func batchPrefixLogger(batchID string) btclog.Logger {
|
||||
return build.NewPrefixLog(fmt.Sprintf("[Batch %s]", batchID), log)
|
||||
}
|
||||
|
||||
// UseLogger uses a specified Logger to output package logging info.
// This should be used in preference to SetLogWriter if the caller is also
// using btclog.
func UseLogger(logger btclog.Logger) {
	log = logger
}
|
@ -0,0 +1,371 @@
|
||||
package sweepbatcher
|
||||
|
||||
import (
	"context"
	"database/sql"
	"fmt"

	"github.com/btcsuite/btcd/btcutil"
	"github.com/btcsuite/btcd/chaincfg"
	"github.com/btcsuite/btcd/chaincfg/chainhash"
	"github.com/btcsuite/btcd/wire"
	"github.com/lightninglabs/loop/loopdb"
	"github.com/lightninglabs/loop/loopdb/sqlc"
	"github.com/lightningnetwork/lnd/clock"
	"github.com/lightningnetwork/lnd/lntypes"
)
|
||||
|
||||
// BaseDB is the interface that contains all the sqlc-generated queries the
// sweep batcher store needs, plus transaction support.
type BaseDB interface {
	// ConfirmBatch confirms a batch by setting the state to confirmed.
	ConfirmBatch(ctx context.Context, id int32) error

	// GetBatchSweeps fetches all the sweeps that are part of a batch.
	GetBatchSweeps(ctx context.Context, batchID int32) (
		[]sqlc.GetBatchSweepsRow, error)

	// GetSweepStatus returns true if the sweep has been completed.
	GetSweepStatus(ctx context.Context, swapHash []byte) (bool, error)

	// GetSwapUpdates fetches all the updates for a swap.
	GetSwapUpdates(ctx context.Context, swapHash []byte) (
		[]sqlc.SwapUpdate, error)

	// GetUnconfirmedBatches fetches all the batches from the database
	// that are not in a confirmed state.
	GetUnconfirmedBatches(ctx context.Context) ([]sqlc.SweepBatch, error)

	// InsertBatch inserts a batch into the database, returning the id of
	// the inserted batch.
	InsertBatch(ctx context.Context, arg sqlc.InsertBatchParams) (
		int32, error)

	// UpdateBatch updates a batch in the database.
	UpdateBatch(ctx context.Context, arg sqlc.UpdateBatchParams) error

	// UpsertSweep inserts a sweep into the database, or updates an existing
	// sweep if it already exists.
	UpsertSweep(ctx context.Context, arg sqlc.UpsertSweepParams) error

	// ExecTx allows for executing a function in the context of a database
	// transaction.
	ExecTx(ctx context.Context, txOptions loopdb.TxOptions,
		txBody func(*sqlc.Queries) error) error
}
|
||||
|
||||
// SQLStore manages the sweep batches and sweeps in the database.
type SQLStore struct {
	// baseDb is the underlying sqlc-backed database.
	baseDb BaseDB

	// network holds the chain parameters, used when converting stored
	// swap rows (e.g. decoding destination addresses).
	network *chaincfg.Params

	// clock is the time source of the store.
	clock clock.Clock
}
|
||||
|
||||
// NewSQLStore creates a new SQLStore on top of the given database, using the
// default wall clock as time source.
func NewSQLStore(db BaseDB, network *chaincfg.Params) *SQLStore {
	return &SQLStore{
		baseDb:  db,
		network: network,
		clock:   clock.NewDefaultClock(),
	}
}
|
||||
|
||||
// FetchUnconfirmedSweepBatches fetches all the batches from the database that
|
||||
// are not in a confirmed state.
|
||||
func (s *SQLStore) FetchUnconfirmedSweepBatches(ctx context.Context) ([]*dbBatch,
|
||||
error) {
|
||||
|
||||
var batches []*dbBatch
|
||||
|
||||
dbBatches, err := s.baseDb.GetUnconfirmedBatches(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, dbBatch := range dbBatches {
|
||||
batch := convertBatchRow(dbBatch)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
batches = append(batches, batch)
|
||||
}
|
||||
|
||||
return batches, err
|
||||
}
|
||||
|
||||
// InsertSweepBatch inserts a batch into the database, returning the id of the
// inserted batch. The batch is always inserted as unconfirmed unless its
// state is batchConfirmed (see batchToInsertArgs).
func (s *SQLStore) InsertSweepBatch(ctx context.Context, batch *dbBatch) (int32,
	error) {

	return s.baseDb.InsertBatch(ctx, batchToInsertArgs(*batch))
}
|
||||
|
||||
// UpdateSweepBatch updates a batch in the database, keyed by the batch's ID.
func (s *SQLStore) UpdateSweepBatch(ctx context.Context, batch *dbBatch) error {
	return s.baseDb.UpdateBatch(ctx, batchToUpdateArgs(*batch))
}
|
||||
|
||||
// ConfirmBatch confirms a batch by setting its state to confirmed in the
// database.
func (s *SQLStore) ConfirmBatch(ctx context.Context, id int32) error {
	return s.baseDb.ConfirmBatch(ctx, id)
}
|
||||
|
||||
// FetchBatchSweeps fetches all the sweeps that are part a batch.
|
||||
func (s *SQLStore) FetchBatchSweeps(ctx context.Context, id int32) (
|
||||
[]*dbSweep, error) {
|
||||
|
||||
readOpts := loopdb.NewSqlReadOpts()
|
||||
var sweeps []*dbSweep
|
||||
|
||||
err := s.baseDb.ExecTx(ctx, readOpts, func(tx *sqlc.Queries) error {
|
||||
dbSweeps, err := tx.GetBatchSweeps(ctx, id)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, dbSweep := range dbSweeps {
|
||||
updates, err := s.baseDb.GetSwapUpdates(
|
||||
ctx, dbSweep.SwapHash,
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
sweep, err := s.convertSweepRow(dbSweep, updates)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
sweeps = append(sweeps, &sweep)
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return sweeps, nil
|
||||
}
|
||||
|
||||
// UpsertSweep inserts a sweep into the database, or updates an existing sweep
// if it already exists, keyed by the sweep's swap hash.
func (s *SQLStore) UpsertSweep(ctx context.Context, sweep *dbSweep) error {
	return s.baseDb.UpsertSweep(ctx, sweepToUpsertArgs(*sweep))
}
|
||||
|
||||
// GetSweepStatus returns true if the sweep identified by the swap hash has
// been completed.
func (s *SQLStore) GetSweepStatus(ctx context.Context, swapHash lntypes.Hash) (
	bool, error) {

	return s.baseDb.GetSweepStatus(ctx, swapHash[:])
}
|
||||
|
||||
// dbBatch is the in-memory representation of a sweep batch row as stored in
// the database.
type dbBatch struct {
	// ID is the unique identifier of the batch.
	ID int32

	// State is the current state of the batch (see the batchOpen,
	// batchClosed and batchConfirmed constants).
	State string

	// BatchTxid is the txid of the batch transaction.
	BatchTxid chainhash.Hash

	// BatchPkScript is the pkscript of the batch transaction.
	BatchPkScript []byte

	// LastRbfHeight is the height at which the last RBF attempt was made.
	LastRbfHeight int32

	// LastRbfSatPerKw is the sat per kw of the last RBF attempt.
	LastRbfSatPerKw int32

	// MaxTimeoutDistance is the maximum timeout distance of the batch.
	MaxTimeoutDistance int32
}
|
||||
|
||||
// dbSweep is the in-memory representation of a sweep row as stored in the
// database, enriched with the loop out swap it belongs to.
type dbSweep struct {
	// ID is the unique identifier of the sweep.
	ID int32

	// BatchID is the ID of the batch that the sweep belongs to.
	BatchID int32

	// SwapHash is the hash of the swap that the sweep belongs to.
	SwapHash lntypes.Hash

	// Outpoint is the outpoint of the sweep.
	Outpoint wire.OutPoint

	// Amount is the amount of the sweep.
	Amount btcutil.Amount

	// Completed indicates whether this sweep is completed.
	Completed bool

	// LoopOut is the loop out that the sweep belongs to.
	LoopOut *loopdb.LoopOut
}
|
||||
|
||||
// convertBatchRow converts a batch row from db to a sweepbatcher.Batch struct.
|
||||
func convertBatchRow(row sqlc.SweepBatch) *dbBatch {
|
||||
batch := dbBatch{
|
||||
ID: row.ID,
|
||||
}
|
||||
|
||||
if row.Confirmed {
|
||||
batch.State = batchOpen
|
||||
}
|
||||
|
||||
if row.BatchTxID.Valid {
|
||||
err := chainhash.Decode(&batch.BatchTxid, row.BatchTxID.String)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
batch.BatchPkScript = row.BatchPkScript
|
||||
|
||||
if row.LastRbfHeight.Valid {
|
||||
batch.LastRbfHeight = row.LastRbfHeight.Int32
|
||||
}
|
||||
|
||||
if row.LastRbfSatPerKw.Valid {
|
||||
batch.LastRbfSatPerKw = row.LastRbfSatPerKw.Int32
|
||||
}
|
||||
|
||||
batch.MaxTimeoutDistance = row.MaxTimeoutDistance
|
||||
|
||||
return &batch
|
||||
}
|
||||
|
||||
// batchToInsertArgs converts a dbBatch struct to the arguments needed to
// insert it into the database.
func batchToInsertArgs(batch dbBatch) sqlc.InsertBatchParams {
	args := sqlc.InsertBatchParams{
		Confirmed: false,
		BatchTxID: sql.NullString{
			Valid:  true,
			String: batch.BatchTxid.String(),
		},
		BatchPkScript: batch.BatchPkScript,
		LastRbfHeight: sql.NullInt32{
			Valid: true,
			Int32: batch.LastRbfHeight,
		},
		LastRbfSatPerKw: sql.NullInt32{
			Valid: true,
			Int32: batch.LastRbfSatPerKw,
		},
		MaxTimeoutDistance: batch.MaxTimeoutDistance,
	}

	// Only the batchConfirmed state maps to the confirmed flag; open and
	// closed batches are both stored as unconfirmed.
	if batch.State == batchConfirmed {
		args.Confirmed = true
	}

	return args
}
|
||||
|
||||
// batchToUpdateArgs converts a dbBatch struct to the arguments needed to
// update it in the database.
func batchToUpdateArgs(batch dbBatch) sqlc.UpdateBatchParams {
	args := sqlc.UpdateBatchParams{
		ID:        batch.ID,
		Confirmed: false,
		BatchTxID: sql.NullString{
			Valid:  true,
			String: batch.BatchTxid.String(),
		},
		BatchPkScript: batch.BatchPkScript,
		LastRbfHeight: sql.NullInt32{
			Valid: true,
			Int32: batch.LastRbfHeight,
		},
		LastRbfSatPerKw: sql.NullInt32{
			Valid: true,
			Int32: batch.LastRbfSatPerKw,
		},
	}

	// Only the batchConfirmed state maps to the confirmed flag; open and
	// closed batches are both stored as unconfirmed.
	if batch.State == batchConfirmed {
		args.Confirmed = true
	}

	return args
}
|
||||
|
||||
// convertSweepRow converts a sweep row from db to a dbSweep struct, parsing
// the swap hash and outpoint, and reconstructing the full loop out swap from
// the joined row and its state updates.
func (s *SQLStore) convertSweepRow(row sqlc.GetBatchSweepsRow,
	updates []sqlc.SwapUpdate) (dbSweep, error) {

	sweep := dbSweep{
		ID:      row.ID,
		BatchID: row.BatchID,
		Amount:  btcutil.Amount(row.Amt),
	}

	// Parse the raw 32-byte swap hash.
	swapHash, err := lntypes.MakeHash(row.SwapHash)
	if err != nil {
		return sweep, err
	}

	sweep.SwapHash = swapHash

	// Rebuild the swept outpoint from its stored txid and index.
	hash, err := chainhash.NewHash(row.OutpointTxid)
	if err != nil {
		return sweep, err
	}

	sweep.Outpoint = wire.OutPoint{
		Hash:  *hash,
		Index: uint32(row.OutpointIndex),
	}

	// Reassemble the joined row into the shape the loopdb converter
	// expects and attach the resulting loop out swap.
	sweep.LoopOut, err = loopdb.ConvertLoopOutRow(
		s.network,
		sqlc.GetLoopOutSwapRow{
			ID:                     row.ID,
			SwapHash:               row.SwapHash,
			Preimage:               row.Preimage,
			InitiationTime:         row.InitiationTime,
			AmountRequested:        row.AmountRequested,
			CltvExpiry:             row.CltvExpiry,
			MaxMinerFee:            row.MaxMinerFee,
			MaxSwapFee:             row.MaxSwapFee,
			InitiationHeight:       row.InitiationHeight,
			ProtocolVersion:        row.ProtocolVersion,
			Label:                  row.Label,
			DestAddress:            row.DestAddress,
			SwapInvoice:            row.SwapInvoice,
			MaxSwapRoutingFee:      row.MaxSwapRoutingFee,
			SweepConfTarget:        row.SweepConfTarget,
			HtlcConfirmations:      row.HtlcConfirmations,
			OutgoingChanSet:        row.OutgoingChanSet,
			PrepayInvoice:          row.PrepayInvoice,
			MaxPrepayRoutingFee:    row.MaxPrepayRoutingFee,
			PublicationDeadline:    row.PublicationDeadline,
			SingleSweep:            row.SingleSweep,
			SenderScriptPubkey:     row.SenderScriptPubkey,
			ReceiverScriptPubkey:   row.ReceiverScriptPubkey,
			SenderInternalPubkey:   row.SenderInternalPubkey,
			ReceiverInternalPubkey: row.ReceiverInternalPubkey,
			ClientKeyFamily:        row.ClientKeyFamily,
			ClientKeyIndex:         row.ClientKeyIndex,
		}, updates,
	)

	return sweep, err
}
|
||||
|
||||
// sweepToUpsertArgs converts a dbSweep struct to the arguments needed to
// insert or update it in the database.
func sweepToUpsertArgs(sweep dbSweep) sqlc.UpsertSweepParams {
	return sqlc.UpsertSweepParams{
		SwapHash:      sweep.SwapHash[:],
		BatchID:       sweep.BatchID,
		OutpointTxid:  sweep.Outpoint.Hash[:],
		OutpointIndex: int32(sweep.Outpoint.Index),
		Amt:           int64(sweep.Amount),
		Completed:     sweep.Completed,
	}
}
|
@ -0,0 +1,125 @@
|
||||
package sweepbatcher
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"sort"
|
||||
|
||||
"github.com/lightningnetwork/lnd/lntypes"
|
||||
)
|
||||
|
||||
// StoreMock implements a mock client swap store.
type StoreMock struct {
	// batches is the in-memory set of batches, keyed by batch ID.
	batches map[int32]dbBatch

	// sweeps is the in-memory set of sweeps, keyed by swap hash.
	sweeps map[lntypes.Hash]dbSweep
}
|
||||
|
||||
// NewStoreMock instantiates a new mock store with empty batch and sweep maps.
func NewStoreMock() *StoreMock {
	return &StoreMock{
		batches: make(map[int32]dbBatch),
		sweeps:  make(map[lntypes.Hash]dbSweep),
	}
}
|
||||
|
||||
// FetchUnconfirmedBatches fetches all the loop out sweep batches from the
|
||||
// database that are not in a confirmed state.
|
||||
func (s *StoreMock) FetchUnconfirmedSweepBatches(ctx context.Context) (
|
||||
[]*dbBatch, error) {
|
||||
|
||||
result := []*dbBatch{}
|
||||
for _, batch := range s.batches {
|
||||
batch := batch
|
||||
if batch.State != "confirmed" {
|
||||
result = append(result, &batch)
|
||||
}
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// InsertSweepBatch inserts a batch into the database, returning the id of the
|
||||
// inserted batch.
|
||||
func (s *StoreMock) InsertSweepBatch(ctx context.Context,
|
||||
batch *dbBatch) (int32, error) {
|
||||
|
||||
var id int32
|
||||
|
||||
if len(s.batches) == 0 {
|
||||
id = 0
|
||||
} else {
|
||||
id = int32(len(s.batches))
|
||||
}
|
||||
|
||||
s.batches[id] = *batch
|
||||
return id, nil
|
||||
}
|
||||
|
||||
// UpdateSweepBatch updates a batch in the database, keyed by the batch's ID.
//
// NOTE(review): an unknown ID silently inserts a new entry rather than
// erroring — confirm that is acceptable for the tests using this mock.
func (s *StoreMock) UpdateSweepBatch(ctx context.Context,
	batch *dbBatch) error {

	s.batches[batch.ID] = *batch
	return nil
}
|
||||
|
||||
// ConfirmBatch confirms a batch.
|
||||
func (s *StoreMock) ConfirmBatch(ctx context.Context, id int32) error {
|
||||
batch, ok := s.batches[id]
|
||||
if !ok {
|
||||
return errors.New("batch not found")
|
||||
}
|
||||
|
||||
batch.State = "confirmed"
|
||||
s.batches[batch.ID] = batch
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// FetchBatchSweeps fetches all the sweeps that belong to a batch.
|
||||
func (s *StoreMock) FetchBatchSweeps(ctx context.Context,
|
||||
id int32) ([]*dbSweep, error) {
|
||||
|
||||
result := []*dbSweep{}
|
||||
for _, sweep := range s.sweeps {
|
||||
sweep := sweep
|
||||
if sweep.BatchID == id {
|
||||
result = append(result, &sweep)
|
||||
}
|
||||
}
|
||||
|
||||
sort.Slice(result, func(i, j int) bool {
|
||||
return result[i].ID < result[j].ID
|
||||
})
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// UpsertSweep inserts a sweep into the database, or updates an existing
// sweep, keyed by the sweep's swap hash.
func (s *StoreMock) UpsertSweep(ctx context.Context, sweep *dbSweep) error {
	s.sweeps[sweep.SwapHash] = *sweep
	return nil
}
|
||||
|
||||
// GetSweepStatus returns the status of a sweep.
|
||||
func (s *StoreMock) GetSweepStatus(ctx context.Context,
|
||||
swapHash lntypes.Hash) (bool, error) {
|
||||
|
||||
sweep, ok := s.sweeps[swapHash]
|
||||
if !ok {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
return sweep.Completed, nil
|
||||
}
|
||||
|
||||
// Close closes the store. The mock holds no external resources, so this is a
// no-op.
func (s *StoreMock) Close() error {
	return nil
}
|
||||
|
||||
// AssertSweepStored asserts that a sweep is stored, returning true if a sweep
// with the given swap hash exists in the mock.
func (s *StoreMock) AssertSweepStored(id lntypes.Hash) bool {
	_, ok := s.sweeps[id]
	return ok
}
|
File diff suppressed because it is too large
Load Diff
@ -0,0 +1,657 @@
|
||||
package sweepbatcher
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/btcsuite/btcd/btcec/v2"
|
||||
"github.com/btcsuite/btcd/btcutil"
|
||||
"github.com/btcsuite/btcd/chaincfg"
|
||||
"github.com/btcsuite/btcd/wire"
|
||||
"github.com/lightninglabs/lndclient"
|
||||
"github.com/lightninglabs/loop/loopdb"
|
||||
"github.com/lightninglabs/loop/utils"
|
||||
"github.com/lightningnetwork/lnd/lntypes"
|
||||
"github.com/lightningnetwork/lnd/lnwallet/chainfee"
|
||||
)
|
||||
|
||||
const (
	// defaultMaxTimeoutDistance is the default maximum timeout distance
	// of sweeps that can appear in the same batch.
	defaultMaxTimeoutDistance = 288

	// batchOpen is the string representation of the state of a batch that
	// is open.
	batchOpen = "open"

	// batchClosed is the string representation of the state of a batch
	// that is closed.
	batchClosed = "closed"

	// batchConfirmed is the string representation of the state of a batch
	// that is confirmed.
	batchConfirmed = "confirmed"

	// defaultMainnetPublishDelay is the default publish delay that is used
	// for mainnet.
	defaultMainnetPublishDelay = 5 * time.Second

	// defaultPublishDelay is the default publish delay that is used for
	// all networks other than mainnet (testnet, regtest, etc.).
	defaultPublishDelay = 500 * time.Millisecond
)
|
||||
|
||||
// BatcherStore is the database interface the batcher and its batches use to
// persist and restore their state.
type BatcherStore interface {
	// FetchUnconfirmedSweepBatches fetches all the batches from the
	// database that are not in a confirmed state.
	FetchUnconfirmedSweepBatches(ctx context.Context) ([]*dbBatch,
		error)

	// InsertSweepBatch inserts a batch into the database, returning the id
	// of the inserted batch.
	InsertSweepBatch(ctx context.Context,
		batch *dbBatch) (int32, error)

	// UpdateSweepBatch updates a batch in the database.
	UpdateSweepBatch(ctx context.Context,
		batch *dbBatch) error

	// ConfirmBatch confirms a batch by setting its state to confirmed.
	ConfirmBatch(ctx context.Context, id int32) error

	// FetchBatchSweeps fetches all the sweeps that belong to a batch.
	FetchBatchSweeps(ctx context.Context,
		id int32) ([]*dbSweep, error)

	// UpsertSweep inserts a sweep into the database, or updates an existing
	// sweep if it already exists.
	UpsertSweep(ctx context.Context, sweep *dbSweep) error

	// GetSweepStatus returns the completed status of the sweep.
	GetSweepStatus(ctx context.Context, swapHash lntypes.Hash) (
		bool, error)
}
|
||||
|
||||
// MuSig2SignSweep is a function that can be used to sign a sweep transaction
// cooperatively with the swap server.
//
// NOTE(review): the two returned byte slices are presumably the server's
// nonce and partial signature — confirm the order against the server
// implementation.
type MuSig2SignSweep func(ctx context.Context,
	protocolVersion loopdb.ProtocolVersion, swapHash lntypes.Hash,
	paymentAddr [32]byte, nonce []byte, sweepTxPsbt []byte,
	prevoutMap map[wire.OutPoint]*wire.TxOut) (
	[]byte, []byte, error)
|
||||
|
||||
// VerifySchnorrSig is a function that can be used to verify a schnorr
// signature over the given hash for the given public key.
type VerifySchnorrSig func(pubKey *btcec.PublicKey, hash, sig []byte) error
|
||||
|
||||
// SweepRequest is a request to sweep a specific outpoint.
type SweepRequest struct {
	// SwapHash is the hash of the swap that is being swept.
	SwapHash lntypes.Hash

	// Outpoint is the outpoint that is being swept.
	Outpoint wire.OutPoint

	// Value is the value of the outpoint that is being swept.
	Value btcutil.Amount

	// Notifier is a notifier that is used to notify the requester of this
	// sweep that the sweep was successful.
	Notifier *SpendNotifier
}
|
||||
|
||||
// SpendNotifier is a notifier that is used to notify the requester of a sweep
// that the sweep was successful.
type SpendNotifier struct {
	// SpendChan is a channel where the spend details (the spending
	// transaction) are received.
	SpendChan chan *wire.MsgTx

	// SpendErrChan is a channel where spend errors are received.
	SpendErrChan chan error

	// QuitChan is a channel that can be closed to stop the notifier.
	QuitChan chan bool
}
|
||||
|
||||
var (
	// ErrBatcherShuttingDown is returned when a sweep is submitted while
	// the batcher is shutting down.
	ErrBatcherShuttingDown = fmt.Errorf("batcher shutting down")
)
|
||||
|
||||
// Batcher is a system that is responsible for accepting sweep requests and
// placing them in appropriate batches. It will spin up new batches as needed.
type Batcher struct {
	// batches is a map of batch IDs to the currently active batches.
	batches map[int32]*batch

	// sweepReqs is a channel where sweep requests are received.
	sweepReqs chan SweepRequest

	// errChan is a channel where errors are received.
	errChan chan error

	// quit signals that the batch must stop.
	quit chan struct{}

	// wallet is the wallet kit client that is used by batches.
	wallet lndclient.WalletKitClient

	// chainNotifier is the chain notifier client that is used by batches.
	chainNotifier lndclient.ChainNotifierClient

	// signerClient is the signer client that is used by batches.
	signerClient lndclient.SignerClient

	// musig2ServerSign includes all the required functionality to collect
	// and verify signatures by the swap server in order to cooperatively
	// sweep funds.
	musig2ServerSign MuSig2SignSweep

	// VerifySchnorrSig is a function that can be used to verify a schnorr
	// signature.
	VerifySchnorrSig VerifySchnorrSig

	// chainParams are the chain parameters of the chain that is used by
	// batches.
	chainParams *chaincfg.Params

	// store includes all the database interactions that are needed by the
	// batcher and the batches.
	store BatcherStore

	// swapStore includes all the database interactions that are needed for
	// interacting with swaps.
	swapStore loopdb.SwapStore

	// wg is a waitgroup that is used to wait for all the goroutines to
	// exit.
	wg sync.WaitGroup
}
|
||||
|
||||
// NewBatcher creates a new Batcher instance with the given clients, signing
// helpers and stores. The batcher does nothing until Run is called.
func NewBatcher(wallet lndclient.WalletKitClient,
	chainNotifier lndclient.ChainNotifierClient,
	signerClient lndclient.SignerClient, musig2ServerSigner MuSig2SignSweep,
	verifySchnorrSig VerifySchnorrSig, chainparams *chaincfg.Params,
	store BatcherStore, swapStore loopdb.SwapStore) *Batcher {

	return &Batcher{
		batches:          make(map[int32]*batch),
		sweepReqs:        make(chan SweepRequest),
		errChan:          make(chan error, 1),
		quit:             make(chan struct{}),
		wallet:           wallet,
		chainNotifier:    chainNotifier,
		signerClient:     signerClient,
		musig2ServerSign: musig2ServerSigner,
		VerifySchnorrSig: verifySchnorrSig,
		chainParams:      chainparams,
		store:            store,
		swapStore:        swapStore,
	}
}
|
||||
|
||||
// Run starts the batcher and processes incoming sweep requests. It first
// resumes all unconfirmed batches found in the database, then serves requests
// until an error occurs or the context is cancelled. On exit it waits for all
// running batches and spawned goroutines to finish.
func (b *Batcher) Run(ctx context.Context) error {
	runCtx, cancel := context.WithCancel(ctx)
	defer func() {
		cancel()

		// Wait for all batches and helper goroutines to wind down
		// before returning.
		for _, batch := range b.batches {
			batch.Wait()
		}

		b.wg.Wait()
	}()

	// First we fetch all the batches that are not in a confirmed state from
	// the database. We will then resume the execution of these batches.
	batches, err := b.FetchUnconfirmedBatches(runCtx)
	if err != nil {
		return err
	}

	for _, batch := range batches {
		err := b.spinUpBatchFromDB(runCtx, batch)
		if err != nil {
			return err
		}
	}

	// Main request loop: dispatch incoming sweeps, surface batch errors
	// and honor cancellation.
	for {
		select {
		case sweepReq := <-b.sweepReqs:
			sweep, err := b.fetchSweep(runCtx, sweepReq)
			if err != nil {
				return err
			}

			err = b.handleSweep(runCtx, sweep, sweepReq.Notifier)
			if err != nil {
				return err
			}

		case err := <-b.errChan:
			return err

		case <-runCtx.Done():
			return runCtx.Err()
		}
	}
}
|
||||
|
||||
// AddSweep adds a sweep request to the batcher for handling. This will either
// place the sweep in an existing batch or create a new one. It returns
// ErrBatcherShuttingDown if the batcher has quit.
func (b *Batcher) AddSweep(sweepReq *SweepRequest) error {
	select {
	case b.sweepReqs <- *sweepReq:
		return nil

	case <-b.quit:
		return ErrBatcherShuttingDown
	}
}
|
||||
|
||||
// handleSweep handles a sweep request by either placing it in an existing
|
||||
// batch, or by spinning up a new batch for it.
|
||||
func (b *Batcher) handleSweep(ctx context.Context, sweep *sweep,
|
||||
notifier *SpendNotifier) error {
|
||||
|
||||
completed, err := b.store.GetSweepStatus(ctx, sweep.swapHash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
log.Infof("Batcher handling sweep %x, completed=%v", sweep.swapHash[:6],
|
||||
completed)
|
||||
|
||||
// If the sweep has already been completed in a confirmed batch then we
|
||||
// can't attach its notifier to the batch as that is no longer running.
|
||||
// Instead we directly detect and return the spend here.
|
||||
if completed && *notifier != (SpendNotifier{}) {
|
||||
go b.monitorSpendAndNotify(ctx, sweep, notifier)
|
||||
return nil
|
||||
}
|
||||
|
||||
sweep.notifier = notifier
|
||||
|
||||
// Check if the sweep is already in a batch. If that is the case, we
|
||||
// provide the sweep to that batch and return.
|
||||
for _, batch := range b.batches {
|
||||
// This is a check to see if a batch is completed. In that case
|
||||
// we just lazily delete it and continue our scan.
|
||||
if batch.isComplete() {
|
||||
delete(b.batches, batch.id)
|
||||
continue
|
||||
}
|
||||
|
||||
if batch.sweepExists(sweep.swapHash) {
|
||||
accepted, err := batch.addSweep(ctx, sweep)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if !accepted {
|
||||
return fmt.Errorf("existing sweep %x was not "+
|
||||
"accepted by batch %d", sweep.swapHash[:6],
|
||||
batch.id)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// If one of the batches accepts the sweep, we provide it to that batch.
|
||||
for _, batch := range b.batches {
|
||||
accepted, err := batch.addSweep(ctx, sweep)
|
||||
if err != nil && err != ErrBatchShuttingDown {
|
||||
return err
|
||||
}
|
||||
|
||||
// If the sweep was accepted by this batch, we return, our job
|
||||
// is done.
|
||||
if accepted {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// If no batch is capable of accepting the sweep, we spin up a fresh
|
||||
// batch and hand the sweep over to it.
|
||||
batch, err := b.spinUpBatch(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Add the sweep to the fresh batch.
|
||||
accepted, err := batch.addSweep(ctx, sweep)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// If the sweep wasn't accepted by the fresh batch something is wrong,
|
||||
// we should return the error.
|
||||
if !accepted {
|
||||
return fmt.Errorf("sweep %x was not accepted by new batch %d",
|
||||
sweep.swapHash[:6], batch.id)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// spinUpBatch creates a fresh batch, persists it to acquire its ID, registers
// it with the batcher, starts its goroutine and returns it.
func (b *Batcher) spinUpBatch(ctx context.Context) (*batch, error) {
	cfg := batchConfig{
		maxTimeoutDistance: defaultMaxTimeoutDistance,
		batchConfTarget:    defaultBatchConfTarget,
	}

	// On mainnet we wait longer before first publishing, so more sweeps
	// have a chance to join the batch. The chaincfg params values are
	// package-level singletons, so pointer comparison suffices here.
	switch b.chainParams {
	case &chaincfg.MainNetParams:
		cfg.batchPublishDelay = defaultMainnetPublishDelay

	default:
		cfg.batchPublishDelay = defaultPublishDelay
	}

	batchKit := batchKit{
		returnChan:       b.sweepReqs,
		wallet:           b.wallet,
		chainNotifier:    b.chainNotifier,
		signerClient:     b.signerClient,
		musig2SignSweep:  b.musig2ServerSign,
		verifySchnorrSig: b.VerifySchnorrSig,
		purger:           b.AddSweep,
		store:            b.store,
	}

	batch := NewBatch(cfg, batchKit)

	// Persist the batch to acquire its database-assigned ID.
	id, err := batch.insertAndAcquireID(ctx)
	if err != nil {
		return nil, err
	}

	// We add the batch to our map of batches and start it.
	b.batches[id] = batch

	b.wg.Add(1)
	go func() {
		defer b.wg.Done()

		err := batch.Run(ctx)
		if err != nil {
			_ = b.writeToErrChan(ctx, err)
		}
	}()

	return batch, nil
}
|
||||
|
||||
// spinUpBatchFromDB spins up a batch that already existed in storage: it
// loads the batch's sweeps, rebuilds the batch state and starts the batch's
// goroutine.
func (b *Batcher) spinUpBatchFromDB(ctx context.Context, batch *batch) error {
	cfg := batchConfig{
		maxTimeoutDistance: batch.cfg.maxTimeoutDistance,
		batchConfTarget:    defaultBatchConfTarget,
	}

	// Carry over the persisted RBF state.
	rbfCache := rbfCache{
		LastHeight: batch.rbfCache.LastHeight,
		FeeRate:    batch.rbfCache.FeeRate,
	}

	dbSweeps, err := b.store.FetchBatchSweeps(ctx, batch.id)
	if err != nil {
		return err
	}

	// A stored batch with no sweeps is inconsistent state.
	if len(dbSweeps) == 0 {
		return fmt.Errorf("batch %d has no sweeps", batch.id)
	}

	// The first (lowest-ID) sweep of the batch acts as its primary sweep.
	primarySweep := dbSweeps[0]

	sweeps := make(map[lntypes.Hash]sweep)

	for _, dbSweep := range dbSweeps {
		sweep, err := b.convertSweep(dbSweep)
		if err != nil {
			return err
		}

		sweeps[sweep.swapHash] = *sweep
	}

	batchKit := batchKit{
		id:               batch.id,
		batchTxid:        batch.batchTxid,
		batchPkScript:    batch.batchPkScript,
		state:            batch.state,
		primaryID:        primarySweep.SwapHash,
		sweeps:           sweeps,
		rbfCache:         rbfCache,
		returnChan:       b.sweepReqs,
		wallet:           b.wallet,
		chainNotifier:    b.chainNotifier,
		signerClient:     b.signerClient,
		musig2SignSweep:  b.musig2ServerSign,
		verifySchnorrSig: b.VerifySchnorrSig,
		purger:           b.AddSweep,
		store:            b.store,
		log:              batchPrefixLogger(fmt.Sprintf("%d", batch.id)),
	}

	newBatch := NewBatchFromDB(cfg, batchKit)

	// We add the batch to our map of batches and start it.
	b.batches[batch.id] = newBatch

	b.wg.Add(1)
	go func() {
		defer b.wg.Done()

		err := newBatch.Run(ctx)
		if err != nil {
			_ = b.writeToErrChan(ctx, err)
		}
	}()

	return nil
}
|
||||
|
||||
// FetchUnconfirmedBatches fetches all the batches from the database that are
|
||||
// not in a confirmed state.
|
||||
func (b *Batcher) FetchUnconfirmedBatches(ctx context.Context) ([]*batch,
|
||||
error) {
|
||||
|
||||
dbBatches, err := b.store.FetchUnconfirmedSweepBatches(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
batches := make([]*batch, 0, len(dbBatches))
|
||||
for _, bch := range dbBatches {
|
||||
bch := bch
|
||||
|
||||
batch := batch{}
|
||||
batch.id = bch.ID
|
||||
|
||||
switch bch.State {
|
||||
case batchOpen:
|
||||
batch.state = Open
|
||||
|
||||
case batchClosed:
|
||||
batch.state = Closed
|
||||
|
||||
case batchConfirmed:
|
||||
batch.state = Confirmed
|
||||
}
|
||||
|
||||
batch.batchTxid = &bch.BatchTxid
|
||||
batch.batchPkScript = bch.BatchPkScript
|
||||
|
||||
rbfCache := rbfCache{
|
||||
LastHeight: bch.LastRbfHeight,
|
||||
FeeRate: chainfee.SatPerKWeight(bch.LastRbfSatPerKw),
|
||||
}
|
||||
batch.rbfCache = rbfCache
|
||||
|
||||
bchCfg := batchConfig{
|
||||
maxTimeoutDistance: bch.MaxTimeoutDistance,
|
||||
}
|
||||
batch.cfg = &bchCfg
|
||||
|
||||
batches = append(batches, &batch)
|
||||
}
|
||||
|
||||
return batches, nil
|
||||
}
|
||||
|
||||
// monitorSpendAndNotify monitors the spend of a specific outpoint and writes
|
||||
// the response back to the response channel.
|
||||
func (b *Batcher) monitorSpendAndNotify(ctx context.Context, sweep *sweep,
|
||||
notifier *SpendNotifier) {
|
||||
|
||||
b.wg.Add(1)
|
||||
defer b.wg.Done()
|
||||
|
||||
spendCtx, cancel := context.WithCancel(ctx)
|
||||
defer cancel()
|
||||
|
||||
spendChan, spendErr, err := b.chainNotifier.RegisterSpendNtfn(
|
||||
spendCtx, &sweep.outpoint, sweep.htlc.PkScript,
|
||||
sweep.initiationHeight,
|
||||
)
|
||||
if err != nil {
|
||||
select {
|
||||
case notifier.SpendErrChan <- err:
|
||||
case <-ctx.Done():
|
||||
}
|
||||
|
||||
_ = b.writeToErrChan(ctx, err)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
log.Infof("Batcher monitoring spend for swap %x", sweep.swapHash[:6])
|
||||
|
||||
for {
|
||||
select {
|
||||
case spend := <-spendChan:
|
||||
select {
|
||||
case notifier.SpendChan <- spend.SpendingTx:
|
||||
case <-ctx.Done():
|
||||
}
|
||||
|
||||
return
|
||||
|
||||
case err := <-spendErr:
|
||||
select {
|
||||
case notifier.SpendErrChan <- err:
|
||||
case <-ctx.Done():
|
||||
}
|
||||
|
||||
_ = b.writeToErrChan(ctx, err)
|
||||
return
|
||||
|
||||
case <-notifier.QuitChan:
|
||||
return
|
||||
|
||||
case <-ctx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (b *Batcher) writeToErrChan(ctx context.Context, err error) error {
|
||||
select {
|
||||
case b.errChan <- err:
|
||||
return nil
|
||||
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
}
|
||||
}
|
||||
|
||||
// convertSweep converts a fetched sweep from the database to a sweep that is
|
||||
// ready to be processed by the batcher.
|
||||
func (b *Batcher) convertSweep(dbSweep *dbSweep) (*sweep, error) {
|
||||
swap := dbSweep.LoopOut
|
||||
|
||||
htlc, err := utils.GetHtlc(
|
||||
dbSweep.SwapHash, &swap.Contract.SwapContract, b.chainParams,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
swapPaymentAddr, err := utils.ObtainSwapPaymentAddr(
|
||||
swap.Contract.SwapInvoice, b.chainParams,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &sweep{
|
||||
swapHash: swap.Hash,
|
||||
outpoint: dbSweep.Outpoint,
|
||||
value: dbSweep.Amount,
|
||||
confTarget: swap.Contract.SweepConfTarget,
|
||||
timeout: swap.Contract.CltvExpiry,
|
||||
initiationHeight: swap.Contract.InitiationHeight,
|
||||
htlc: *htlc,
|
||||
preimage: swap.Contract.Preimage,
|
||||
swapInvoicePaymentAddr: *swapPaymentAddr,
|
||||
htlcKeys: swap.Contract.HtlcKeys,
|
||||
htlcSuccessEstimator: htlc.AddSuccessToEstimator,
|
||||
protocolVersion: swap.Contract.ProtocolVersion,
|
||||
isExternalAddr: swap.Contract.IsExternalAddr,
|
||||
destAddr: swap.Contract.DestAddr,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// fetchSweep fetches the sweep related information from the database.
|
||||
func (b *Batcher) fetchSweep(ctx context.Context,
|
||||
sweepReq SweepRequest) (*sweep, error) {
|
||||
|
||||
swapHash, err := lntypes.MakeHash(sweepReq.SwapHash[:])
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to parse swapHash: %v", err)
|
||||
}
|
||||
|
||||
swap, err := b.swapStore.FetchLoopOutSwap(ctx, swapHash)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to fetch loop out for %x: %v",
|
||||
swapHash[:6], err)
|
||||
}
|
||||
|
||||
htlc, err := utils.GetHtlc(
|
||||
swapHash, &swap.Contract.SwapContract, b.chainParams,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get htlc: %v", err)
|
||||
}
|
||||
|
||||
swapPaymentAddr, err := utils.ObtainSwapPaymentAddr(
|
||||
swap.Contract.SwapInvoice, b.chainParams,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get payment addr: %v", err)
|
||||
}
|
||||
|
||||
return &sweep{
|
||||
swapHash: swap.Hash,
|
||||
outpoint: sweepReq.Outpoint,
|
||||
value: sweepReq.Value,
|
||||
confTarget: swap.Contract.SweepConfTarget,
|
||||
timeout: swap.Contract.CltvExpiry,
|
||||
initiationHeight: swap.Contract.InitiationHeight,
|
||||
htlc: *htlc,
|
||||
preimage: swap.Contract.Preimage,
|
||||
swapInvoicePaymentAddr: *swapPaymentAddr,
|
||||
htlcKeys: swap.Contract.HtlcKeys,
|
||||
htlcSuccessEstimator: htlc.AddSuccessToEstimator,
|
||||
protocolVersion: swap.Contract.ProtocolVersion,
|
||||
isExternalAddr: swap.Contract.IsExternalAddr,
|
||||
destAddr: swap.Contract.DestAddr,
|
||||
}, nil
|
||||
}
|
@ -0,0 +1,986 @@
|
||||
package sweepbatcher
|
||||
|
||||
import (
|
||||
"context"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/btcsuite/btcd/btcutil"
|
||||
"github.com/btcsuite/btcd/chaincfg/chainhash"
|
||||
"github.com/btcsuite/btcd/wire"
|
||||
"github.com/lightninglabs/loop/loopdb"
|
||||
"github.com/lightninglabs/loop/test"
|
||||
"github.com/lightningnetwork/lnd/chainntnfs"
|
||||
"github.com/lightningnetwork/lnd/lntypes"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
const (
	// ntfnBufferSize is the buffer size of the channels used by the dummy
	// spend notifier, so test sends never block.
	ntfnBufferSize = 1024

	// eventuallyCheckFrequency is the polling interval passed to
	// require.Eventually assertions throughout these tests.
	eventuallyCheckFrequency = 100 * time.Millisecond

	// swapInvoice is a static testnet invoice used as the swap invoice of
	// every test swap contract.
	swapInvoice = "lntb1230n1pjjszzgpp5j76f03wrkya4sm4gxv6az5nmz5aqsvmn4" +
		"tpguu2sdvdyygedqjgqdq9xyerxcqzzsxqr23ssp5rwzmwtfjmsgranfk8sr" +
		"4p4gcgmvyd42uug8pxteg2mkk23ndvkqs9qyyssq44ruk3ex59cmv4dm6k4v" +
		"0kc6c0gcqjs0gkljfyd6c6uatqa2f67xlx3pcg5tnvcae5p3jju8ra77e87d" +
		"vhhs0jrx53wnc0fq9rkrhmqqelyx7l"
)
|
||||
|
||||
// testMuSig2SignSweep is a no-op MuSig2 server-sign callback used by the
// tests; it returns empty nonce/signature material and no error.
func testMuSig2SignSweep(ctx context.Context,
	protocolVersion loopdb.ProtocolVersion, swapHash lntypes.Hash,
	paymentAddr [32]byte, nonce []byte, sweepTxPsbt []byte,
	prevoutMap map[wire.OutPoint]*wire.TxOut) (
	[]byte, []byte, error) {

	return nil, nil, nil
}
|
||||
|
||||
// dummyNotifier is a shared spend notifier for the tests. Its channels are
// buffered with ntfnBufferSize so notifications sent to it never block.
var dummyNotifier = SpendNotifier{
	SpendChan:    make(chan *wire.MsgTx, ntfnBufferSize),
	SpendErrChan: make(chan error, ntfnBufferSize),
	QuitChan:     make(chan bool, ntfnBufferSize),
}
|
||||
|
||||
// TestSweepBatcherBatchCreation tests that sweep requests enter the expected
// batch based on their timeout distance.
func TestSweepBatcherBatchCreation(t *testing.T) {
	defer test.Guard(t)()

	lnd := test.NewMockLnd()
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	store := loopdb.NewStoreMock(t)

	batcherStore := NewStoreMock()

	// Run the batcher in the background; it is expected to exit with a
	// context-canceled error when the test finishes.
	batcher := NewBatcher(lnd.WalletKit, lnd.ChainNotifier, lnd.Signer,
		testMuSig2SignSweep, nil, lnd.ChainParams, batcherStore, store)
	go func() {
		err := batcher.Run(ctx)
		if !strings.Contains(err.Error(), "context canceled") {
			require.NoError(t, err)
		}
	}()

	// Create a sweep request.
	sweepReq1 := SweepRequest{
		SwapHash: lntypes.Hash{1, 1, 1},
		Value:    111,
		Outpoint: wire.OutPoint{
			Hash:  chainhash.Hash{1, 1},
			Index: 1,
		},
		Notifier: &dummyNotifier,
	}

	swap1 := &loopdb.LoopOutContract{
		SwapContract: loopdb.SwapContract{
			CltvExpiry:      111,
			AmountRequested: 111,
		},

		SwapInvoice: swapInvoice,
	}

	err := store.CreateLoopOut(ctx, sweepReq1.SwapHash, swap1)
	require.NoError(t, err)
	store.AssertLoopOutStored()

	// Deliver sweep request to batcher.
	batcher.sweepReqs <- sweepReq1

	// Since a batch was created we check that it registered for its primary
	// sweep's spend.
	<-lnd.RegisterSpendChannel

	// Insert the same swap twice, this should be a noop.
	batcher.sweepReqs <- sweepReq1

	// Once batcher receives sweep request it will eventually spin up a
	// batch.
	require.Eventually(t, func() bool {
		return len(batcher.batches) == 1
	}, test.Timeout, eventuallyCheckFrequency)

	// Create a second sweep request that has a timeout distance less than
	// our configured threshold.
	sweepReq2 := SweepRequest{
		SwapHash: lntypes.Hash{2, 2, 2},
		Value:    222,
		Outpoint: wire.OutPoint{
			Hash:  chainhash.Hash{2, 2},
			Index: 2,
		},
		Notifier: &dummyNotifier,
	}

	swap2 := &loopdb.LoopOutContract{
		SwapContract: loopdb.SwapContract{
			CltvExpiry:      111 + defaultMaxTimeoutDistance - 1,
			AmountRequested: 222,
		},
		SwapInvoice: swapInvoice,
	}

	err = store.CreateLoopOut(ctx, sweepReq2.SwapHash, swap2)
	require.NoError(t, err)
	store.AssertLoopOutStored()

	batcher.sweepReqs <- sweepReq2

	// Batcher should not create a second batch as timeout distance is small
	// enough.
	require.Eventually(t, func() bool {
		return len(batcher.batches) == 1
	}, test.Timeout, eventuallyCheckFrequency)

	// Create a third sweep request that has more timeout distance than
	// the default.
	sweepReq3 := SweepRequest{
		SwapHash: lntypes.Hash{3, 3, 3},
		Value:    333,
		Outpoint: wire.OutPoint{
			Hash:  chainhash.Hash{3, 3},
			Index: 3,
		},
		Notifier: &dummyNotifier,
	}

	swap3 := &loopdb.LoopOutContract{
		SwapContract: loopdb.SwapContract{
			CltvExpiry:      111 + defaultMaxTimeoutDistance + 1,
			AmountRequested: 333,
		},
		SwapInvoice: swapInvoice,
	}

	err = store.CreateLoopOut(ctx, sweepReq3.SwapHash, swap3)
	require.NoError(t, err)
	store.AssertLoopOutStored()

	batcher.sweepReqs <- sweepReq3

	// Batcher should create a second batch as timeout distance is greater
	// than the threshold
	require.Eventually(t, func() bool {
		return len(batcher.batches) == 2
	}, test.Timeout, eventuallyCheckFrequency)

	// Since the second batch got created we check that it registered its
	// primary sweep's spend.
	<-lnd.RegisterSpendChannel

	require.Eventually(t, func() bool {
		// Verify that each batch has the correct number of sweeps in it.
		for _, batch := range batcher.batches {
			switch batch.primarySweepID {
			case sweepReq1.SwapHash:
				if len(batch.sweeps) != 2 {
					return false
				}

			case sweepReq3.SwapHash:
				if len(batch.sweeps) != 1 {
					return false
				}
			}
		}

		return true
	}, test.Timeout, eventuallyCheckFrequency)

	// Check that all sweeps were stored.
	require.True(t, batcherStore.AssertSweepStored(sweepReq1.SwapHash))
	require.True(t, batcherStore.AssertSweepStored(sweepReq2.SwapHash))
	require.True(t, batcherStore.AssertSweepStored(sweepReq3.SwapHash))
}
|
||||
|
||||
// TestSweepBatcherSimpleLifecycle tests the simple lifecycle of the batches
// that are created and run by the batcher.
func TestSweepBatcherSimpleLifecycle(t *testing.T) {
	defer test.Guard(t)()

	lnd := test.NewMockLnd()
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	store := loopdb.NewStoreMock(t)

	batcherStore := NewStoreMock()

	// Run the batcher in the background; it is expected to exit with a
	// context-canceled error when the test finishes.
	batcher := NewBatcher(lnd.WalletKit, lnd.ChainNotifier, lnd.Signer,
		testMuSig2SignSweep, nil, lnd.ChainParams, batcherStore, store)
	go func() {
		err := batcher.Run(ctx)
		if !strings.Contains(err.Error(), "context canceled") {
			require.NoError(t, err)
		}
	}()

	// Create a sweep request.
	sweepReq1 := SweepRequest{
		SwapHash: lntypes.Hash{1, 1, 1},
		Value:    111,
		Outpoint: wire.OutPoint{
			Hash:  chainhash.Hash{1, 1},
			Index: 1,
		},
		Notifier: &dummyNotifier,
	}

	swap1 := &loopdb.LoopOutContract{
		SwapContract: loopdb.SwapContract{
			CltvExpiry:      111,
			AmountRequested: 111,
		},
		SwapInvoice:     swapInvoice,
		SweepConfTarget: 111,
	}

	err := store.CreateLoopOut(ctx, sweepReq1.SwapHash, swap1)
	require.NoError(t, err)
	store.AssertLoopOutStored()

	// Deliver sweep request to batcher.
	batcher.sweepReqs <- sweepReq1

	// Eventually request will be consumed and a new batch will spin up.
	require.Eventually(t, func() bool {
		return len(batcher.batches) == 1
	}, test.Timeout, eventuallyCheckFrequency)

	// When batch is successfully created it will execute its first step,
	// which leads to a spend monitor of the primary sweep.
	<-lnd.RegisterSpendChannel

	// Find the batch and assign it to a local variable for easier access.
	batch := &batch{}
	for _, btch := range batcher.batches {
		if btch.primarySweepID == sweepReq1.SwapHash {
			batch = btch
		}
	}

	require.Eventually(t, func() bool {
		// Batch should have the sweep stored.
		return len(batch.sweeps) == 1
	}, test.Timeout, eventuallyCheckFrequency)

	// The primary sweep id should be that of the first inserted sweep.
	require.Equal(t, batch.primarySweepID, sweepReq1.SwapHash)

	err = lnd.NotifyHeight(601)
	require.NoError(t, err)

	// After receiving a height notification the batch will step again,
	// leading to a new spend monitoring.
	require.Eventually(t, func() bool {
		return batch.currentHeight == 601
	}, test.Timeout, eventuallyCheckFrequency)

	// Create the spending tx that will trigger the spend monitor of the
	// batch.
	spendingTx := &wire.MsgTx{
		Version: 1,
		// Since the spend monitor is registered on the primary sweep's
		// outpoint we insert that outpoint here.
		TxIn: []*wire.TxIn{
			{
				PreviousOutPoint: sweepReq1.Outpoint,
			},
		},
		TxOut: []*wire.TxOut{
			{
				PkScript: []byte{3, 2, 1},
			},
		},
	}

	spendingTxHash := spendingTx.TxHash()

	// Mock the spend notification that spends the swap.
	spendDetail := &chainntnfs.SpendDetail{
		SpentOutPoint:     &sweepReq1.Outpoint,
		SpendingTx:        spendingTx,
		SpenderTxHash:     &spendingTxHash,
		SpenderInputIndex: 0,
		SpendingHeight:    601,
	}

	// We notify the spend.
	lnd.SpendChannel <- spendDetail

	// After receiving the spend, the batch is now monitoring for confs.
	<-lnd.RegisterConfChannel

	// The batch should eventually read the spend notification and progress
	// its state to closed.
	require.Eventually(t, func() bool {
		return batch.state == Closed
	}, test.Timeout, eventuallyCheckFrequency)

	err = lnd.NotifyHeight(604)
	require.NoError(t, err)

	// We mock the tx confirmation notification.
	lnd.ConfChannel <- &chainntnfs.TxConfirmation{
		Tx: spendingTx,
	}

	// Eventually the batch receives the confirmation notification and
	// confirms itself.
	require.Eventually(t, func() bool {
		return batch.isComplete()
	}, test.Timeout, eventuallyCheckFrequency)
}
|
||||
|
||||
// TestSweepBatcherSweepReentry tests that when an old version of the batch tx
// gets confirmed the sweep leftovers are sent back to the batcher.
func TestSweepBatcherSweepReentry(t *testing.T) {
	defer test.Guard(t)()

	lnd := test.NewMockLnd()
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	store := loopdb.NewStoreMock(t)

	batcherStore := NewStoreMock()

	// Run the batcher in the background; it is expected to exit with a
	// context-canceled error when the test finishes.
	batcher := NewBatcher(lnd.WalletKit, lnd.ChainNotifier, lnd.Signer,
		testMuSig2SignSweep, nil, lnd.ChainParams, batcherStore, store)
	go func() {
		err := batcher.Run(ctx)
		if !strings.Contains(err.Error(), "context canceled") {
			require.NoError(t, err)
		}
	}()

	// Create some sweep requests with timeouts not too far away, in order
	// to enter the same batch.
	sweepReq1 := SweepRequest{
		SwapHash: lntypes.Hash{1, 1, 1},
		Value:    111,
		Outpoint: wire.OutPoint{
			Hash:  chainhash.Hash{1, 1},
			Index: 1,
		},
		Notifier: &dummyNotifier,
	}

	swap1 := &loopdb.LoopOutContract{
		SwapContract: loopdb.SwapContract{
			CltvExpiry:      111,
			AmountRequested: 111,
		},
		SwapInvoice:     swapInvoice,
		SweepConfTarget: 111,
	}

	err := store.CreateLoopOut(ctx, sweepReq1.SwapHash, swap1)
	require.NoError(t, err)
	store.AssertLoopOutStored()

	sweepReq2 := SweepRequest{
		SwapHash: lntypes.Hash{2, 2, 2},
		Value:    222,
		Outpoint: wire.OutPoint{
			Hash:  chainhash.Hash{2, 2},
			Index: 2,
		},
		Notifier: &dummyNotifier,
	}

	swap2 := &loopdb.LoopOutContract{
		SwapContract: loopdb.SwapContract{
			CltvExpiry:      111,
			AmountRequested: 222,
		},
		SwapInvoice:     swapInvoice,
		SweepConfTarget: 111,
	}

	err = store.CreateLoopOut(ctx, sweepReq2.SwapHash, swap2)
	require.NoError(t, err)
	store.AssertLoopOutStored()

	sweepReq3 := SweepRequest{
		SwapHash: lntypes.Hash{3, 3, 3},
		Value:    333,
		Outpoint: wire.OutPoint{
			Hash:  chainhash.Hash{3, 3},
			Index: 3,
		},
		Notifier: &dummyNotifier,
	}

	swap3 := &loopdb.LoopOutContract{
		SwapContract: loopdb.SwapContract{
			CltvExpiry:      111,
			AmountRequested: 333,
		},
		SwapInvoice:     swapInvoice,
		SweepConfTarget: 111,
	}

	err = store.CreateLoopOut(ctx, sweepReq3.SwapHash, swap3)
	require.NoError(t, err)
	store.AssertLoopOutStored()

	// Feed the sweeps to the batcher.
	batcher.sweepReqs <- sweepReq1

	// After inserting the primary (first) sweep, a spend monitor should be
	// registered.
	<-lnd.RegisterSpendChannel

	batcher.sweepReqs <- sweepReq2

	batcher.sweepReqs <- sweepReq3

	// Batcher should create a batch for the sweeps.
	require.Eventually(t, func() bool {
		return len(batcher.batches) == 1
	}, test.Timeout, eventuallyCheckFrequency)

	// Find the batch and store it in a local variable for easier access.
	b := &batch{}
	for _, btch := range batcher.batches {
		if btch.primarySweepID == sweepReq1.SwapHash {
			b = btch
		}
	}

	// Batcher should contain all sweeps.
	require.Eventually(t, func() bool {
		return len(b.sweeps) == 3
	}, test.Timeout, eventuallyCheckFrequency)

	// Verify that the batch has a primary sweep id that matches the first
	// inserted sweep, sweep1.
	require.Equal(t, b.primarySweepID, sweepReq1.SwapHash)

	// Create the spending tx. In order to simulate an older version of the
	// batch transaction being confirmed, we only insert the primary sweep's
	// outpoint as a TxIn. This means that the other two sweeps did not
	// appear in the spending transaction. (This simulates a possible
	// scenario caused by RBF replacements.)
	spendingTx := &wire.MsgTx{
		Version: 1,
		TxIn: []*wire.TxIn{
			{
				PreviousOutPoint: sweepReq1.Outpoint,
			},
		},
		TxOut: []*wire.TxOut{
			{
				Value:    int64(sweepReq1.Value.ToUnit(btcutil.AmountSatoshi)),
				PkScript: []byte{3, 2, 1},
			},
		},
	}

	spendingTxHash := spendingTx.TxHash()

	spendDetail := &chainntnfs.SpendDetail{
		SpentOutPoint:     &sweepReq1.Outpoint,
		SpendingTx:        spendingTx,
		SpenderTxHash:     &spendingTxHash,
		SpenderInputIndex: 0,
		SpendingHeight:    601,
	}

	// Send the spending notification to the mock channel.
	lnd.SpendChannel <- spendDetail

	// After receiving the spend notification the batch should progress to
	// the next step, which is monitoring for confirmations.
	<-lnd.RegisterConfChannel

	// Eventually the batch reads the notification and proceeds to a closed
	// state.
	require.Eventually(t, func() bool {
		return b.state == Closed
	}, test.Timeout, eventuallyCheckFrequency)

	// While handling the spend notification the batch should detect that
	// some sweeps did not appear in the spending tx, therefore it redirects
	// them back to the batcher and the batcher inserts them in a new batch.
	require.Eventually(t, func() bool {
		return len(batcher.batches) == 2
	}, test.Timeout, eventuallyCheckFrequency)

	// Since second batch was created we check that it registered for its
	// primary sweep's spend.
	<-lnd.RegisterSpendChannel

	// We mock the confirmation notification.
	lnd.ConfChannel <- &chainntnfs.TxConfirmation{
		Tx: spendingTx,
	}

	// Eventually the batch receives the confirmation notification,
	// gracefully exits and the batcher deletes it.
	require.Eventually(t, func() bool {
		return len(batcher.batches) == 1
	}, test.Timeout, eventuallyCheckFrequency)

	// Find the other batch, which includes the sweeps that did not appear
	// in the spending tx.
	b = &batch{}
	for _, btch := range batcher.batches {
		b = btch
	}

	// After all the sweeps enter, it should contain 2 sweeps.
	require.Eventually(t, func() bool {
		return len(b.sweeps) == 2
	}, test.Timeout, eventuallyCheckFrequency)

	// The batch should be in an open state.
	require.Equal(t, b.state, Open)
}
|
||||
|
||||
// TestSweepBatcherNonWalletAddr tests that sweep requests that sweep to a non
// wallet address enter individual batches.
func TestSweepBatcherNonWalletAddr(t *testing.T) {
	defer test.Guard(t)()

	lnd := test.NewMockLnd()
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	store := loopdb.NewStoreMock(t)

	batcherStore := NewStoreMock()

	// Run the batcher in the background; it is expected to exit with a
	// context-canceled error when the test finishes.
	batcher := NewBatcher(lnd.WalletKit, lnd.ChainNotifier, lnd.Signer,
		testMuSig2SignSweep, nil, lnd.ChainParams, batcherStore, store)
	go func() {
		err := batcher.Run(ctx)
		if !strings.Contains(err.Error(), "context canceled") {
			require.NoError(t, err)
		}
	}()

	// Create a sweep request.
	sweepReq1 := SweepRequest{
		SwapHash: lntypes.Hash{1, 1, 1},
		Value:    111,
		Outpoint: wire.OutPoint{
			Hash:  chainhash.Hash{1, 1},
			Index: 1,
		},
		Notifier: &dummyNotifier,
	}

	// IsExternalAddr forces this sweep into its own batch.
	swap1 := &loopdb.LoopOutContract{
		SwapContract: loopdb.SwapContract{
			CltvExpiry:      111,
			AmountRequested: 111,
		},
		IsExternalAddr: true,
		SwapInvoice:    swapInvoice,
	}

	err := store.CreateLoopOut(ctx, sweepReq1.SwapHash, swap1)
	require.NoError(t, err)
	store.AssertLoopOutStored()

	// Deliver sweep request to batcher.
	batcher.sweepReqs <- sweepReq1

	// Once batcher receives sweep request it will eventually spin up a
	// batch.
	require.Eventually(t, func() bool {
		return len(batcher.batches) == 1
	}, test.Timeout, eventuallyCheckFrequency)

	// Since a batch was created we check that it registered for its primary
	// sweep's spend.
	<-lnd.RegisterSpendChannel

	// Insert the same swap twice, this should be a noop.
	batcher.sweepReqs <- sweepReq1

	// Create a second sweep request that has a timeout distance less than
	// our configured threshold.
	sweepReq2 := SweepRequest{
		SwapHash: lntypes.Hash{2, 2, 2},
		Value:    222,
		Outpoint: wire.OutPoint{
			Hash:  chainhash.Hash{2, 2},
			Index: 2,
		},
		Notifier: &dummyNotifier,
	}

	swap2 := &loopdb.LoopOutContract{
		SwapContract: loopdb.SwapContract{
			CltvExpiry:      111 + defaultMaxTimeoutDistance - 1,
			AmountRequested: 222,
		},
		SwapInvoice:    swapInvoice,
		IsExternalAddr: true,
	}

	err = store.CreateLoopOut(ctx, sweepReq2.SwapHash, swap2)
	require.NoError(t, err)
	store.AssertLoopOutStored()

	batcher.sweepReqs <- sweepReq2

	// Batcher should create a second batch as first batch is a non wallet
	// addr batch.
	require.Eventually(t, func() bool {
		return len(batcher.batches) == 2
	}, test.Timeout, eventuallyCheckFrequency)

	// Since a batch was created we check that it registered for its primary
	// sweep's spend.
	<-lnd.RegisterSpendChannel

	// Create a third sweep request that has more timeout distance than
	// the default.
	sweepReq3 := SweepRequest{
		SwapHash: lntypes.Hash{3, 3, 3},
		Value:    333,
		Outpoint: wire.OutPoint{
			Hash:  chainhash.Hash{3, 3},
			Index: 3,
		},
		Notifier: &dummyNotifier,
	}

	swap3 := &loopdb.LoopOutContract{
		SwapContract: loopdb.SwapContract{
			CltvExpiry:      111 + defaultMaxTimeoutDistance + 1,
			AmountRequested: 333,
		},
		SwapInvoice:    swapInvoice,
		IsExternalAddr: true,
	}

	err = store.CreateLoopOut(ctx, sweepReq3.SwapHash, swap3)
	require.NoError(t, err)
	store.AssertLoopOutStored()

	batcher.sweepReqs <- sweepReq3

	// Batcher should create a new batch as timeout distance is greater than
	// the threshold
	require.Eventually(t, func() bool {
		return len(batcher.batches) == 3
	}, test.Timeout, eventuallyCheckFrequency)

	// Since a batch was created we check that it registered for its primary
	// sweep's spend.
	<-lnd.RegisterSpendChannel

	require.Eventually(t, func() bool {
		// Verify that each batch has the correct number of sweeps in it.
		for _, batch := range batcher.batches {
			switch batch.primarySweepID {
			case sweepReq1.SwapHash:
				if len(batch.sweeps) != 1 {
					return false
				}

			case sweepReq2.SwapHash:
				if len(batch.sweeps) != 1 {
					return false
				}

			case sweepReq3.SwapHash:
				if len(batch.sweeps) != 1 {
					return false
				}
			}
		}

		return true
	}, test.Timeout, eventuallyCheckFrequency)

	// Check that all sweeps were stored.
	require.True(t, batcherStore.AssertSweepStored(sweepReq1.SwapHash))
	require.True(t, batcherStore.AssertSweepStored(sweepReq2.SwapHash))
	require.True(t, batcherStore.AssertSweepStored(sweepReq3.SwapHash))
}
|
||||
|
||||
// TestSweepBatcherComposite tests that sweep requests that sweep to both wallet
|
||||
// addresses and non-wallet addresses enter the correct batches.
|
||||
func TestSweepBatcherComposite(t *testing.T) {
|
||||
defer test.Guard(t)()
|
||||
|
||||
lnd := test.NewMockLnd()
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
store := loopdb.NewStoreMock(t)
|
||||
|
||||
batcherStore := NewStoreMock()
|
||||
|
||||
batcher := NewBatcher(lnd.WalletKit, lnd.ChainNotifier, lnd.Signer,
|
||||
testMuSig2SignSweep, nil, lnd.ChainParams, batcherStore, store)
|
||||
go func() {
|
||||
err := batcher.Run(ctx)
|
||||
if !strings.Contains(err.Error(), "context canceled") {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
}()
|
||||
|
||||
// Create a sweep request.
|
||||
sweepReq1 := SweepRequest{
|
||||
SwapHash: lntypes.Hash{1, 1, 1},
|
||||
Value: 111,
|
||||
Outpoint: wire.OutPoint{
|
||||
Hash: chainhash.Hash{1, 1},
|
||||
Index: 1,
|
||||
},
|
||||
Notifier: &dummyNotifier,
|
||||
}
|
||||
|
||||
swap1 := &loopdb.LoopOutContract{
|
||||
SwapContract: loopdb.SwapContract{
|
||||
CltvExpiry: 111,
|
||||
AmountRequested: 111,
|
||||
},
|
||||
|
||||
SwapInvoice: swapInvoice,
|
||||
}
|
||||
|
||||
err := store.CreateLoopOut(ctx, sweepReq1.SwapHash, swap1)
|
||||
require.NoError(t, err)
|
||||
store.AssertLoopOutStored()
|
||||
|
||||
// Create a second sweep request that has a timeout distance less than
|
||||
// our configured threshold.
|
||||
sweepReq2 := SweepRequest{
|
||||
SwapHash: lntypes.Hash{2, 2, 2},
|
||||
Value: 222,
|
||||
Outpoint: wire.OutPoint{
|
||||
Hash: chainhash.Hash{2, 2},
|
||||
Index: 2,
|
||||
},
|
||||
Notifier: &dummyNotifier,
|
||||
}
|
||||
|
||||
swap2 := &loopdb.LoopOutContract{
|
||||
SwapContract: loopdb.SwapContract{
|
||||
CltvExpiry: 111 + defaultMaxTimeoutDistance - 1,
|
||||
AmountRequested: 222,
|
||||
},
|
||||
SwapInvoice: swapInvoice,
|
||||
}
|
||||
|
||||
err = store.CreateLoopOut(ctx, sweepReq2.SwapHash, swap2)
|
||||
require.NoError(t, err)
|
||||
store.AssertLoopOutStored()
|
||||
|
||||
// Create a third sweep request that has less timeout distance than the
|
||||
// default max, but is not spending to a wallet address.
|
||||
sweepReq3 := SweepRequest{
|
||||
SwapHash: lntypes.Hash{3, 3, 3},
|
||||
Value: 333,
|
||||
Outpoint: wire.OutPoint{
|
||||
Hash: chainhash.Hash{3, 3},
|
||||
Index: 3,
|
||||
},
|
||||
Notifier: &dummyNotifier,
|
||||
}
|
||||
|
||||
swap3 := &loopdb.LoopOutContract{
|
||||
SwapContract: loopdb.SwapContract{
|
||||
CltvExpiry: 111 + defaultMaxTimeoutDistance - 3,
|
||||
AmountRequested: 333,
|
||||
},
|
||||
SwapInvoice: swapInvoice,
|
||||
IsExternalAddr: true,
|
||||
}
|
||||
|
||||
err = store.CreateLoopOut(ctx, sweepReq3.SwapHash, swap3)
|
||||
require.NoError(t, err)
|
||||
store.AssertLoopOutStored()
|
||||
|
||||
// Create a fourth sweep request that has a timeout which is not valid
|
||||
// for the first batch, so it will cause it to create a new batch.
|
||||
sweepReq4 := SweepRequest{
|
||||
SwapHash: lntypes.Hash{4, 4, 4},
|
||||
Value: 444,
|
||||
Outpoint: wire.OutPoint{
|
||||
Hash: chainhash.Hash{4, 4},
|
||||
Index: 4,
|
||||
},
|
||||
Notifier: &dummyNotifier,
|
||||
}
|
||||
|
||||
swap4 := &loopdb.LoopOutContract{
|
||||
SwapContract: loopdb.SwapContract{
|
||||
CltvExpiry: 111 + defaultMaxTimeoutDistance + 1,
|
||||
AmountRequested: 444,
|
||||
},
|
||||
SwapInvoice: swapInvoice,
|
||||
}
|
||||
|
||||
err = store.CreateLoopOut(ctx, sweepReq4.SwapHash, swap4)
|
||||
require.NoError(t, err)
|
||||
store.AssertLoopOutStored()
|
||||
|
||||
// Create a fifth sweep request that has a timeout which is not valid
|
||||
// for the first batch, but a valid timeout for the new batch.
|
||||
sweepReq5 := SweepRequest{
|
||||
SwapHash: lntypes.Hash{5, 5, 5},
|
||||
Value: 555,
|
||||
Outpoint: wire.OutPoint{
|
||||
Hash: chainhash.Hash{5, 5},
|
||||
Index: 5,
|
||||
},
|
||||
Notifier: &dummyNotifier,
|
||||
}
|
||||
|
||||
swap5 := &loopdb.LoopOutContract{
|
||||
SwapContract: loopdb.SwapContract{
|
||||
CltvExpiry: 111 + defaultMaxTimeoutDistance + 5,
|
||||
AmountRequested: 555,
|
||||
},
|
||||
SwapInvoice: swapInvoice,
|
||||
}
|
||||
|
||||
err = store.CreateLoopOut(ctx, sweepReq5.SwapHash, swap5)
|
||||
require.NoError(t, err)
|
||||
store.AssertLoopOutStored()
|
||||
|
||||
// Create a sixth sweep request that has a valid timeout for the new
|
||||
// batch, but is paying to a non-wallet address.
|
||||
sweepReq6 := SweepRequest{
|
||||
SwapHash: lntypes.Hash{6, 6, 6},
|
||||
Value: 666,
|
||||
Outpoint: wire.OutPoint{
|
||||
Hash: chainhash.Hash{6, 6},
|
||||
Index: 6,
|
||||
},
|
||||
Notifier: &dummyNotifier,
|
||||
}
|
||||
|
||||
swap6 := &loopdb.LoopOutContract{
|
||||
SwapContract: loopdb.SwapContract{
|
||||
CltvExpiry: 111 + defaultMaxTimeoutDistance + 6,
|
||||
AmountRequested: 666,
|
||||
},
|
||||
SwapInvoice: swapInvoice,
|
||||
IsExternalAddr: true,
|
||||
}
|
||||
|
||||
err = store.CreateLoopOut(ctx, sweepReq6.SwapHash, swap6)
|
||||
require.NoError(t, err)
|
||||
store.AssertLoopOutStored()
|
||||
|
||||
// Deliver sweep request to batcher.
|
||||
batcher.sweepReqs <- sweepReq1
|
||||
|
||||
// Once batcher receives sweep request it will eventually spin up a
|
||||
// batch.
|
||||
require.Eventually(t, func() bool {
|
||||
return len(batcher.batches) == 1
|
||||
}, test.Timeout, eventuallyCheckFrequency)
|
||||
|
||||
// Since a batch was created we check that it registered for its primary
|
||||
// sweep's spend.
|
||||
<-lnd.RegisterSpendChannel
|
||||
|
||||
// Insert the same swap twice, this should be a noop.
|
||||
batcher.sweepReqs <- sweepReq1
|
||||
|
||||
batcher.sweepReqs <- sweepReq2
|
||||
|
||||
// Batcher should not create a second batch as timeout distance is small
|
||||
// enough.
|
||||
require.Eventually(t, func() bool {
|
||||
return len(batcher.batches) == 1
|
||||
}, test.Timeout, eventuallyCheckFrequency)
|
||||
|
||||
batcher.sweepReqs <- sweepReq3
|
||||
|
||||
// Batcher should create a second batch as this sweep pays to a non
|
||||
// wallet address.
|
||||
require.Eventually(t, func() bool {
|
||||
return len(batcher.batches) == 2
|
||||
}, test.Timeout, eventuallyCheckFrequency)
|
||||
|
||||
// Since a batch was created we check that it registered for its primary
|
||||
// sweep's spend.
|
||||
<-lnd.RegisterSpendChannel
|
||||
|
||||
batcher.sweepReqs <- sweepReq4
|
||||
|
||||
// Batcher should create a third batch as timeout distance is greater
|
||||
// than the threshold.
|
||||
require.Eventually(t, func() bool {
|
||||
return len(batcher.batches) == 3
|
||||
}, test.Timeout, eventuallyCheckFrequency)
|
||||
|
||||
// Since a batch was created we check that it registered for its primary
|
||||
// sweep's spend.
|
||||
<-lnd.RegisterSpendChannel
|
||||
|
||||
batcher.sweepReqs <- sweepReq5
|
||||
|
||||
// Batcher should not create a fourth batch as timeout distance is small
|
||||
// enough for it to join the last batch.
|
||||
require.Eventually(t, func() bool {
|
||||
return len(batcher.batches) == 3
|
||||
}, test.Timeout, eventuallyCheckFrequency)
|
||||
|
||||
batcher.sweepReqs <- sweepReq6
|
||||
|
||||
// Batcher should create a fourth batch as this sweep pays to a non
|
||||
// wallet address.
|
||||
require.Eventually(t, func() bool {
|
||||
return len(batcher.batches) == 4
|
||||
}, test.Timeout, eventuallyCheckFrequency)
|
||||
|
||||
// Since a batch was created we check that it registered for its primary
|
||||
// sweep's spend.
|
||||
<-lnd.RegisterSpendChannel
|
||||
|
||||
require.Eventually(t, func() bool {
|
||||
// Verify that each batch has the correct number of sweeps in
|
||||
// it.
|
||||
for _, batch := range batcher.batches {
|
||||
switch batch.primarySweepID {
|
||||
case sweepReq1.SwapHash:
|
||||
if len(batch.sweeps) != 2 {
|
||||
return false
|
||||
}
|
||||
|
||||
case sweepReq3.SwapHash:
|
||||
if len(batch.sweeps) != 1 {
|
||||
return false
|
||||
}
|
||||
|
||||
case sweepReq4.SwapHash:
|
||||
if len(batch.sweeps) != 2 {
|
||||
return false
|
||||
}
|
||||
|
||||
case sweepReq6.SwapHash:
|
||||
if len(batch.sweeps) != 1 {
|
||||
return false
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}, test.Timeout, eventuallyCheckFrequency)
|
||||
|
||||
// Check that all sweeps were stored.
|
||||
require.True(t, batcherStore.AssertSweepStored(sweepReq1.SwapHash))
|
||||
require.True(t, batcherStore.AssertSweepStored(sweepReq2.SwapHash))
|
||||
require.True(t, batcherStore.AssertSweepStored(sweepReq3.SwapHash))
|
||||
require.True(t, batcherStore.AssertSweepStored(sweepReq4.SwapHash))
|
||||
require.True(t, batcherStore.AssertSweepStored(sweepReq5.SwapHash))
|
||||
require.True(t, batcherStore.AssertSweepStored(sweepReq6.SwapHash))
|
||||
}
|
@ -0,0 +1,77 @@
|
||||
package utils
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/btcsuite/btcd/chaincfg"
|
||||
"github.com/lightninglabs/loop/loopdb"
|
||||
"github.com/lightninglabs/loop/swap"
|
||||
"github.com/lightningnetwork/lnd/input"
|
||||
"github.com/lightningnetwork/lnd/lntypes"
|
||||
"github.com/lightningnetwork/lnd/zpay32"
|
||||
)
|
||||
|
||||
// GetHtlc composes and returns the on-chain swap script.
|
||||
func GetHtlc(hash lntypes.Hash, contract *loopdb.SwapContract,
|
||||
chainParams *chaincfg.Params) (*swap.Htlc, error) {
|
||||
|
||||
switch GetHtlcScriptVersion(contract.ProtocolVersion) {
|
||||
case swap.HtlcV2:
|
||||
return swap.NewHtlcV2(
|
||||
contract.CltvExpiry, contract.HtlcKeys.SenderScriptKey,
|
||||
contract.HtlcKeys.ReceiverScriptKey, hash,
|
||||
chainParams,
|
||||
)
|
||||
|
||||
case swap.HtlcV3:
|
||||
// Swaps that implement the new MuSig2 protocol will be expected
|
||||
// to use the 1.0RC2 MuSig2 key derivation scheme.
|
||||
muSig2Version := input.MuSig2Version040
|
||||
if contract.ProtocolVersion >= loopdb.ProtocolVersionMuSig2 {
|
||||
muSig2Version = input.MuSig2Version100RC2
|
||||
}
|
||||
|
||||
return swap.NewHtlcV3(
|
||||
muSig2Version,
|
||||
contract.CltvExpiry,
|
||||
contract.HtlcKeys.SenderInternalPubKey,
|
||||
contract.HtlcKeys.ReceiverInternalPubKey,
|
||||
contract.HtlcKeys.SenderScriptKey,
|
||||
contract.HtlcKeys.ReceiverScriptKey,
|
||||
hash, chainParams,
|
||||
)
|
||||
}
|
||||
|
||||
return nil, swap.ErrInvalidScriptVersion
|
||||
}
|
||||
|
||||
// GetHtlcScriptVersion returns the correct HTLC script version for the passed
|
||||
// protocol version.
|
||||
func GetHtlcScriptVersion(
|
||||
protocolVersion loopdb.ProtocolVersion) swap.ScriptVersion {
|
||||
|
||||
// If the swap was initiated before we had our v3 script, use v2.
|
||||
if protocolVersion < loopdb.ProtocolVersionHtlcV3 ||
|
||||
protocolVersion == loopdb.ProtocolVersionUnrecorded {
|
||||
|
||||
return swap.HtlcV2
|
||||
}
|
||||
|
||||
return swap.HtlcV3
|
||||
}
|
||||
|
||||
// ObtainSwapPaymentAddr will retrieve the payment addr from the passed invoice.
|
||||
func ObtainSwapPaymentAddr(swapInvoice string, chainParams *chaincfg.Params) (
|
||||
*[32]byte, error) {
|
||||
|
||||
swapPayReq, err := zpay32.Decode(swapInvoice, chainParams)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if swapPayReq.PaymentAddr == nil {
|
||||
return nil, fmt.Errorf("expected payment address for invoice")
|
||||
}
|
||||
|
||||
return swapPayReq.PaymentAddr, nil
|
||||
}
|
Loading…
Reference in New Issue