feat: INP2P v0.1.0 — complete P2P tunneling system

Core modules (M1-M6):
- pkg/protocol: message format, encoding, NAT type enums
- pkg/config: server/client config structs, env vars, validation
- pkg/auth: CRC64 token, TOTP gen/verify, one-time relay tokens
- pkg/nat: UDP/TCP STUN client and server
- pkg/signal: WSS message dispatch, sync request/response
- pkg/punch: UDP/TCP hole punching + priority chain
- pkg/mux: stream multiplexer (7B frame: StreamID+Flags+Len)
- pkg/tunnel: mux-based port forwarding with stats
- pkg/relay: relay manager with TOTP auth + session bridging
- internal/server: signaling server (login/heartbeat/report/coordinator)
- internal/client: client (NAT detect/login/punch/relay/reconnect)
- cmd/inp2ps + cmd/inp2pc: main entrypoints with graceful shutdown

All tests pass: 16 tests across 5 packages
Code: 3559 core lines + 861 test lines across 19 source files
This commit is contained in:
2026-03-02 15:13:22 +08:00
commit 91e3d4da2a
23 changed files with 4681 additions and 0 deletions

233
pkg/tunnel/tunnel.go Normal file
View File

@@ -0,0 +1,233 @@
// Package tunnel provides P2P tunnel with mux-based port forwarding.
package tunnel
import (
"fmt"
"io"
"log"
"net"
"sync"
"sync/atomic"
"time"
"github.com/openp2p-cn/inp2p/pkg/mux"
)
// Tunnel represents a P2P tunnel that multiplexes port forwards over one connection.
// All forwarded traffic rides on a single mux.Session built over the underlying
// P2P connection. Counter fields in stats are accessed atomically; read them
// via GetStats.
type Tunnel struct {
	PeerNode string             // remote node identifier
	PeerIP   string             // remote address of the underlying connection
	LinkMode string             // "udppunch", "tcppunch", "relay", "direct"
	RTT      time.Duration      // link round-trip time as supplied by the caller at setup
	sess     *mux.Session       // stream multiplexer over the P2P connection
	listeners map[int]*forwarder // srcPort → forwarder
	mu       sync.Mutex          // guards listeners
	closed   int32               // set to 1 by Close (accessed atomically)
	stats    Stats               // traffic counters (atomic access)
}
// forwarder tracks one local listener and the peer-side destination its
// accepted connections are forwarded to.
type forwarder struct {
	listener net.Listener  // local accept socket
	dstHost  string        // destination host, dialed on the peer side
	dstPort  int           // destination port, dialed on the peer side
	quit     chan struct{} // closed to tell acceptLoop to stop
}
// Stats tracks tunnel traffic. All fields are updated with sync/atomic;
// use GetStats for a consistent snapshot.
type Stats struct {
	BytesSent     int64 // bytes copied into the mux stream (toward the peer)
	BytesReceived int64 // bytes copied out of the mux stream (from the peer)
	Connections   int64 // total local connections accepted by forwarders
	ActiveStreams int32 // mux streams currently open
}
// New creates a tunnel from an established P2P connection.
// isInitiator: the side that opened the P2P connection is the mux client.
func New(peerNode string, conn net.Conn, linkMode string, rtt time.Duration, isInitiator bool) *Tunnel {
	t := &Tunnel{
		PeerNode:  peerNode,
		PeerIP:    conn.RemoteAddr().String(),
		LinkMode:  linkMode,
		RTT:       rtt,
		listeners: map[int]*forwarder{},
	}
	// The connection initiator plays the mux client role; the responder
	// plays the mux server role (hence the inverted flag).
	t.sess = mux.NewSession(conn, !isInitiator)
	return t
}
// ListenAndForward starts a local listener that forwards connections through the tunnel.
// Each accepted connection opens a mux stream to the peer, which connects to dstHost:dstPort.
// It returns an error if the tunnel is already closed, if srcPort is already
// registered on this tunnel, or if the local listen fails.
func (t *Tunnel) ListenAndForward(protocol string, srcPort int, dstHost string, dstPort int) error {
	addr := fmt.Sprintf(":%d", srcPort)
	ln, err := net.Listen(protocol, addr)
	if err != nil {
		return fmt.Errorf("listen %s %s: %w", protocol, addr, err)
	}
	fwd := &forwarder{
		listener: ln,
		dstHost:  dstHost,
		dstPort:  dstPort,
		quit:     make(chan struct{}),
	}
	t.mu.Lock()
	// Check the closed flag under the same mutex Close uses: Close sets the
	// flag before taking the lock, so either we observe it here and bail,
	// or our entry lands in the map before Close iterates it — never a
	// listener that outlives the tunnel.
	if atomic.LoadInt32(&t.closed) == 1 {
		t.mu.Unlock()
		ln.Close()
		return fmt.Errorf("tunnel to %s is closed", t.PeerNode)
	}
	// A duplicate key (e.g. srcPort 0 used twice for ephemeral ports) would
	// silently overwrite the previous forwarder, leaking its listener and
	// quit channel — reject it instead.
	if _, dup := t.listeners[srcPort]; dup {
		t.mu.Unlock()
		ln.Close()
		return fmt.Errorf("port %d already forwarded", srcPort)
	}
	t.listeners[srcPort] = fwd
	t.mu.Unlock()
	log.Printf("[tunnel] LISTEN %s:%d → %s(%s:%d) via %s", protocol, srcPort, t.PeerNode, dstHost, dstPort, t.LinkMode)
	go t.acceptLoop(fwd)
	return nil
}
// acceptLoop accepts connections on fwd's listener and forwards each one
// through a new mux stream. It exits when the forwarder's quit channel is
// closed or the tunnel has been closed.
func (t *Tunnel) acceptLoop(fwd *forwarder) {
	for {
		conn, err := fwd.listener.Accept()
		if err != nil {
			select {
			case <-fwd.quit:
				return
			default:
				if atomic.LoadInt32(&t.closed) == 1 {
					return
				}
				log.Printf("[tunnel] accept error: %v", err)
				// Back off briefly so a persistent transient error
				// (e.g. fd exhaustion) cannot spin this loop at 100% CPU.
				time.Sleep(100 * time.Millisecond)
				continue
			}
		}
		atomic.AddInt64(&t.stats.Connections, 1)
		go t.handleLocalConn(conn, fwd.dstHost, fwd.dstPort)
	}
}
// handleLocalConn forwards one locally-accepted connection through the tunnel.
// It opens a mux stream, sends a newline-terminated destination header, then
// bridges traffic in both directions until both copies finish.
func (t *Tunnel) handleLocalConn(local net.Conn, dstHost string, dstPort int) {
	defer local.Close()
	// Open a mux stream to the peer.
	stream, err := t.sess.Open()
	if err != nil {
		log.Printf("[tunnel] mux open error: %v", err)
		return
	}
	defer stream.Close()
	atomic.AddInt32(&t.stats.ActiveStreams, 1)
	defer atomic.AddInt32(&t.stats.ActiveStreams, -1)
	// Send destination info as the first message on the stream, terminated
	// by '\n'. net.JoinHostPort brackets IPv6 literals ("[::1]:22") so the
	// peer's net.DialTimeout can parse them; the previous "%s:%d" format
	// produced an unparseable address for any IPv6 dstHost.
	header := net.JoinHostPort(dstHost, fmt.Sprintf("%d", dstPort)) + "\n"
	if _, err := stream.Write([]byte(header)); err != nil {
		log.Printf("[tunnel] stream write header: %v", err)
		return
	}
	// Bidirectional copy.
	t.bridge(local, stream)
}
// AcceptAndConnect handles incoming mux streams (called on the responder side).
// It reads each stream's destination header and connects to the local target.
// The loop ends when the mux session stops accepting.
func (t *Tunnel) AcceptAndConnect() {
	for {
		s, err := t.sess.Accept()
		if err == nil {
			go t.handleRemoteStream(s)
			continue
		}
		// An accept error on a closed session is the normal shutdown path;
		// only log unexpected ones.
		if !t.sess.IsClosed() {
			log.Printf("[tunnel] mux accept error: %v", err)
		}
		return
	}
}
// handleRemoteStream serves one incoming mux stream: it reads the
// "host:port\n" destination header, dials that target locally, and bridges
// the stream to the resulting connection.
func (t *Tunnel) handleRemoteStream(stream *mux.Stream) {
	defer stream.Close()
	atomic.AddInt32(&t.stats.ActiveStreams, 1)
	defer atomic.AddInt32(&t.stats.ActiveStreams, -1)
	// Read the header one byte at a time so no payload bytes belonging to
	// the bridged connection are consumed.
	buf := make([]byte, 256)
	n := 0
	sawNewline := false
	for n < len(buf) {
		nn, err := stream.Read(buf[n : n+1])
		if err != nil {
			log.Printf("[tunnel] read dest header: %v", err)
			return
		}
		if nn == 0 {
			// io.Reader permits a zero-byte read with a nil error; the
			// previous code would then index buf[-1] and panic.
			continue
		}
		n += nn
		if buf[n-1] == '\n' {
			sawNewline = true
			break
		}
	}
	if !sawNewline {
		// Header overflowed the buffer without a terminator; previously the
		// last byte was silently dropped and a garbage dial attempted.
		log.Printf("[tunnel] dest header too long (no newline within %d bytes)", len(buf))
		return
	}
	dest := string(buf[:n-1]) // trim '\n'
	// Connect to the local destination.
	conn, err := net.DialTimeout("tcp", dest, 5*time.Second)
	if err != nil {
		log.Printf("[tunnel] connect to %s failed: %v", dest, err)
		return
	}
	defer conn.Close()
	log.Printf("[tunnel] stream → %s connected", dest)
	// Bidirectional copy.
	t.bridge(conn, stream)
}
// bridge copies data between a and b in both directions, folding the byte
// counts into the tunnel's stats, and returns once both directions have
// finished (EOF or error ends each copy).
func (t *Tunnel) bridge(a, b io.ReadWriter) {
	done := make(chan struct{}, 2)
	pump := func(dst io.Writer, src io.Reader, counter *int64) {
		n, _ := io.Copy(dst, src)
		atomic.AddInt64(counter, n)
		done <- struct{}{}
	}
	go pump(a, b, &t.stats.BytesReceived) // peer → local
	go pump(b, a, &t.stats.BytesSent)     // local → peer
	<-done
	<-done
}
// Close shuts down the tunnel and all listeners. It is safe to call more
// than once; only the first call does any work.
func (t *Tunnel) Close() {
	// The CAS makes Close idempotent and tells the accept loops to exit.
	if !atomic.CompareAndSwapInt32(&t.closed, 0, 1) {
		return
	}
	t.mu.Lock()
	for p, f := range t.listeners {
		close(f.quit)
		f.listener.Close()
		log.Printf("[tunnel] stopped :%d", p)
	}
	t.mu.Unlock()
	// Tear down the mux session last, after local listeners are gone.
	t.sess.Close()
	log.Printf("[tunnel] closed → %s", t.PeerNode)
}
// GetStats returns a snapshot of the traffic statistics. Each counter is
// loaded atomically, so the snapshot is safe to take while traffic flows.
func (t *Tunnel) GetStats() Stats {
	var snap Stats
	snap.BytesSent = atomic.LoadInt64(&t.stats.BytesSent)
	snap.BytesReceived = atomic.LoadInt64(&t.stats.BytesReceived)
	snap.Connections = atomic.LoadInt64(&t.stats.Connections)
	snap.ActiveStreams = atomic.LoadInt32(&t.stats.ActiveStreams)
	return snap
}
// IsAlive reports whether the tunnel is still open: Close has not been
// called and the underlying mux session is not closed.
func (t *Tunnel) IsAlive() bool {
	if atomic.LoadInt32(&t.closed) != 0 {
		return false
	}
	return !t.sess.IsClosed()
}
// NumStreams returns the number of currently active mux streams, as
// reported by the underlying session.
func (t *Tunnel) NumStreams() int {
	return t.sess.NumStreams()
}

176
pkg/tunnel/tunnel_test.go Normal file
View File

@@ -0,0 +1,176 @@
package tunnel
import (
"fmt"
"io"
"net"
"testing"
"time"
)
// TestEndToEndForward verifies a full round trip: a dial into the
// initiator's forwarded local port reaches the echo target behind the
// responder and the response travels back through the tunnel.
func TestEndToEndForward(t *testing.T) {
	// 1. Start a "target" TCP server (simulates SSH on the remote side).
	targetLn, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		t.Fatal(err)
	}
	defer targetLn.Close()
	targetPort := targetLn.Addr().(*net.TCPAddr).Port
	go func() {
		for {
			conn, err := targetLn.Accept()
			if err != nil {
				return
			}
			go func(c net.Conn) {
				defer c.Close()
				buf := make([]byte, 1024)
				n, _ := c.Read(buf)
				c.Write([]byte("ECHO:" + string(buf[:n])))
			}(conn)
		}
	}()
	// 2. Create a connected pair (simulates a P2P punch connection).
	c1, c2 := net.Pipe()
	// 3. Create tunnels on both sides.
	initiator := New("remote-node", c1, "test", 0, true)
	responder := New("local-node", c2, "test", 0, false)
	defer initiator.Close()
	defer responder.Close()
	// Responder accepts incoming mux streams and connects to local targets.
	go responder.AcceptAndConnect()
	// 4. Initiator listens on a local port and forwards to the remote target.
	// NOTE: closing the probe listener and re-binding its port is racy
	// (another process could grab it), but it is the standard free-port
	// trick in tests.
	localLn, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		t.Fatal(err)
	}
	localPort := localLn.Addr().(*net.TCPAddr).Port
	localLn.Close() // free the port so the tunnel can use it
	if err := initiator.ListenAndForward("tcp", localPort, "127.0.0.1", targetPort); err != nil {
		t.Fatal(err)
	}
	time.Sleep(50 * time.Millisecond)
	// 5. Connect to the tunnel's local port.
	conn, err := net.Dial("tcp", fmt.Sprintf("127.0.0.1:%d", localPort))
	if err != nil {
		t.Fatal(err)
	}
	defer conn.Close()
	// 6. Send data and verify the echo. The Write error was previously
	// ignored, which could turn a send failure into a confusing timeout.
	if _, err := conn.Write([]byte("hello-tunnel")); err != nil {
		t.Fatal(err)
	}
	conn.SetReadDeadline(time.Now().Add(3 * time.Second))
	buf := make([]byte, 1024)
	n, err := conn.Read(buf)
	if err != nil {
		t.Fatal(err)
	}
	if got, want := string(buf[:n]), "ECHO:hello-tunnel"; got != want {
		t.Errorf("got %q, want %q", got, want)
	}
}
// TestMultipleConnections opens several concurrent connections through one
// tunnel and checks that each echoes its own payload and that the
// connection counter matches.
func TestMultipleConnections(t *testing.T) {
	// Target server: pure echo. The Listen error was previously discarded,
	// which would make a bind failure surface as a baffling downstream error.
	targetLn, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		t.Fatal(err)
	}
	defer targetLn.Close()
	targetPort := targetLn.Addr().(*net.TCPAddr).Port
	go func() {
		for {
			conn, err := targetLn.Accept()
			if err != nil {
				return
			}
			go func(c net.Conn) {
				defer c.Close()
				io.Copy(c, c) // pure echo
			}(conn)
		}
	}()
	c1, c2 := net.Pipe()
	initiator := New("peer", c1, "test", 0, true)
	responder := New("me", c2, "test", 0, false)
	defer initiator.Close()
	defer responder.Close()
	go responder.AcceptAndConnect()
	localLn, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		t.Fatal(err)
	}
	localPort := localLn.Addr().(*net.TCPAddr).Port
	localLn.Close() // free the port so the tunnel can bind it (racy but standard)
	if err := initiator.ListenAndForward("tcp", localPort, "127.0.0.1", targetPort); err != nil {
		t.Fatal(err)
	}
	time.Sleep(50 * time.Millisecond)
	// Open 5 concurrent connections through the tunnel. Each goroutine
	// reports its own failure via t.Errorf (safe from goroutines); the
	// channel only signals completion. The old version also reported
	// "connection %d failed" with an index unrelated to the failing
	// goroutine, double-counting every failure.
	const N = 5
	done := make(chan struct{}, N)
	for i := 0; i < N; i++ {
		go func(idx int) {
			defer func() { done <- struct{}{} }()
			conn, err := net.Dial("tcp", fmt.Sprintf("127.0.0.1:%d", localPort))
			if err != nil {
				t.Errorf("conn %d: dial: %v", idx, err)
				return
			}
			defer conn.Close()
			msg := fmt.Sprintf("msg-%d", idx)
			if _, err := conn.Write([]byte(msg)); err != nil {
				t.Errorf("conn %d: write: %v", idx, err)
				return
			}
			conn.SetReadDeadline(time.Now().Add(3 * time.Second))
			buf := make([]byte, 256)
			n, err := conn.Read(buf)
			if err != nil || string(buf[:n]) != msg {
				t.Errorf("conn %d: got %q, want %q, err=%v", idx, buf[:n], msg, err)
			}
		}(i)
	}
	for i := 0; i < N; i++ {
		<-done
	}
	if stats := initiator.GetStats(); stats.Connections != N {
		t.Errorf("connections: got %d want %d", stats.Connections, N)
	}
}
// TestTunnelStats checks tunnel liveness before and after Close on both
// ends of a piped connection pair.
func TestTunnelStats(t *testing.T) {
	c1, c2 := net.Pipe()
	a := New("peer", c1, "test", 0, true)
	b := New("me", c2, "test", 0, false)
	defer a.Close()
	defer b.Close()
	// Both ends must report alive right after construction.
	if alive := a.IsAlive() && b.IsAlive(); !alive {
		t.Error("tunnels should be alive")
	}
	a.Close()
	time.Sleep(50 * time.Millisecond)
	// After an explicit Close, the closed end must report dead.
	if a.IsAlive() {
		t.Error("initiator should be dead after close")
	}
}