Commit 2dc1f691 authored by @RubenKelevra

quic: remove experimental status and add it to the default config

parent 53739fae
...@@ -73,6 +73,8 @@ COPY --from=0 /usr/lib/*-linux-gnu*/libcrypto.so* /usr/lib/ ...@@ -73,6 +73,8 @@ COPY --from=0 /usr/lib/*-linux-gnu*/libcrypto.so* /usr/lib/
# Swarm TCP; should be exposed to the public # Swarm TCP; should be exposed to the public
EXPOSE 4001 EXPOSE 4001
# Swarm UDP; should be exposed to the public
EXPOSE 4001/udp
# Daemon API; must not be exposed publicly but to client services under your control # Daemon API; must not be exposed publicly but to client services under your control
EXPOSE 5001 EXPOSE 5001
# Web Gateway; can be exposed publicly with a proxy, e.g. as https://ipfs.example.org # Web Gateway; can be exposed publicly with a proxy, e.g. as https://ipfs.example.org
......
...@@ -344,7 +344,7 @@ IPFS files that will persist when you restart the container. ...@@ -344,7 +344,7 @@ IPFS files that will persist when you restart the container.
Start a container running ipfs and expose ports 4001, 5001 and 8080: Start a container running ipfs and expose ports 4001, 5001 and 8080:
docker run -d --name ipfs_host -v $ipfs_staging:/export -v $ipfs_data:/data/ipfs -p 4001:4001 -p 127.0.0.1:8080:8080 -p 127.0.0.1:5001:5001 ipfs/go-ipfs:latest docker run -d --name ipfs_host -v $ipfs_staging:/export -v $ipfs_data:/data/ipfs -p 4001:4001 -p 4001:4001/udp -p 127.0.0.1:8080:8080 -p 127.0.0.1:5001:5001 ipfs/go-ipfs:latest
Watch the ipfs log: Watch the ipfs log:
...@@ -376,16 +376,16 @@ Stop the running container: ...@@ -376,16 +376,16 @@ Stop the running container:
When starting a container running ipfs for the first time with an empty data directory, it will call `ipfs init` to initialize configuration files and generate a new keypair. At this time, you can choose which profile to apply using the `IPFS_PROFILE` environment variable: When starting a container running ipfs for the first time with an empty data directory, it will call `ipfs init` to initialize configuration files and generate a new keypair. At this time, you can choose which profile to apply using the `IPFS_PROFILE` environment variable:
docker run -d --name ipfs_host -e IPFS_PROFILE=server -v $ipfs_staging:/export -v $ipfs_data:/data/ipfs -p 4001:4001 -p 127.0.0.1:8080:8080 -p 127.0.0.1:5001:5001 ipfs/go-ipfs:latest docker run -d --name ipfs_host -e IPFS_PROFILE=server -v $ipfs_staging:/export -v $ipfs_data:/data/ipfs -p 4001:4001 -p 4001:4001/udp -p 127.0.0.1:8080:8080 -p 127.0.0.1:5001:5001 ipfs/go-ipfs:latest
It is possible to initialize the container with a swarm key file (`/data/ipfs/swarm.key`) using the variables `IPFS_SWARM_KEY` and `IPFS_SWARM_KEY_FILE`. The `IPFS_SWARM_KEY` creates `swarm.key` with the contents of the variable itself, whilst `IPFS_SWARM_KEY_FILE` copies the key from a path stored in the variable. The `IPFS_SWARM_KEY_FILE` **overwrites** the key generated by `IPFS_SWARM_KEY`. It is possible to initialize the container with a swarm key file (`/data/ipfs/swarm.key`) using the variables `IPFS_SWARM_KEY` and `IPFS_SWARM_KEY_FILE`. The `IPFS_SWARM_KEY` creates `swarm.key` with the contents of the variable itself, whilst `IPFS_SWARM_KEY_FILE` copies the key from a path stored in the variable. The `IPFS_SWARM_KEY_FILE` **overwrites** the key generated by `IPFS_SWARM_KEY`.
docker run -d --name ipfs_host -e IPFS_SWARM_KEY=<your swarm key> -v $ipfs_staging:/export -v $ipfs_data:/data/ipfs -p 4001:4001 -p 127.0.0.1:8080:8080 -p 127.0.0.1:5001:5001 ipfs/go-ipfs:latest docker run -d --name ipfs_host -e IPFS_SWARM_KEY=<your swarm key> -v $ipfs_staging:/export -v $ipfs_data:/data/ipfs -p 4001:4001 -p 4001:4001/udp -p 127.0.0.1:8080:8080 -p 127.0.0.1:5001:5001 ipfs/go-ipfs:latest
The swarm key initialization can also be done using docker secrets **(requires docker swarm or docker-compose)**: The swarm key initialization can also be done using docker secrets **(requires docker swarm or docker-compose)**:
cat your_swarm.key | docker secret create swarm_key_secret - cat your_swarm.key | docker secret create swarm_key_secret -
docker run -d --name ipfs_host --secret swarm_key_secret -e IPFS_SWARM_KEY_FILE=/run/secrets/swarm_key_secret -v $ipfs_staging:/export -v $ipfs_data:/data/ipfs -p 4001:4001 -p 127.0.0.1:8080:8080 -p 127.0.0.1:5001:5001 ipfs/go-ipfs:latest docker run -d --name ipfs_host --secret swarm_key_secret -e IPFS_SWARM_KEY_FILE=/run/secrets/swarm_key_secret -v $ipfs_staging:/export -v $ipfs_data:/data/ipfs -p 4001:4001 -p 4001:4001/udp -p 127.0.0.1:8080:8080 -p 127.0.0.1:5001:5001 ipfs/go-ipfs:latest
### Troubleshooting ### Troubleshooting
......
...@@ -20,7 +20,7 @@ func TestInitialization(t *testing.T) { ...@@ -20,7 +20,7 @@ func TestInitialization(t *testing.T) {
{ {
Identity: id, Identity: id,
Addresses: config.Addresses{ Addresses: config.Addresses{
Swarm: []string{"/ip4/0.0.0.0/tcp/4001"}, Swarm: []string{"/ip4/0.0.0.0/tcp/4001", "/ip4/0.0.0.0/udp/4001/quic"},
API: []string{"/ip4/127.0.0.1/tcp/8000"}, API: []string{"/ip4/127.0.0.1/tcp/8000"},
}, },
}, },
...@@ -28,7 +28,7 @@ func TestInitialization(t *testing.T) { ...@@ -28,7 +28,7 @@ func TestInitialization(t *testing.T) {
{ {
Identity: id, Identity: id,
Addresses: config.Addresses{ Addresses: config.Addresses{
Swarm: []string{"/ip4/0.0.0.0/tcp/4001"}, Swarm: []string{"/ip4/0.0.0.0/tcp/4001", "/ip4/0.0.0.0/udp/4001/quic"},
API: []string{"/ip4/127.0.0.1/tcp/8000"}, API: []string{"/ip4/127.0.0.1/tcp/8000"},
}, },
}, },
......
...@@ -140,7 +140,7 @@ func defaultRepo(dstore repo.Datastore) (repo.Repo, error) { ...@@ -140,7 +140,7 @@ func defaultRepo(dstore repo.Datastore) (repo.Repo, error) {
} }
c.Bootstrap = cfg.DefaultBootstrapAddresses c.Bootstrap = cfg.DefaultBootstrapAddresses
c.Addresses.Swarm = []string{"/ip4/0.0.0.0/tcp/4001"} c.Addresses.Swarm = []string{"/ip4/0.0.0.0/tcp/4001", "/ip4/0.0.0.0/udp/4001/quic"}
c.Identity.PeerID = pid.Pretty() c.Identity.PeerID = pid.Pretty()
c.Identity.PrivKey = base64.StdEncoding.EncodeToString(privkeyb) c.Identity.PrivKey = base64.StdEncoding.EncodeToString(privkeyb)
......
...@@ -130,7 +130,6 @@ func LibP2P(bcfg *BuildCfg, cfg *config.Config) fx.Option { ...@@ -130,7 +130,6 @@ func LibP2P(bcfg *BuildCfg, cfg *config.Config) fx.Option {
maybeProvide(libp2p.BandwidthCounter, !cfg.Swarm.DisableBandwidthMetrics), maybeProvide(libp2p.BandwidthCounter, !cfg.Swarm.DisableBandwidthMetrics),
maybeProvide(libp2p.NatPortMap, !cfg.Swarm.DisableNatPortMap), maybeProvide(libp2p.NatPortMap, !cfg.Swarm.DisableNatPortMap),
maybeProvide(libp2p.AutoRelay, cfg.Swarm.EnableAutoRelay), maybeProvide(libp2p.AutoRelay, cfg.Swarm.EnableAutoRelay),
maybeProvide(libp2p.QUIC, cfg.Experimental.QUIC),
autonat, autonat,
connmgr, connmgr,
ps, ps,
......
...@@ -8,7 +8,7 @@ import ( ...@@ -8,7 +8,7 @@ import (
tls "github.com/libp2p/go-libp2p-tls" tls "github.com/libp2p/go-libp2p-tls"
) )
var DefaultTransports = simpleOpt(libp2p.DefaultTransports) var DefaultTransports = simpleOpt(libp2p.ChainOptions(libp2p.DefaultTransports, libp2p.Transport(libp2pquic.NewTransport)))
var QUIC = simpleOpt(libp2p.Transport(libp2pquic.NewTransport)) var QUIC = simpleOpt(libp2p.Transport(libp2pquic.NewTransport))
func Security(enabled bool) interface{} { func Security(enabled bool) interface{} {
......
...@@ -200,7 +200,9 @@ Default: ...@@ -200,7 +200,9 @@ Default:
```json ```json
[ [
"/ip4/0.0.0.0/tcp/4001", "/ip4/0.0.0.0/tcp/4001",
"/ip6/::/tcp/4001" "/ip6/::/tcp/4001",
"/ip6/0.0.0.0/udp/4001/quic",
"/ip6/::/udp/4001/quic"
] ]
``` ```
......
...@@ -300,12 +300,17 @@ func main() { ...@@ -300,12 +300,17 @@ func main() {
// IPFS Cluster Pinning nodes // IPFS Cluster Pinning nodes
"/ip4/138.201.67.219/tcp/4001/p2p/QmUd6zHcbkbcs7SMxwLs48qZVX3vpcM8errYS7xEczwRMA", "/ip4/138.201.67.219/tcp/4001/p2p/QmUd6zHcbkbcs7SMxwLs48qZVX3vpcM8errYS7xEczwRMA",
"/ip4/138.201.67.219/udp/4001/quic/p2p/QmUd6zHcbkbcs7SMxwLs48qZVX3vpcM8errYS7xEczwRMA",
"/ip4/138.201.67.220/tcp/4001/p2p/QmNSYxZAiJHeLdkBg38roksAR9So7Y5eojks1yjEcUtZ7i", "/ip4/138.201.67.220/tcp/4001/p2p/QmNSYxZAiJHeLdkBg38roksAR9So7Y5eojks1yjEcUtZ7i",
"/ip4/138.201.67.220/udp/4001/quic/p2p/QmNSYxZAiJHeLdkBg38roksAR9So7Y5eojks1yjEcUtZ7i",
"/ip4/138.201.68.74/tcp/4001/p2p/QmdnXwLrC8p1ueiq2Qya8joNvk3TVVDAut7PrikmZwubtR", "/ip4/138.201.68.74/tcp/4001/p2p/QmdnXwLrC8p1ueiq2Qya8joNvk3TVVDAut7PrikmZwubtR",
"/ip4/138.201.68.74/udp/4001/quic/p2p/QmdnXwLrC8p1ueiq2Qya8joNvk3TVVDAut7PrikmZwubtR",
"/ip4/94.130.135.167/tcp/4001/p2p/QmUEMvxS2e7iDrereVYc5SWPauXPyNwxcy9BXZrC1QTcHE", "/ip4/94.130.135.167/tcp/4001/p2p/QmUEMvxS2e7iDrereVYc5SWPauXPyNwxcy9BXZrC1QTcHE",
"/ip4/94.130.135.167/udp/4001/quic/p2p/QmUEMvxS2e7iDrereVYc5SWPauXPyNwxcy9BXZrC1QTcHE",
// You can add more nodes here, for example, another IPFS node you might have running locally, mine was: // You can add more nodes here, for example, another IPFS node you might have running locally, mine was:
// "/ip4/127.0.0.1/tcp/4010/p2p/QmZp2fhDLxjYue2RiUvLwT9MWdnbDxam32qYFnGmxZDh5L", // "/ip4/127.0.0.1/tcp/4010/p2p/QmZp2fhDLxjYue2RiUvLwT9MWdnbDxam32qYFnGmxZDh5L",
// "/ip4/127.0.0.1/udp/4010/quic/p2p/QmZp2fhDLxjYue2RiUvLwT9MWdnbDxam32qYFnGmxZDh5L",
} }
go connectToPeers(ctx, ipfs, bootstrapNodes) go connectToPeers(ctx, ipfs, bootstrapNodes)
......
...@@ -56,8 +56,6 @@ func createTempRepo(ctx context.Context) (string, error) { ...@@ -56,8 +56,6 @@ func createTempRepo(ctx context.Context) (string, error) {
cfg.Experimental.Libp2pStreamMounting = true cfg.Experimental.Libp2pStreamMounting = true
// https://github.com/ipfs/go-ipfs/blob/master/docs/experimental-features.md#p2p-http-proxy // https://github.com/ipfs/go-ipfs/blob/master/docs/experimental-features.md#p2p-http-proxy
cfg.Experimental.P2pHttpProxy = true cfg.Experimental.P2pHttpProxy = true
// https://github.com/ipfs/go-ipfs/blob/master/docs/experimental-features.md#quic
cfg.Experimental.QUIC = true
// https://github.com/ipfs/go-ipfs/blob/master/docs/experimental-features.md#strategic-providing // https://github.com/ipfs/go-ipfs/blob/master/docs/experimental-features.md#strategic-providing
cfg.Experimental.StrategicProviding = true cfg.Experimental.StrategicProviding = true
......
...@@ -22,7 +22,6 @@ the above issue. ...@@ -22,7 +22,6 @@ the above issue.
- [Plugins](#plugins) - [Plugins](#plugins)
- [Directory Sharding / HAMT](#directory-sharding--hamt) - [Directory Sharding / HAMT](#directory-sharding--hamt)
- [IPNS PubSub](#ipns-pubsub) - [IPNS PubSub](#ipns-pubsub)
- [QUIC](#quic)
- [AutoRelay](#autorelay) - [AutoRelay](#autorelay)
- [Strategic Providing](#strategic-providing) - [Strategic Providing](#strategic-providing)
- [Graphsync](#graphsync) - [Graphsync](#graphsync)
...@@ -463,35 +462,6 @@ run your daemon with the `--enable-namesys-pubsub` flag; enables pubsub. ...@@ -463,35 +462,6 @@ run your daemon with the `--enable-namesys-pubsub` flag; enables pubsub.
- [ ] Needs more people to use and report on how well it works - [ ] Needs more people to use and report on how well it works
- [ ] Pubsub enabled as a real feature - [ ] Pubsub enabled as a real feature
## QUIC
### In Version
0.4.18
### State
Candidate, disabled by default but it will be enabled by default in 0.6.0.
### How to enable
Modify your ipfs config:
```
ipfs config --json Experimental.QUIC true
```
For listening on a QUIC address, add it to the swarm addresses, e.g. `/ip4/0.0.0.0/udp/4001/quic`.
### Road to being a real feature
- [ ] The IETF QUIC specification needs to be finalized.
- [ ] Make sure QUIC connections work reliably
- [ ] Make sure QUIC connection offer equal or better performance than TCP connections on real-world networks
- [ ] Finalize libp2p-TLS handshake spec.
## AutoRelay ## AutoRelay
### In Version ### In Version
......
...@@ -46,7 +46,9 @@ addresses (like the example below), then your nodes are online. ...@@ -46,7 +46,9 @@ addresses (like the example below), then your nodes are online.
"PublicKey": "CAASpgIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDZb6znj3LQZKP1+X81exf+vbnqNCMtHjZ5RKTCm7Fytnfe+AI1fhs9YbZdkgFkM1HLxmIOLQj2bMXPIGxUM+EnewN8tWurx4B3+lR/LWNwNYcCFL+jF2ltc6SE6BC8kMLEZd4zidOLPZ8lIRpd0x3qmsjhGefuRwrKeKlR4tQ3C76ziOms47uLdiVVkl5LyJ5+mn4rXOjNKt/oy2O4m1St7X7/yNt8qQgYsPfe/hCOywxCEIHEkqmil+vn7bu4RpAtsUzCcBDoLUIWuU3i6qfytD05hP8Clo+at+l//ctjMxylf3IQ5qyP+yfvazk+WHcsB0tWueEmiU5P2nfUUIR3AgMBAAE=", "PublicKey": "CAASpgIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDZb6znj3LQZKP1+X81exf+vbnqNCMtHjZ5RKTCm7Fytnfe+AI1fhs9YbZdkgFkM1HLxmIOLQj2bMXPIGxUM+EnewN8tWurx4B3+lR/LWNwNYcCFL+jF2ltc6SE6BC8kMLEZd4zidOLPZ8lIRpd0x3qmsjhGefuRwrKeKlR4tQ3C76ziOms47uLdiVVkl5LyJ5+mn4rXOjNKt/oy2O4m1St7X7/yNt8qQgYsPfe/hCOywxCEIHEkqmil+vn7bu4RpAtsUzCcBDoLUIWuU3i6qfytD05hP8Clo+at+l//ctjMxylf3IQ5qyP+yfvazk+WHcsB0tWueEmiU5P2nfUUIR3AgMBAAE=",
"Addresses": [ "Addresses": [
"/ip4/127.0.0.1/tcp/4001/p2p/QmTNwsFkLAed15kQEC1ZJWPfoNbBQnMFojfJKQ9sZj1dk8", "/ip4/127.0.0.1/tcp/4001/p2p/QmTNwsFkLAed15kQEC1ZJWPfoNbBQnMFojfJKQ9sZj1dk8",
"/ip4/127.0.0.1/udp/4001/quic/p2p/QmTNwsFkLAed15kQEC1ZJWPfoNbBQnMFojfJKQ9sZj1dk8",
"/ip4/192.168.2.131/tcp/4001/p2p/QmTNwsFkLAed15kQEC1ZJWPfoNbBQnMFojfJKQ9sZj1dk8", "/ip4/192.168.2.131/tcp/4001/p2p/QmTNwsFkLAed15kQEC1ZJWPfoNbBQnMFojfJKQ9sZj1dk8",
"/ip4/192.168.2.131/udp/4001/quic/p2p/QmTNwsFkLAed15kQEC1ZJWPfoNbBQnMFojfJKQ9sZj1dk8",
], ],
"AgentVersion": "go-ipfs/0.4.11-dev/", "AgentVersion": "go-ipfs/0.4.11-dev/",
"ProtocolVersion": "ipfs/0.1.0" "ProtocolVersion": "ipfs/0.1.0"
...@@ -90,8 +92,11 @@ Example output of addresses might look something like this: ...@@ -90,8 +92,11 @@ Example output of addresses might look something like this:
``` ```
/ip4/127.0.0.1/tcp/4001 /ip4/127.0.0.1/tcp/4001
/ip4/127.0.0.1/udp/4001/quic
/ip4/192.168.2.133/tcp/4001 /ip4/192.168.2.133/tcp/4001
/ip4/192.168.2.133/udp/4001/quic
/ip4/88.157.217.196/tcp/63674 /ip4/88.157.217.196/tcp/63674
/ip4/88.157.217.196/udp/63674/quic
``` ```
In this case, we can see a localhost (127.0.0.1) address, a LAN address (the In this case, we can see a localhost (127.0.0.1) address, a LAN address (the
......
...@@ -89,12 +89,6 @@ test_expect_success "set up tcp testbed" ' ...@@ -89,12 +89,6 @@ test_expect_success "set up tcp testbed" '
iptb testbed create -type localipfs -count 2 -force -init iptb testbed create -type localipfs -count 2 -force -init
' '
# Enable quic but don't use it yet.
test_expect_success "enable QUIC experiment" '
ipfsi 0 config --json Experimental.QUIC true &&
ipfsi 1 config --json Experimental.QUIC true
'
# test multiplex muxer # test multiplex muxer
echo "Running advanced tests with mplex" echo "Running advanced tests with mplex"
export LIBP2P_MUX_PREFS="/mplex/6.7.0" export LIBP2P_MUX_PREFS="/mplex/6.7.0"
......
...@@ -21,11 +21,6 @@ test_expect_success 'filter 127.0.0.0/24 on node 1' ' ...@@ -21,11 +21,6 @@ test_expect_success 'filter 127.0.0.0/24 on node 1' '
' '
for i in $(seq 0 $(( NUM_NODES - 1 ))); do for i in $(seq 0 $(( NUM_NODES - 1 ))); do
test_expect_success 'enable quic for node $i' '
echo "$i"
ipfsi $i config --json Experimental.QUIC true
'
test_expect_success "change IP for node $i" ' test_expect_success "change IP for node $i" '
ipfsi $i config --json "Addresses.Swarm" \ ipfsi $i config --json "Addresses.Swarm" \
"[\"/ip4/127.0.$i.1/tcp/0\",\"/ip4/127.0.$i.1/udp/0/quic\",\"/ip4/127.0.$i.1/tcp/0/ws\"]" "[\"/ip4/127.0.$i.1/tcp/0\",\"/ip4/127.0.$i.1/udp/0/quic\",\"/ip4/127.0.$i.1/tcp/0/ws\"]"
......
...@@ -11,11 +11,6 @@ test_expect_success 'init iptb' ' ...@@ -11,11 +11,6 @@ test_expect_success 'init iptb' '
iptb testbed create -type localipfs -count 2 -init iptb testbed create -type localipfs -count 2 -init
' '
test_expect_success "enable QUIC experiment" '
ipfsi 0 config --json Experimental.QUIC true &&
ipfsi 1 config --json Experimental.QUIC true
'
addr1='"[\"/ip4/127.0.0.1/udp/0/quic/\"]"' addr1='"[\"/ip4/127.0.0.1/udp/0/quic/\"]"'
addr2='"[\"/ip4/127.0.0.1/udp/0/quic/\"]"' addr2='"[\"/ip4/127.0.0.1/udp/0/quic/\"]"'
test_expect_success "add QUIC swarm addresses" ' test_expect_success "add QUIC swarm addresses" '
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment