diff --git a/.baseimage-release b/.baseimage-release new file mode 100644 index 00000000000..6e8bf73aa55 --- /dev/null +++ b/.baseimage-release @@ -0,0 +1 @@ +0.1.0 diff --git a/.gitignore b/.gitignore index 23147526e14..7a9e3f87b43 100755 --- a/.gitignore +++ b/.gitignore @@ -26,3 +26,8 @@ go-carpet-coverage* # make node-sdk copied files sdk/node/lib/protos/* report.xml +.settings +.project +.gradle +build/ +bin/ diff --git a/Makefile b/Makefile index 1bdf1ec7ced..93dee3c6d5f 100644 --- a/Makefile +++ b/Makefile @@ -37,7 +37,7 @@ # - dist-clean - superset of 'clean' that also removes persistent state PROJECT_NAME = hyperledger/fabric -BASE_VERSION = 0.6.1-preview +BASE_VERSION = 0.7.0 IS_RELEASE = false ifneq ($(IS_RELEASE),true) @@ -47,13 +47,16 @@ else PROJECT_VERSION=$(BASE_VERSION) endif -DOCKER_TAG=$(shell uname -m)-$(PROJECT_VERSION) - PKGNAME = github.com/$(PROJECT_NAME) GO_LDFLAGS = -X github.com/hyperledger/fabric/metadata.Version=$(PROJECT_VERSION) CGO_FLAGS = CGO_CFLAGS=" " CGO_LDFLAGS="-lrocksdb -lstdc++ -lm -lz -lbz2 -lsnappy" UID = $(shell id -u) +ARCH=$(shell uname -m) CHAINTOOL_RELEASE=v0.9.1 +BASEIMAGE_RELEASE=$(shell cat ./.baseimage-release) + +DOCKER_TAG=$(ARCH)-$(PROJECT_VERSION) +BASE_DOCKER_TAG=$(ARCH)-$(BASEIMAGE_RELEASE) EXECUTABLES = go docker git curl K := $(foreach exec,$(EXECUTABLES),\ @@ -63,13 +66,9 @@ K := $(foreach exec,$(EXECUTABLES),\ SUBDIRS = gotools sdk/node SUBDIRS:=$(strip $(SUBDIRS)) -# Make our baseimage depend on any changes to images/base or scripts/provision -BASEIMAGE_RELEASE = $(shell cat ./images/base/release) -BASEIMAGE_DEPS = $(shell git ls-files images/base scripts/provision) - JAVASHIM_DEPS = $(shell git ls-files core/chaincode/shim/java) PROJECT_FILES = $(shell git ls-files) -IMAGES = base src ccenv peer membersrvc javaenv +IMAGES = src ccenv peer membersrvc javaenv all: peer membersrvc checks @@ -122,14 +121,14 @@ linter: gotools # we may later inject the binary into a different docker environment # This is necessary since we cannot guarantee that binaries built # on the host natively will be compatible with the docker env. -%/bin/protoc-gen-go: build/image/base/.dummy Makefile +%/bin/protoc-gen-go: Makefile @echo "Building $@" @mkdir -p $(@D) @docker run -i \ --user=$(UID) \ -v $(abspath vendor/github.com/golang/protobuf):/opt/gopath/src/github.com/golang/protobuf \ -v $(abspath $(@D)):/opt/gopath/bin \ - hyperledger/fabric-baseimage go install github.com/golang/protobuf/protoc-gen-go + hyperledger/fabric-baseimage:$(BASE_DOCKER_TAG) go install github.com/golang/protobuf/protoc-gen-go build/bin/chaintool: Makefile @echo "Installing chaintool" @@ -172,25 +171,19 @@ build/bin/block-listener: @echo "Binary available as $@" @touch $@ -build/bin/%: build/image/base/.dummy $(PROJECT_FILES) +build/bin/%: $(PROJECT_FILES) @mkdir -p $(@D) @echo "$@" $(CGO_FLAGS) GOBIN=$(abspath $(@D)) go install -ldflags "$(GO_LDFLAGS)" $(PKGNAME)/$(@F) @echo "Binary available as $@" @touch $@ -# Special override for base-image. 
-build/image/base/.dummy: $(BASEIMAGE_DEPS) - @echo "Building docker base-image" - @mkdir -p $(@D) - @./scripts/provision/docker.sh $(BASEIMAGE_RELEASE) - @touch $@ - # Special override for src-image -build/image/src/.dummy: build/image/base/.dummy $(PROJECT_FILES) +build/image/src/.dummy: $(PROJECT_FILES) @echo "Building docker src-image" @mkdir -p $(@D) @cat images/src/Dockerfile.in \ + | sed -e 's/_BASE_TAG_/$(BASE_DOCKER_TAG)/g' \ | sed -e 's/_TAG_/$(DOCKER_TAG)/g' \ > $(@D)/Dockerfile @git ls-files | tar -jcT - > $(@D)/gopath.tar.bz2 @@ -202,6 +195,7 @@ build/image/src/.dummy: build/image/base/.dummy $(PROJECT_FILES) build/image/ccenv/.dummy: build/image/src/.dummy build/image/ccenv/bin/protoc-gen-go build/image/ccenv/bin/chaintool Makefile @echo "Building docker ccenv-image" @cat images/ccenv/Dockerfile.in \ + | sed -e 's/_BASE_TAG_/$(BASE_DOCKER_TAG)/g' \ | sed -e 's/_TAG_/$(DOCKER_TAG)/g' \ > $(@D)/Dockerfile docker build -t $(PROJECT_NAME)-ccenv $(@D) @@ -216,7 +210,10 @@ build/image/ccenv/.dummy: build/image/src/.dummy build/image/ccenv/bin/protoc-ge build/image/javaenv/.dummy: Makefile $(JAVASHIM_DEPS) @echo "Building docker javaenv-image" @mkdir -p $(@D) - @cat images/javaenv/Dockerfile.in > $(@D)/Dockerfile + @cat images/javaenv/Dockerfile.in \ + | sed -e 's/_BASE_TAG_/$(BASE_DOCKER_TAG)/g' \ + | sed -e 's/_TAG_/$(DOCKER_TAG)/g' \ + > $(@D)/Dockerfile @git ls-files core/chaincode/shim/java | tar -jcT - > $(@D)/javashimsrc.tar.bz2 @git ls-files protos core/chaincode/shim/table.proto settings.gradle | tar -jcT - > $(@D)/protos.tar.bz2 docker build -t $(PROJECT_NAME)-javaenv $(@D) @@ -229,6 +226,7 @@ build/image/%/.dummy: build/image/src/.dummy build/docker/bin/% @echo "Building docker $(TARGET)-image" @mkdir -p $(@D)/bin @cat images/app/Dockerfile.in \ + | sed -e 's/_BASE_TAG_/$(BASE_DOCKER_TAG)/g' \ | sed -e 's/_TAG_/$(DOCKER_TAG)/g' \ > $(@D)/Dockerfile cp build/docker/bin/$(TARGET) $(@D)/bin @@ -240,10 +238,6 @@ build/image/%/.dummy: build/image/src/.dummy build/docker/bin/% protos: gotools ./devenv/compile_protos.sh -base-image-clean: - -docker rmi -f $(PROJECT_NAME)-baseimage - -@rm -rf build/image/base ||: - src-image-clean: ccenv-image-clean peer-image-clean membersrvc-image-clean %-image-clean: diff --git a/core/chaincode/chaincode_support.go b/core/chaincode/chaincode_support.go index 4a23327313b..fd21dfc5304 100644 --- a/core/chaincode/chaincode_support.go +++ b/core/chaincode/chaincode_support.go @@ -25,6 +25,7 @@ import ( "time" "github.com/golang/protobuf/proto" + logging "github.com/op/go-logging" "github.com/spf13/viper" "golang.org/x/net/context" @@ -34,6 +35,7 @@ import ( "github.com/hyperledger/fabric/core/container/ccintf" "github.com/hyperledger/fabric/core/crypto" "github.com/hyperledger/fabric/core/ledger" + "github.com/hyperledger/fabric/flogging" pb "github.com/hyperledger/fabric/protos" ) @@ -147,6 +149,21 @@ func NewChaincodeSupport(chainname ChainName, getPeerEndpoint func() (*pb.PeerEn s.keepalive = time.Duration(t) * time.Second } + viper.SetEnvPrefix("CORE") + viper.AutomaticEnv() + replacer := strings.NewReplacer(".", "_") + viper.SetEnvKeyReplacer(replacer) + + chaincodeLogLevelString := viper.GetString("logging.chaincode") + chaincodeLogLevel, err := logging.LogLevel(chaincodeLogLevelString) + + if err == nil { + s.chaincodeLogLevel = chaincodeLogLevel.String() + } else { + chaincodeLogger.Infof("chaincode logging level %s is invalid. 
defaulting to %s\n", chaincodeLogLevelString, flogging.DefaultLoggingLevel().String()) + s.chaincodeLogLevel = flogging.DefaultLoggingLevel().String() + } + return s } @@ -172,6 +189,7 @@ type ChaincodeSupport struct { peerTLSKeyFile string peerTLSSvrHostOrd string keepalive time.Duration + chaincodeLogLevel string } // DuplicateChaincodeHandlerError returned if attempt to register same chaincodeID while a stream already exists. @@ -290,6 +308,11 @@ func (chaincodeSupport *ChaincodeSupport) getArgsAndEnv(cID *pb.ChaincodeID, cLa } else { envs = append(envs, "CORE_PEER_TLS_ENABLED=false") } + + if chaincodeSupport.chaincodeLogLevel != "" { + envs = append(envs, "CORE_LOGGING_CHAINCODE="+chaincodeSupport.chaincodeLogLevel) + } + switch cLang { case pb.ChaincodeSpec_GOLANG, pb.ChaincodeSpec_CAR: //chaincode executable will be same as the name of the chaincode diff --git a/core/chaincode/chaincodetest.yaml b/core/chaincode/chaincodetest.yaml index 63b7983de97..18e7483632e 100644 --- a/core/chaincode/chaincodetest.yaml +++ b/core/chaincode/chaincodetest.yaml @@ -165,15 +165,6 @@ peer: # networkId: test networkId: dev - Dockerfile: | - from hyperledger/fabric-baseimage:latest - # Copy GOPATH src and install Peer - COPY src $GOPATH/src - RUN mkdir -p /var/hyperledger/db - WORKDIR $GOPATH/src/github.com/hyperledger/fabric/peer/ - RUN CGO_CFLAGS=" " CGO_LDFLAGS="-lrocksdb -lstdc++ -lm -lz -lbz2 -lsnappy" go install && cp $GOPATH/src/github.com/hyperledger/fabric/peer/core.yaml $GOPATH/bin - - # The Address this Peer will listen on listenAddress: 0.0.0.0:21212 # The Address this Peer will bind to for providing services @@ -363,17 +354,16 @@ chaincode: # This is the basis for the Golang Dockerfile. Additional commands will # be appended depedendent upon the chaincode specification. Dockerfile: | - from hyperledger/fabric-baseimage - #from utxo:0.1.0 - COPY src $GOPATH/src - WORKDIR $GOPATH + FROM hyperledger/fabric-ccenv:$(ARCH)-$(PROJECT_VERSION) + COPY src $GOPATH/src + WORKDIR $GOPATH car: # This is the basis for the CAR Dockerfile. Additional commands will # be appended depedendent upon the chaincode specification. Dockerfile: | - FROM hyperledger/fabric-baseimage + FROM hyperledger/fabric-ccenv:$(ARCH)-$(PROJECT_VERSION) # timeout in millisecs for starting up a container and waiting for Register # to come through. 
1sec should be plenty for chaincode unit tests diff --git a/core/chaincode/exectransaction_test.go b/core/chaincode/exectransaction_test.go index a1d10fcd052..0bca0bc93f7 100644 --- a/core/chaincode/exectransaction_test.go +++ b/core/chaincode/exectransaction_test.go @@ -827,6 +827,18 @@ func TestChaincodeInvokeChaincode(t *testing.T) { go grpcServer.Serve(lis) + err = chaincodeInvokeChaincode(t, "") + if err != nil { + t.Fail() + t.Logf("Failed chaincode invoke chaincode : %s", err) + closeListenerAndSleep(lis) + return + } + + closeListenerAndSleep(lis) +} + +func chaincodeInvokeChaincode(t *testing.T, user string) (err error) { var ctxt = context.Background() // Deploy first chaincode @@ -836,7 +848,7 @@ func TestChaincodeInvokeChaincode(t *testing.T) { f := "init" args := util.ToChaincodeArgs(f, "a", "100", "b", "200") - spec1 := &pb.ChaincodeSpec{Type: 1, ChaincodeID: cID1, CtorMsg: &pb.ChaincodeInput{Args: args}} + spec1 := &pb.ChaincodeSpec{Type: 1, ChaincodeID: cID1, CtorMsg: &pb.ChaincodeInput{Args: args}, SecureContext: user} _, err = deploy(ctxt, spec1) chaincodeID1 := spec1.ChaincodeID.Name @@ -844,7 +856,6 @@ func TestChaincodeInvokeChaincode(t *testing.T) { t.Fail() t.Logf("Error initializing chaincode %s(%s)", chaincodeID1, err) GetChain(DefaultChain).Stop(ctxt, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec1}) - closeListenerAndSleep(lis) return } @@ -859,7 +870,7 @@ func TestChaincodeInvokeChaincode(t *testing.T) { f = "init" args = util.ToChaincodeArgs(f, "e", "0") - spec2 := &pb.ChaincodeSpec{Type: 1, ChaincodeID: cID2, CtorMsg: &pb.ChaincodeInput{Args: args}} + spec2 := &pb.ChaincodeSpec{Type: 1, ChaincodeID: cID2, CtorMsg: &pb.ChaincodeInput{Args: args}, SecureContext: user} _, err = deploy(ctxt, spec2) chaincodeID2 := spec2.ChaincodeID.Name @@ -868,7 +879,6 @@ func TestChaincodeInvokeChaincode(t *testing.T) { t.Logf("Error initializing chaincode %s(%s)", chaincodeID2, err) GetChain(DefaultChain).Stop(ctxt, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec1}) GetChain(DefaultChain).Stop(ctxt, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec2}) - closeListenerAndSleep(lis) return } @@ -878,7 +888,7 @@ func TestChaincodeInvokeChaincode(t *testing.T) { f = "invoke" args = util.ToChaincodeArgs(f, "e", "1") - spec2 = &pb.ChaincodeSpec{Type: 1, ChaincodeID: cID2, CtorMsg: &pb.ChaincodeInput{Args: args}} + spec2 = &pb.ChaincodeSpec{Type: 1, ChaincodeID: cID2, CtorMsg: &pb.ChaincodeInput{Args: args}, SecureContext: user} // Invoke chaincode var uuid string _, uuid, _, err = invoke(ctxt, spec2, pb.Transaction_CHAINCODE_INVOKE) @@ -888,7 +898,6 @@ func TestChaincodeInvokeChaincode(t *testing.T) { t.Logf("Error invoking <%s>: %s", chaincodeID2, err) GetChain(DefaultChain).Stop(ctxt, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec1}) GetChain(DefaultChain).Stop(ctxt, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec2}) - closeListenerAndSleep(lis) return } @@ -899,13 +908,67 @@ func TestChaincodeInvokeChaincode(t *testing.T) { t.Logf("Incorrect final state after transaction for <%s>: %s", chaincodeID1, err) GetChain(DefaultChain).Stop(ctxt, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec1}) GetChain(DefaultChain).Stop(ctxt, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec2}) - closeListenerAndSleep(lis) return } GetChain(DefaultChain).Stop(ctxt, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec1}) GetChain(DefaultChain).Stop(ctxt, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec2}) - closeListenerAndSleep(lis) + + return +} + +func TestChaincodeInvokeChaincodeWithSec(t *testing.T) { + 
testDBWrapper.CleanDB(t)
+ viper.Set("security.enabled", "true")
+
+ //Initialize crypto
+ if err := crypto.Init(); err != nil {
+ panic(fmt.Errorf("Failed initializing the crypto layer [%s]", err))
+ }
+
+ //set paths for memberservice to pick up
+ viper.Set("peer.fileSystemPath", filepath.Join(os.TempDir(), "hyperledger", "production"))
+ viper.Set("server.rootpath", filepath.Join(os.TempDir(), "ca"))
+
+ var err error
+ var memSrvcLis net.Listener
+ if memSrvcLis, err = initMemSrvc(); err != nil {
+ t.Fail()
+ t.Logf("Error registering user %s", err)
+ return
+ }
+
+ time.Sleep(2 * time.Second)
+
+ var peerLis net.Listener
+ if peerLis, err = initPeer(); err != nil {
+ finitMemSrvc(memSrvcLis)
+ t.Fail()
+ t.Logf("Error registering user %s", err)
+ return
+ }
+
+ if err = crypto.RegisterClient("jim", nil, "jim", "6avZQLwcUe9b"); err != nil {
+ finitMemSrvc(memSrvcLis)
+ finitPeer(peerLis)
+ t.Fail()
+ t.Logf("Error registering user %s", err)
+ return
+ }
+
+ //login as jim and test chaincode-chaincode interaction with security
+ if err = chaincodeInvokeChaincode(t, "jim"); err != nil {
+ finitMemSrvc(memSrvcLis)
+ finitPeer(peerLis)
+ t.Fail()
+ t.Logf("Error executing test %s", err)
+ return
+ }
+
+ //cleanup
+ finitMemSrvc(memSrvcLis)
+ finitPeer(peerLis)
+
}

// Test the execution of a chaincode that invokes another chaincode with wrong parameters. Should receive error from
diff --git a/core/chaincode/handler.go b/core/chaincode/handler.go
index 52c66deeef1..b045a442def 100644
--- a/core/chaincode/handler.go
+++ b/core/chaincode/handler.go
@@ -161,9 +161,9 @@ func (handler *Handler) deleteRangeQueryIterator(txContext *transactionContext,
 delete(txContext.rangeQueryIteratorMap, txid)
 }
-//THIS CAN BE REMOVED ONCE WE SUPPORT CONFIDENTIALITY WITH CC-CALLING-CC
-//we dissallow chaincode-chaincode interactions till confidentiality implications are understood
-func (handler *Handler) canCallChaincode(txid string) *pb.ChaincodeMessage {
+//THIS CAN BE REMOVED ONCE WE FULLY SUPPORT (Invoke and Query) CONFIDENTIALITY WITH CC-CALLING-CC
+//Only invocations are allowed, not queries
+func (handler *Handler) canCallChaincode(txid string, isQuery bool) *pb.ChaincodeMessage {
 secHelper := handler.chaincodeSupport.getSecHelper()
 if secHelper == nil {
 return nil
@@ -176,7 +176,9 @@ func (handler *Handler) canCallChaincode(txid string) *pb.ChaincodeMessage {
 } else if txctx.transactionSecContext == nil {
 errMsg = fmt.Sprintf("[%s]Error transaction context is nil while checking for confidentiality. Sending %s", shorttxid(txid), pb.ChaincodeMessage_ERROR)
 } else if txctx.transactionSecContext.ConfidentialityLevel != pb.ConfidentialityLevel_PUBLIC {
- errMsg = fmt.Sprintf("[%s]Error chaincode-chaincode interactions not supported for with privacy enabled. Sending %s", shorttxid(txid), pb.ChaincodeMessage_ERROR)
+ if isQuery {
+ errMsg = fmt.Sprintf("[%s]Error chaincode-chaincode interactions not supported with privacy enabled. Sending %s", shorttxid(txid), pb.ChaincodeMessage_ERROR)
+ }
 }
 if errMsg != "" {
@@ -209,10 +211,12 @@ func (handler *Handler) encryptOrDecrypt(encrypt bool, txid string, payload []by
 var err error
 if txctx.transactionSecContext.Type == pb.Transaction_CHAINCODE_DEPLOY {
 if enc, err = secHelper.GetStateEncryptor(handler.deployTXSecContext, handler.deployTXSecContext); err != nil {
+ chaincodeLogger.Errorf("error getting crypto encryptor for deploy tx :%s", err)
 return nil, fmt.Errorf("error getting crypto encryptor for deploy tx :%s", err)
 }
 } else if txctx.transactionSecContext.Type == pb.Transaction_CHAINCODE_INVOKE || txctx.transactionSecContext.Type == pb.Transaction_CHAINCODE_QUERY {
 if enc, err = secHelper.GetStateEncryptor(handler.deployTXSecContext, txctx.transactionSecContext); err != nil {
+ chaincodeLogger.Errorf("error getting crypto encryptor %s", err)
 return nil, fmt.Errorf("error getting crypto encryptor %s", err)
 }
 } else {
@@ -1046,7 +1050,9 @@ func (handler *Handler) enterBusyState(e *fsm.Event, state string) {
 err = ledgerObj.DeleteState(chaincodeID, key)
 } else if msg.Type.String() == pb.ChaincodeMessage_INVOKE_CHAINCODE.String() {
 //check and prohibit C-call-C for CONFIDENTIAL txs
- if triggerNextStateMsg = handler.canCallChaincode(msg.Txid); triggerNextStateMsg != nil {
+ chaincodeLogger.Debugf("[%s] C-call-C", shorttxid(msg.Txid))
+
+ if triggerNextStateMsg = handler.canCallChaincode(msg.Txid, false); triggerNextStateMsg != nil {
 return
 }
 chaincodeSpec := &pb.ChaincodeSpec{}
@@ -1060,12 +1066,21 @@ func (handler *Handler) enterBusyState(e *fsm.Event, state string) {
 // Get the chaincodeID to invoke
 newChaincodeID := chaincodeSpec.ChaincodeID.Name
+ chaincodeLogger.Debugf("[%s] C-call-C %s", shorttxid(msg.Txid), newChaincodeID)
 // Create the transaction object
 chaincodeInvocationSpec := &pb.ChaincodeInvocationSpec{ChaincodeSpec: chaincodeSpec}
 transaction, _ := pb.NewChaincodeExecute(chaincodeInvocationSpec, msg.Txid, pb.Transaction_CHAINCODE_INVOKE)
- // Launch the new chaincode if not already running
+ tsc := handler.getTxContext(msg.Txid).transactionSecContext
+
+ transaction.Nonce = tsc.Nonce
+ transaction.ConfidentialityLevel = tsc.ConfidentialityLevel
+ transaction.ConfidentialityProtocolVersion = tsc.ConfidentialityProtocolVersion
+ transaction.Metadata = tsc.Metadata
+ transaction.Cert = tsc.Cert
+
+ // Launch the new chaincode if not already running
 _, chaincodeInput, launchErr := handler.chaincodeSupport.Launch(context.Background(), transaction)
 if launchErr != nil {
 payload := []byte(launchErr.Error())
@@ -1217,7 +1232,7 @@ func (handler *Handler) initializeSecContext(tx, depTx *pb.Transaction) error {
 return nil
 }
-func (handler *Handler) setChaincodeSecurityContext(tx *pb.Transaction, msg *pb.ChaincodeMessage) error {
+func (handler *Handler) setChaincodeSecurityContext(tx, depTx *pb.Transaction, msg *pb.ChaincodeMessage) error {
 chaincodeLogger.Debug("setting chaincode security context...")
 if msg.SecurityContext == nil {
 msg.SecurityContext = &pb.ChaincodeSecurityContext{}
@@ -1248,6 +1263,13 @@ func (handler *Handler) setChaincodeSecurityContext(tx, depTx *pb.Transaction, msg *pb.
return err } + msg.SecurityContext.Payload = ctorMsgRaw + // TODO: add deploy metadata + if depTx != nil { + msg.SecurityContext.ParentMetadata = depTx.Metadata + } else { + msg.SecurityContext.ParentMetadata = handler.deployTXSecContext.Metadata + } msg.SecurityContext.Payload = ctorMsgRaw msg.SecurityContext.TxTimestamp = tx.Timestamp } @@ -1289,7 +1311,7 @@ func (handler *Handler) initOrReady(txid string, initArgs [][]byte, tx *pb.Trans } //if security is disabled the context elements will just be nil - if err := handler.setChaincodeSecurityContext(tx, ccMsg); err != nil { + if err := handler.setChaincodeSecurityContext(tx, depTx, ccMsg); err != nil { return nil, err } @@ -1317,7 +1339,7 @@ func (handler *Handler) handleQueryChaincode(msg *pb.ChaincodeMessage) { }() //check and prohibit C-call-C for CONFIDENTIAL txs - if serialSendMsg = handler.canCallChaincode(msg.Txid); serialSendMsg != nil { + if serialSendMsg = handler.canCallChaincode(msg.Txid, true); serialSendMsg != nil { return } @@ -1337,6 +1359,16 @@ func (handler *Handler) handleQueryChaincode(msg *pb.ChaincodeMessage) { chaincodeInvocationSpec := &pb.ChaincodeInvocationSpec{ChaincodeSpec: chaincodeSpec} transaction, _ := pb.NewChaincodeExecute(chaincodeInvocationSpec, msg.Txid, pb.Transaction_CHAINCODE_QUERY) + tsc := handler.getTxContext(msg.Txid).transactionSecContext + + transaction.Nonce = tsc.Nonce + transaction.ConfidentialityLevel = tsc.ConfidentialityLevel + transaction.ConfidentialityProtocolVersion = tsc.ConfidentialityProtocolVersion + transaction.Metadata = tsc.Metadata + transaction.Cert = tsc.Cert + + chaincodeLogger.Debugf("[%s]Invoking another chaincode", shorttxid(msg.Txid)) + // Launch the new chaincode if not already running _, chaincodeInput, launchErr := handler.chaincodeSupport.Launch(context.Background(), transaction) if launchErr != nil { @@ -1466,7 +1498,7 @@ func (handler *Handler) sendExecuteMessage(msg *pb.ChaincodeMessage, tx *pb.Tran } //if security is disabled the context elements will just be nil - if err := handler.setChaincodeSecurityContext(tx, msg); err != nil { + if err := handler.setChaincodeSecurityContext(tx, nil, msg); err != nil { return nil, err } diff --git a/core/chaincode/shim/chaincode.go b/core/chaincode/shim/chaincode.go index 9fc215659ed..003bcb602ad 100644 --- a/core/chaincode/shim/chaincode.go +++ b/core/chaincode/shim/chaincode.go @@ -68,10 +68,7 @@ func Start(cc Chaincode) error { backendFormatter := logging.NewBackendFormatter(backend, format) logging.SetBackend(backendFormatter).SetLevel(logging.Level(shimLoggingLevel), "shim") - viper.SetEnvPrefix("CORE") - viper.AutomaticEnv() - replacer := strings.NewReplacer(".", "_") - viper.SetEnvKeyReplacer(replacer) + SetChaincodeLoggingLevel() flag.StringVar(&peerAddress, "peer.address", "", "peer address") @@ -105,6 +102,31 @@ func Start(cc Chaincode) error { return err } +// IsEnabledForLogLevel checks to see if the chaincodeLogger is enabled for a specific logging level +// used primarily for testing +func IsEnabledForLogLevel(logLevel string) bool { + lvl, _ := logging.LogLevel(logLevel) + return chaincodeLogger.IsEnabledFor(lvl) +} + +// SetChaincodeLoggingLevel sets the chaincode logging level to the value +// of CORE_LOGGING_CHAINCODE set from core.yaml by chaincode_support.go +func SetChaincodeLoggingLevel() { + viper.SetEnvPrefix("CORE") + viper.AutomaticEnv() + replacer := strings.NewReplacer(".", "_") + viper.SetEnvKeyReplacer(replacer) + + chaincodeLogLevelString := viper.GetString("logging.chaincode") + 
chaincodeLogLevel, err := LogLevel(chaincodeLogLevelString) + + if err == nil { + SetLoggingLevel(chaincodeLogLevel) + } else { + chaincodeLogger.Infof("error with chaincode log level: %s level= %s\n", err, chaincodeLogLevelString) + } +} + // StartInProc is an entry point for system chaincodes bootstrap. It is not an // API for chaincodes. func StartInProc(env []string, args []string, cc Chaincode, recv <-chan *pb.ChaincodeMessage, send chan<- *pb.ChaincodeMessage) error { diff --git a/core/chaincode/shim/mockstub_test.go b/core/chaincode/shim/mockstub_test.go index 39611c65780..0a5d417a9d1 100644 --- a/core/chaincode/shim/mockstub_test.go +++ b/core/chaincode/shim/mockstub_test.go @@ -19,6 +19,8 @@ package shim import ( "fmt" "testing" + + "github.com/spf13/viper" ) func TestMockStateRangeQueryIterator(t *testing.T) { @@ -50,3 +52,17 @@ func TestMockStateRangeQueryIterator(t *testing.T) { } } } + +// TestSetChaincodeLoggingLevel uses the utlity function defined in chaincode.go to +// set the chaincodeLogger's logging level +func TestSetChaincodeLoggingLevel(t *testing.T) { + // set log level to a non-default level + testLogLevelString := "debug" + viper.Set("logging.chaincode", testLogLevelString) + + SetChaincodeLoggingLevel() + + if !IsEnabledForLogLevel(testLogLevelString) { + t.FailNow() + } +} diff --git a/core/ledger/genesis/genesis_test.yaml b/core/ledger/genesis/genesis_test.yaml index 7a1010a7680..b78bbcc77fd 100644 --- a/core/ledger/genesis/genesis_test.yaml +++ b/core/ledger/genesis/genesis_test.yaml @@ -45,14 +45,6 @@ peer: # networkId: test networkId: dev - Dockerfile: | - from hyperledger/fabric-baseimage - # Copy GOPATH src and install Peer - COPY src $GOPATH/src - RUN mkdir -p /var/hyperledger/db - WORKDIR $GOPATH/src/github.com/hyperledger/fabric/peer/ - RUN CGO_CFLAGS=" " CGO_LDFLAGS="-lrocksdb -lstdc++ -lm -lz -lbz2 -lsnappy" go install && cp $GOPATH/src/github.com/hyperledger/fabric/peer/core.yaml $GOPATH/bin - # The Address this Peer will bind to for providing services address: 0.0.0.0:7051 # Whether the Peer should programmatically determine the address to bind to. This case is useful for docker containers. @@ -186,9 +178,9 @@ chaincode: # This is the basis for the Golang Dockerfile. Additional commands will be appended depedendent upon the chaincode specification. Dockerfile: | - from hyperledger/fabric-baseimage - COPY src $GOPATH/src - WORKDIR $GOPATH + FROM hyperledger/fabric-ccenv:$(ARCH)-$(PROJECT_VERSION) + COPY src $GOPATH/src + WORKDIR $GOPATH #timeout for starting up a container and waiting for Register to come through startuptimeout: 20000 diff --git a/core/rest/rest_api.json b/core/rest/rest_api.json index 7bcd6e2bb1c..75ee953a86f 100644 --- a/core/rest/rest_api.json +++ b/core/rest/rest_api.json @@ -451,15 +451,9 @@ "type": "object", "properties": { "type": { - "type": "string", - "default": "GOLANG", - "example": "GOLANG", - "enum":[ - "UNDEFINED", - "GOLANG", - "NODE", - "JAVA" - ], + "type": "integer", + "default": 1, + "example": 1, "description": "Chaincode specification language." }, "chaincodeID": { diff --git a/core/rest/rest_test.yaml b/core/rest/rest_test.yaml index 8393e7332ff..6746cc73c74 100644 --- a/core/rest/rest_test.yaml +++ b/core/rest/rest_test.yaml @@ -296,17 +296,16 @@ chaincode: # This is the basis for the Golang Dockerfile. Additional commands will # be appended depedendent upon the chaincode specification. 
Dockerfile: | - from hyperledger/fabric-baseimage - #from utxo:0.1.0 - COPY src $GOPATH/src - WORKDIR $GOPATH + FROM hyperledger/fabric-ccenv:$(ARCH)-$(PROJECT_VERSION) + COPY src $GOPATH/src + WORKDIR $GOPATH car: # This is the basis for the CAR Dockerfile. Additional commands will # be appended depedendent upon the chaincode specification. Dockerfile: | - FROM hyperledger/fabric-ccenv + FROM hyperledger/fabric-ccenv:$(ARCH)-$(PROJECT_VERSION) # timeout in millisecs for starting up a container and waiting for Register # to come through. 1sec should be plenty for chaincode unit tests diff --git a/devenv/Vagrantfile b/devenv/Vagrantfile index 8d9d6999daf..54af341894a 100644 --- a/devenv/Vagrantfile +++ b/devenv/Vagrantfile @@ -27,7 +27,7 @@ cd #{SRCMOUNT}/devenv SCRIPT -baseimage_release = File.read '../images/base/release' +baseimage_release = File.read '../.baseimage-release' Vagrant.require_version ">= 1.7.4" Vagrant.configure('2') do |config| diff --git a/devenv/compile_protos.sh b/devenv/compile_protos.sh index ad833ecd8d0..cdee9780e41 100755 --- a/devenv/compile_protos.sh +++ b/devenv/compile_protos.sh @@ -5,7 +5,7 @@ set -eux # Compile protos ALL_PROTO_FILES="$(find . -name "*.proto" -exec readlink -f {} \;)" PROTO_DIRS="$(dirname $ALL_PROTO_FILES | sort | uniq)" -PROTO_DIRS_WITHOUT_JAVA_AND_SDK="$(printf '%s\n' $PROTO_DIRS | grep -v "shim/java" | grep -v "/sdk")" +PROTO_DIRS_WITHOUT_JAVA_AND_SDK="$(printf '%s\n' $PROTO_DIRS | grep -v "shim/java" | grep -v "/sdk" | grep -v "/vendor")" for dir in $PROTO_DIRS_WITHOUT_JAVA_AND_SDK; do cd "$dir" protoc --proto_path="$dir" --go_out=plugins=grpc:. "$dir"/*.proto diff --git a/devenv/setup.sh b/devenv/setup.sh index 08dac37e90c..591a01c4830 100755 --- a/devenv/setup.sh +++ b/devenv/setup.sh @@ -40,19 +40,6 @@ DEVENV_REVISION=`(cd /hyperledger/devenv; git rev-parse --short HEAD)` SCRIPT_DIR="$(readlink -f "$(dirname "$0")")" cat "$SCRIPT_DIR/failure-motd.in" >> /etc/motd -# Update system -apt-get update -qq - -# Prep apt-get for docker install -apt-get install -y apt-transport-https ca-certificates -apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys 58118E89F3A912897C070ADBF76221572C52609D - -# Add docker repository -echo deb https://apt.dockerproject.org/repo ubuntu-trusty main > /etc/apt/sources.list.d/docker.list - -# Update system -apt-get update -qq - # Storage backend logic case "${DOCKER_STORAGE_BACKEND}" in aufs|AUFS|"") @@ -72,9 +59,6 @@ case "${DOCKER_STORAGE_BACKEND}" in exit 1;; esac -# Install docker -apt-get install -y linux-image-extra-$(uname -r) apparmor docker-engine - # Configure docker DOCKER_OPTS="-s=${DOCKER_STORAGE_BACKEND_STRING} -r=true --api-cors-header='*' -H tcp://0.0.0.0:2375 -H unix:///var/run/docker.sock ${DOCKER_OPTS}" sed -i.bak '/^DOCKER_OPTS=/{h;s|=.*|=\"'"${DOCKER_OPTS}"'\"|};${x;/^$/{s||DOCKER_OPTS=\"'"${DOCKER_OPTS}"'\"|;H};x}' /etc/default/docker @@ -85,9 +69,6 @@ usermod -a -G docker vagrant # Add vagrant user to the docker group # Test docker docker run --rm busybox echo All good -# Run our common setup -/hyperledger/scripts/provision/host.sh - # Set Go environment variables needed by other scripts export GOPATH="/opt/gopath" export GOROOT="/opt/go/" @@ -97,9 +78,12 @@ PATH=$GOROOT/bin:$GOPATH/bin:$PATH sudo mkdir -p /var/hyperledger sudo chown -R vagrant:vagrant /var/hyperledger -# Build the actual hyperledger peer (must be done before chown below) +# clean any previous builds as they may have image/.dummy files without +# the backing docker images (since we are, by definition, 
rebuilding the +# filesystem) and then ensure we have a fresh set of our go-tools. +# NOTE: This must be done before the chown below cd $GOPATH/src/github.com/hyperledger/fabric -make clean peer gotools +make clean gotools # Ensure permissions are set for GOPATH sudo chown -R vagrant:vagrant $GOPATH diff --git a/docs/API/AttributesUsage.md b/docs/API/AttributesUsage.md index fa3bd401dcc..b5774c618a8 100644 --- a/docs/API/AttributesUsage.md +++ b/docs/API/AttributesUsage.md @@ -93,6 +93,13 @@ To deploy a chaincode with attributes "company" and "position" it should be writ ``` +Or: + +``` +./peer chaincode deploy -u userName -n mycc -c '{"Args": ["init"]}' -a '["position", "company"]' + +``` + #### REST ``` @@ -145,6 +152,13 @@ To invoke "autorizable counter" with attributes "company" and "position" it shou ``` +Or: + +``` +./peer chaincode invoke -u userName -n mycc -c '{"Args": ["increment"]}' -a '["position", "company"]' + +``` + #### REST ``` @@ -202,6 +216,13 @@ To query "autorizable counter" with attributes "company" and "position" it shoul ``` +Or: + +``` +./peer chaincode query -u userName -n mycc -c '{"Args": ["read"]}' -a '["position", "company"]' + +``` + #### REST ``` diff --git a/docs/API/CoreAPI.md b/docs/API/CoreAPI.md index 5981096b057..4b622437003 100644 --- a/docs/API/CoreAPI.md +++ b/docs/API/CoreAPI.md @@ -7,9 +7,9 @@ This document covers the available APIs for interacting with a peer node. Three 1. [CLI](#cli) 2. [REST API](#rest-api) 3. [Node.js Application](#nodejs-application) - * [Using Swagger JS Plugin](#using-swagger-js-plugin) - * [Marbles Demo Application](#marbles-demo-application) - * [Commercial Paper Demo Application](#commercial-paper-demo-application) + * [Using Swagger JS Plugin](#using-swagger-js-plugin) + * [Marbles Demo Application](#marbles-demo-application) + * [Commercial Paper Demo Application](#commercial-paper-demo-application) **Note:** If you are working with APIs with security enabled, please review the [security setup instructions](https://github.com/hyperledger/fabric/blob/master/docs/Setup/Chaincode-setup.md#security-setup-optional) before proceeding. @@ -68,13 +68,28 @@ Command | **stdout** result in the event of success Deploy creates the docker image for the chaincode and subsequently deploys the package to the validating peer. An example is below. -`peer chaincode deploy -p github.com/hyperledger/fabric/examples/chaincode/go/chaincode_example02 -c '{"Function":"init", "Args": ["a","100", "b", "200"]}'` +``` +peer chaincode deploy -p github.com/hyperledger/fabric/examples/chaincode/go/chaincode_example02 -c '{"Function":"init", "Args": ["a","100", "b", "200"]}' +``` +Or: + +``` +peer chaincode deploy -p github.com/hyperledger/fabric/examples/chaincode/go/chaincode_example02 -c '{"Args": ["init", "a","100", "b", "200"]}' +``` The response to the chaincode deploy command will contain the chaincode identifier (hash) which will be required on subsequent `chaincode invoke` and `chaincode query` commands in order to identify the deployed chaincode. 
With security enabled, modify the command to include the -u parameter passing the username of a logged in user as follows: -`peer chaincode deploy -u jim -p github.com/hyperledger/fabric/examples/chaincode/go/chaincode_example02 -c '{"Function":"init", "Args": ["a","100", "b", "200"]}'` +``` +peer chaincode deploy -u jim -p github.com/hyperledger/fabric/examples/chaincode/go/chaincode_example02 -c '{"Function":"init", "Args": ["a","100", "b", "200"]}' +``` + +Or: + +``` +peer chaincode deploy -u jim -p github.com/hyperledger/fabric/examples/chaincode/go/chaincode_example02 -c '{"Args": ["init", "a","100", "b", "200"]}' +``` **Note:** If your GOPATH environment variable contains more than one element, the chaincode must be found in the first one or deployment will fail. @@ -157,19 +172,19 @@ You can work with the REST API through any tool of your choice. For example, the To learn about the REST API through Swagger, please take a look at the Swagger document [here](https://github.com/hyperledger/fabric/blob/master/core/rest/rest_api.json). You can upload the service description file to the Swagger service directly or, if you prefer, you can set up Swagger locally by following the instructions [here](#to-set-up-swagger-ui). * [Block](#block) - * GET /chain/blocks/{Block} + * GET /chain/blocks/{Block} * [Blockchain](#blockchain) - * GET /chain + * GET /chain * [Chaincode](#chaincode) * POST /chaincode * [Network](#network) - * GET /network/peers + * GET /network/peers * [Registrar](#registrar) - * POST /registrar - * DELETE /registrar/{enrollmentID} - * GET /registrar/{enrollmentID} - * GET /registrar/{enrollmentID}/ecert - * GET /registrar/{enrollmentID}/tcert + * POST /registrar + * DELETE /registrar/{enrollmentID} + * GET /registrar/{enrollmentID} + * GET /registrar/{enrollmentID}/ecert + * GET /registrar/{enrollmentID}/tcert * [Transactions](#transactions) * GET /transactions/{UUID} @@ -227,8 +242,7 @@ POST host:port/chaincode "path":"github.com/hyperledger/fabric/examples/chaincode/go/chaincode_example02" }, "ctorMsg": { - "function":"init", - "args":["a", "1000", "b", "2000"] + "args":["init", "a", "1000", "b", "2000"] } }, "id": 1 @@ -251,8 +265,7 @@ POST host:port/chaincode "path":"github.com/hyperledger/fabric/examples/chaincode/go/chaincode_example02" }, "ctorMsg": { - "function":"init", - "args":["a", "1000", "b", "2000"] + "args":["init", "a", "1000", "b", "2000"] }, "secureContext": "lukas" }, @@ -289,8 +302,7 @@ Chaincode Invocation Request without security enabled: "name":"52b0d803fc395b5e34d8d4a7cd69fb6aa00099b8fabed83504ac1c5d61a425aca5b3ad3bf96643ea4fdaac132c417c37b00f88fa800de7ece387d008a76d3586" }, "ctorMsg": { - "function":"invoke", - "args":["a", "b", "100"] + "args":["invoke", "a", "b", "100"] } }, "id": 3 @@ -311,8 +323,7 @@ Chaincode Invocation Request with security enabled (add `secureContext` element) "name":"52b0d803fc395b5e34d8d4a7cd69fb6aa00099b8fabed83504ac1c5d61a425aca5b3ad3bf96643ea4fdaac132c417c37b00f88fa800de7ece387d008a76d3586" }, "ctorMsg": { - "function":"invoke", - "args":["a", "b", "100"] + "args":["invoke", "a", "b", "100"] }, "secureContext": "lukas" }, @@ -349,8 +360,7 @@ Chaincode Query Request without security enabled: "name":"52b0d803fc395b5e34d8d4a7cd69fb6aa00099b8fabed83504ac1c5d61a425aca5b3ad3bf96643ea4fdaac132c417c37b00f88fa800de7ece387d008a76d3586" }, "ctorMsg": { - "function":"query", - "args":["a"] + "args":["query", "a"] } }, "id": 5 @@ -371,8 +381,7 @@ Chaincode Query Request with security enabled (add `secureContext` element): 
"name":"52b0d803fc395b5e34d8d4a7cd69fb6aa00099b8fabed83504ac1c5d61a425aca5b3ad3bf96643ea4fdaac132c417c37b00f88fa800de7ece387d008a76d3586" }, "ctorMsg": { - "function":"query", - "args":["a"] + "args":["query", "a"] }, "secureContext": "lukas" }, diff --git a/docs/CONTRIBUTING.md b/docs/CONTRIBUTING.md index 92102988acd..2bfad6ca9b1 100644 --- a/docs/CONTRIBUTING.md +++ b/docs/CONTRIBUTING.md @@ -10,8 +10,18 @@ before participating. It is important that we keep things civil. ## Getting a Linux Foundation account In order to participate in the development of the Hyperledger Fabric project, -you will need an [LF account](Gerrit/lf-account.md). This will give you single -sign-on to all the community tools, including Gerrit and Jira (coming soon!). +you will need an [LF account](Gerrit/lf-account.md). You will need to use +your LF ID to grant you access to all the Hyperledger community tools, including +[Gerrit](https://gerrit.hyperledger.org) and [Jira](https://jira.hyperledger.org). + +### Setting up your SSH key + +For Gerrit, you will want to register your public SSH key. Login to +[Gerrit](https://gerrit.hyperledger.org) +with your LF account, and click on your name in the upper right-hand corner +and then click 'Settings'. In the left-hand margin, you should see a link for +'SSH Public Keys'. Copy-n-paste your [public SSH key](https://help.github.com/articles/generating-an-ssh-key/) +into the window and press 'Add'. ## Getting help diff --git a/docs/Setup/Chaincode-setup.md b/docs/Setup/Chaincode-setup.md index 8c4dac74e6b..2acd25c2022 100644 --- a/docs/Setup/Chaincode-setup.md +++ b/docs/Setup/Chaincode-setup.md @@ -4,7 +4,7 @@ Chaincode developers need a way to test and debug their chaincode without having The following instructions apply to _developing_ chaincode in Go or Java. They do not apply to running in a production environment. However, if _developing_ chaincode in Java, please see the [Java chaincode setup](https://github.com/hyperledger/fabric/blob/master/docs/Setup/JAVAChaincode.md) instructions first, to be sure your environment is properly configured. -**Note:** We have added support for [System chaincode](https://github.com/hyperledger/fabric/blob/master/docs/SystemChaincodes/noop.md). +**Note:** We have added support for [System chaincode](https://github.com/hyperledger/fabric/blob/master/docs/SystemChaincode-noop.md). ## Choices diff --git a/docs/SystemChaincodes/noop.md b/docs/SystemChaincode-noop.md similarity index 100% rename from docs/SystemChaincodes/noop.md rename to docs/SystemChaincode-noop.md diff --git a/docs/abstract_v1.md b/docs/abstract_v1.md new file mode 100644 index 00000000000..859b1c7fc60 --- /dev/null +++ b/docs/abstract_v1.md @@ -0,0 +1,28 @@ +# HYPERLEDGER FABRIC v1.0 + +They Hyperledger Fabric is a platform that enables the delivery of a secure, robust, permissioned blockchain for the enterprise that incorporates a byzantine fault tolerant consensus. We have learned much as we progressed throuh the v0.6-preview release. In particular, that in order to provide for the scalability and confidentiality needs of many use cases, a refactoring of the architecture was needed. The v0.6-preview release will be the final (barring any bug fixes) release based upon the original architecture. + +The Hyperledger Fabric v1.0 architecture has been designed to address two vital enterprise-grade requirements – **security** and **scalability**. 
Businesses can leverage this new architecture to execute confidential transactions on networks with shared or common assets – e.g. supply chain, FOREX market, healthcare, etc. The progression to V1 will be incremental, with myriad windows for community members to contribute code and start curating the fabric to fit specific business needs. + +## WHERE WE ARE: + +The current implementation involves every validating peer shouldering the responsibility for the full gauntlet of network functionality. They execute transactions, perform consensus, and maintain the shared ledger. Not only does this configuration lay a huge computational burden on each peer, hindering scalability, but it also constricts important facets of privacy and confidentiality. Namely, there is no mechanism to “channel” or “silo” confidential transactions. Every peer can see the logic for every transaction. + +## WHERE WE'RE GOING + +The new architecture introduces a clear functional separation of peer roles, and allows a transaction to pass through the network in a structured and modularized fashion. The peers are diverged into two distinct roles – Endorser & Committer. As an endorser, the peer will simulate the transaction and ensure that the outcome is deterministic or stable. As a committer, the peer will validate the integrity of a transaction and then append to the ledger. Now confidential transactions can be sent to specific endorsers and their correlating committers, without the network being made cognizant of the transaction. Additionally, policies can be set to determine what levels of “endorsement” and “validation” are acceptable for a specific class of transactions. A failure to meet these thresholds would simply result in a transaction being thrown out, rather than imploding or stagnating the entire network. This new model also introduces the possibility for more elaborate networks, such as a foreign exchange market. Entities may need to only participate as endorsers for their transactions, while leaving consensus and commitment (i.e. settlement) to a trusted third party such as a clearing house. + +The consensus process (i.e. algorithmic computation) is entirely abstracted from the peer. This modularity not only provides a powerful security layer – the consenting nodes are agnostic to the transaction logic – but it also generates a framework where consensus can become pluggable and scalability can truly occur. There is no longer a parallel relationship between the number of peers in a network and the number of consenters. Now networks can grow dynamically (i.e. add endorsers and committers) without having to add corresponding consenters, and exist in a modular infrastructure designed to support high transaction throughput. Moreover, networks now have the capability to completely liberate themselves from the computational and legal burden of consensus by tapping into a pre-existing consensus cloud. + +As V1 manifests, we will see the foundation for interoperable blockchain networks that have the ability to scale and transact in a manner adherent with regulatory and industry standards. The following is a high-level overview of the upcoming additions to the fabric codebase that will spawn the capabilities in V1: + +## HOW TO CONTRIBUTE + +1. Familiarize yourself with the [guidelines for code contributions](CONTRIBUTING.md) to this project. **Note**: In order to participate in the development of the Hyperledger Fabric project, you will need an [LF account](Gerrit/lf-account.md). 
This will give you single
+sign-on to JIRA and Gerrit.
+1. Explore the design document for the new [architecture](https://github.com/hyperledger-archives/fabric/wiki/Next-Consensus-Architecture-Proposal)
+1. Explore [JIRA](https://jira.hyperledger.org/projects/FAB/issues/) for open Hyperledger Fabric issues.
+1. Explore the [JIRA](https://jira.hyperledger.org/projects/FAB/issues/) backlog for upcoming Hyperledger Fabric issues.
+1. Explore [JIRA](https://jira.hyperledger.org/issues/?filter=10147) for Hyperledger Fabric issues tagged with "help wanted."
+1. Explore the [source code](https://github.com/hyperledger/fabric)
+1. Explore the [documentation](http://hyperledger-fabric.readthedocs.io/en/latest/)
diff --git a/docs/custom_theme/searchbox.html b/docs/custom_theme/searchbox.html
new file mode 100644
index 00000000000..33ed3e2439c
--- /dev/null
+++ b/docs/custom_theme/searchbox.html
@@ -0,0 +1,5 @@
+
+
+ +
+
diff --git a/docs/images/standalone-app-developer.png b/docs/images/standalone-app-developer.png new file mode 100644 index 00000000000..1fb8ac82268 Binary files /dev/null and b/docs/images/standalone-app-developer.png differ diff --git a/docs/images/web-app-developer.png b/docs/images/web-app-developer.png new file mode 100644 index 00000000000..1fb8ac82268 Binary files /dev/null and b/docs/images/web-app-developer.png differ diff --git a/docs/index.md b/docs/index.md old mode 100755 new mode 100644 index 6d4d23ba778..d9f5b96041f --- a/docs/index.md +++ b/docs/index.md @@ -19,21 +19,25 @@ to host any mainstream language for smart contracts development. ## Releases -The fabric releases are documented -[here](https://github.com/hyperledger/fabric/wiki/Fabric-Releases). We have just -released our first release under the governance of the Hyperledger Project - -v0.5-developer-preview. +The fabric releases are documented [here](releases.md). We have just +released our second release under the governance of the Hyperledger Project - +v0.6-preview. + +## Fabric Starter Kit + +If you'd like to dive right in and get an operational experience on your local +server or laptop to begin development, we have just the thing for you. We have +created a standalone Docker-based [starter kit](starter/fabric-starter-kit.md) +that leverages the latest published Docker images that you can run on your +laptop and be up and running in no time. That should get you going with a +sample application and some simple chaincode. From there, you can go deeper +by exploring our [developer guides](#developer-guides). ## Contributing to the project We welcome contributions to the Hyperledger Project in many forms. There's always plenty to do! Full details of how to contribute to this project are -documented in the [Fabric developer's guide](#fabric-developer-guide) below. - -To contribute to this documentation, create an issue for any requests for -clarification or to highlight any errors, or you may clone and update the -[source](https://gerrit.hyperledger.org/r/#/admin/projects/fabric), and submit a -Gerrit review (essentially the same process as for fabric development). +documented in the [Fabric developer's guide](#fabric-developer's-guide) below. ## Maintainers @@ -45,42 +49,46 @@ Project's Technical Steering Committee (TSC). ## Communication We use [Hyperledger Slack](https://slack.hyperledger.org/) for communication and -Google Hangouts™ for screen sharing between developers. +Google Hangouts™ for screen sharing between developers. Our development +planning and prioritization is done in [JIRA](https://jira.hyperledger.org), +and we take longer running discussions/decisions to the +[mailing list](http://lists.hyperledger.org/mailman/listinfo/hyperledger-fabric). + +## Still Have Questions? +We try to maintain a comprehensive set of documentation (see below) for various audiences. +However, we realize that often there are questions that remain unanswered. For +any technical questions relating to the Hyperledger Fabric project not answered +in this documentation, please use +[StackOverflow](http://stackoverflow.com/questions/tagged/hyperledger). If you +need help finding things, please don't hesitate to send a note to the +[mailing list](http://lists.hyperledger.org/mailman/listinfo/hyperledger-fabric), +or ask on [Slack]((https://slack.hyperledger.org/)). 
# Hyperledger Fabric Documentation -The Hyperledger -[fabric](https://gerrit.hyperledger.org/r/#/admin/projects/fabric) is an -implementation of blockchain technology, that has been collaboratively developed -under the Linux Foundation's [Hyperledger Project](http://hyperledger.org). It -leverages familiar and proven technologies, and offers a modular architecture +The Hyperledger fabric is an implementation of blockchain technology, that has +been collaboratively developed under the Linux Foundation's +[Hyperledger Project](http://hyperledger.org). It leverages familiar and +proven technologies, and offers a modular architecture that allows pluggable implementations of various function including membership services, consensus, and smart contracts (Chaincode) execution. It features powerful container technology to host any mainstream language for smart contracts development. -## Still Have Questions? -We try to maintain a comprehensive set of documentation for various audiences. -However, we realize that often there are questions that remain unanswered. For -any technical questions relating to the Hyperledger Fabric project not answered -in this documentation, please use -[StackOverflow](http://stackoverflow.com/questions/tagged/hyperledger). - -## TOC +## Table of Contents Below, you'll find the following sections: -- [Getting started](#getting-started) -- [Quickstart](#quickstart-documentation) +- [Read All About It](#read-all-about-it) - [Developer guides](#developer-guides) - - [Fabric developer's guide](#fabric-developer-guide) - [Chaincode developer's guide](#chaincode-developer-guide) - - [API developer's guide](#api-developer-guide) + - [Application developer's guide](#application-developer-guide) + - [Fabric developer's guide](#fabric-developer-guide) - [Operations guide](#operations-guide) -# Getting started +## Read all about it If you are new to the project, you can begin by reviewing the following links. If you'd prefer to dive right in, see the @@ -96,47 +104,8 @@ where the community is developing use cases and requirements. the Fabric project's documentation. - [Fabric FAQs](https://github.com/hyperledger/fabric/tree/master/docs/FAQ) -# Quickstart documentation - -- [Development environment set-up](dev-setup/devenv.md): if you are considering -helping with development of the Hyperledger Fabric or Fabric-API projects -themselves, this guide will help you install and configure all you'll need. The -development environment is also useful (but, not necessary) for developing -blockchain applications and/or Chaincode. -- [Network setup](Setup/Network-setup.md): This document covers setting up a -network on your local machine for development. -- [Chaincode development environment](Setup/Chaincode-setup.md): Chaincode -developers need a way to test and debug their Chaincode without having to set up -a complete peer network. This document describes how to write, build, and test -Chaincode in a local development environment. -- [APIs](API/CoreAPI.md): This document covers the available APIs for -interacting with a peer node. - # Developer guides -## Fabric developer guide - -When you are ready to start contributing to the Hyperledger fabric project, we -strongly recommend that you read the [protocol specification](protocol-spec.md) -for the technical details so that you have a better understanding of how the -code fits together. - -- [Making code contributions](CONTRIBUTING.md): First, you'll want to familiarize -yourself with the project's contribution guidelines. 
-- [Setting up the development environment](dev-setup/devenv.md): after that, you -will want to set up your development environment. -- [Building the fabric core](dev-setup/build.md): next, try building the project -in your local development environment to ensure that everything is set up -correctly. -- [Building outside of Vagrant](dev-setup/build.md#building-outside-of-vagrant): -for the adventurous, you might try to build outside of the standard Vagrant -development environment. -- [Logging control](Setup/logging-control.md): describes how to tweak the logging -levels of various components within the fabric. -- [License header](dev-setup/headers.txt): every source file must include this -license header modified to include a copyright statement for the principle -author(s). - ## Chaincode developer guide - [Setting up the development environment](dev-setup/devenv.md): when developing @@ -152,22 +121,40 @@ testing Chaincode. - [Chaincode FAQ](FAQ/chaincode_FAQ.md): a FAQ for all of your burning questions relating to Chaincode. -## API developer guide - -- [APIs - CLI, REST, and Node.js](API/CoreAPI.md) - - [CLI](API/CoreAPI.md#cli): working with the command-line interface. - - [REST](API/CoreAPI.md#rest-api): working with the REST API. - - [Node.js SDK](nodeSDK/node-sdk-guide.md): working with the Node.js SDK. - +## Application developer guide +``` + - [APIs - CLI, REST, and Node.js](API/CoreAPI.md) + - [CLI](API/CoreAPI.md#cli): working with the command-line interface. + - [REST](API/CoreAPI.md#rest-api): working with the REST API (*deprecated*). + - [Node.js SDK](nodeSDK/node-sdk-guide.md): working with the Node.js SDK. +``` +## Fabric developer guide +``` + - [Making code contributions](CONTRIBUTING.md): First, you'll want to familiarize + yourself with the project's contribution guidelines. + - [Setting up the development environment](dev-setup/devenv.md): after that, you + will want to set up your development environment. + - [Building the fabric core](dev-setup/build.md): next, try building the project + in your local development environment to ensure that everything is set up + correctly. + - [Building outside of Vagrant](dev-setup/build.md#building-outside-of-vagrant): + for the *adventurous*, you might try to build outside of the standard Vagrant + development environment. + - [Logging control](Setup/logging-control.md): describes how to tweak the logging + levels of various components within the fabric. + - [License header](dev-setup/headers.txt): every source file must include this + license header modified to include a copyright statement for the principle + author(s). +``` # Operations guide - -- [Setting Up a Network](Setup/Network-setup.md): instructions for setting up a -network of fabric peers. -- [Certificate Authority (CA) Setup](Setup/ca-setup.md): setting up a CA to -support identity, security (authentication/authorization), privacy and -confidentiality. -- [Application ACL](tech/application-ACL.md): working with access control lists. - -## License +``` + - [Setting Up a Network](Setup/Network-setup.md): instructions for setting up a + network of fabric peers. + - [Certificate Authority (CA) Setup](Setup/ca-setup.md): setting up a CA to + support identity, security (authentication/authorization), privacy and + confidentiality. + - [Application ACL](tech/application-ACL.md): working with access control lists. +``` +# License The Hyperledger Project uses the [Apache License Version 2.0](LICENSE) software license. 
diff --git a/docs/nodeSDK/images/standalone-app-developer.png b/docs/nodeSDK/images/standalone-app-developer.png new file mode 100644 index 00000000000..1fb8ac82268 Binary files /dev/null and b/docs/nodeSDK/images/standalone-app-developer.png differ diff --git a/docs/nodeSDK/images/web-app-developer.png b/docs/nodeSDK/images/web-app-developer.png new file mode 100644 index 00000000000..1fb8ac82268 Binary files /dev/null and b/docs/nodeSDK/images/web-app-developer.png differ diff --git a/docs/nodeSDK/node-sdk-self-contained.md b/docs/nodeSDK/node-sdk-self-contained.md deleted file mode 100755 index 08c6e4fac48..00000000000 --- a/docs/nodeSDK/node-sdk-self-contained.md +++ /dev/null @@ -1,99 +0,0 @@ -# Self Contained Node.js Environment - -This section describes how to set up a self contained environment for Node.js application development with the Hyperledger Fabric Node.js SDK. The setup uses **Docker** to provide a controlled environment with all the necessary Hyperledger fabric components to support a Node.js application. There are three **Docker** images that when run will provide a blockchain network environment. There is an image to run a single **Peer**, one to run the **Member Services** and one to run both a Node.js application and the sample chaincode. See [Application Developer's Overview](app-overview.md) on how the components running within the containers will communicate. The sample comes with a sample Node.js application ready to execute and sample chaincode. The sample will be running in developer mode where the chaincode has been built and started prior to the application call to deploy it. The deployment of chaincode in network mode requires that the Hyperledger Fabric Node.js SDK has access to the chaincode source code and all of its dependant code, in order to properly build a deploy request. It also requires that the **peer** have access to **docker** functions to be able to build and deploy the new **docker** image that will run the chaincode. This is a more complicated configuration and not suitable to an introduction to the Hyperledger Fabric Node.js SDK. - -**note:** This sample was prepared using Docker for Mac 1.12.0 - -* Prerequisite software to install: - - * Docker - * docker-compose (may be packaged with Docker) - -* Copy our [docker-compose.yml](https://raw.githubusercontent.com/hyperledger/fabric/master/examples/sdk/node/docker-compose.yml) file to a local directory: - -``` - curl -o docker-compose.yml https://raw.githubusercontent.com/hyperledger/fabric/master/examples/sdk/node/docker-compose.yml -``` -* **Optionally** build your own docker images. - The docker compose environment uses three docker images. If you wish to customize and build your own docker images. The following [Dockerfile](https://raw.githubusercontent.com/hyperledger/fabric/master/examples/sdk/node/Dockerfile) - was used to build the **nodesdk** image and may be used as a starting point to your own customizations. -``` - curl -o Dockerfile https://raw.githubusercontent.com/hyperledger/fabric/master/examples/sdk/node/Dockerfile - docker build -t hyperledger/fabric-node-sdk:latest . -``` - -* Start the fabric network environment using docker-compose. From a terminal session that has the working directory of where the above *docker-compose.yml* is located, execute one of following **docker-compose** commands. 
- - * to run as detached containers: - ``` - docker-compose up -d - ``` - **note:** to see the logs for the **peer** container use the `docker logs peer` command - - * to run in the foreground and see the log output in the current terminal session: - ``` - docker-compose up - ``` - - Both commands will start three docker containers, to view the container status try `docker ps` command. The first time this is run the **docker** images will be downloaded. This may take 10 minutes or more depending on the network connections of the system running the command. - ``` - docker ps - ``` - - You should see something like the following: - ``` - CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES - bb01a2fa96ef hyperledger/fabric-node-sdk "sh -c 'sleep 20; /op" About a minute ago Up 59 seconds nodesdk - ec7572e65f12 hyperledger/fabric-peer "sh -c 'sleep 10; pee" About a minute ago Up About a minute peer - 118ef6da1709 hyperledger/fabric-membersrvc "membersrvc" About a minute ago Up About a minute membersrvc - ``` - -* Start a terminal session in the **nodesdk** container. This is where the Node.js application is located. - - **note:** Be sure to wait 20 seconds after starting the network using the `docker-compose up` command before executing the following command to allow the network to initialize. - -``` - docker exec -it nodesdk /bin/bash -``` - -* From the terminal session in the **nodesdk** container execute the standalone Node.js application. The docker terminal session should be in the working directory of the sample application called **app.js** (*/opt/gopath/src/github.com/hyperledger/fabric/examples/sdk/node*). Execute the following Node.js command to run the application. - -``` - node app -``` - In another terminal session on the host you can view the logs for the peer by executing the following command (not in the docker shell above, in a new terminal session of the real system) -``` - docker logs peer -``` - -* If you wish to run your own Node.js application using the pre build docker images: - * use the directories in the `volumes` tag under **nodesdk** in the `docker-compose.yml` file as a place to store your programs from the host system into the docker container. The first path is the top level system (host system) and the second is created in the docker container. If you wish to use a host location that is not under the `/Users` directory (`~` is under `/Users') then you must add that to the **docker** file sharing under **docker** preferences. - -```yaml - volumes: - - ~/mytest:/user/mytest -``` - * copy or create and edit your application in the `~/mytest` directory as stated in the `docker-compose.yml` `volumes` tag under **nodesdk** container. - * run npm to install Hyperledger Fabric Node.js SDK in the `mytest` directory -``` - npm install /opt/gopath/src/github.com/hyperledger/fabric/sdk/node -``` - * run the application from within the **nodesdk** docker container using the commands -``` - docker exec -it nodesdk /bin/bash -``` - once in the shell, and assuming your Node.js application is called `app.js` -``` - cd /user/mytest - node app -``` -* To shutdown the environment, execute the following **docker-compose** command in the directory where the *docker-compose.yml* is located. Any changes you made to the sample application or deployment of a chaincode will be lost. Only changes made to the shared area defined in the 'volumes' tag of the **nodesdk** container will persist. 
This will shutdown each of the containers and remove the containers from **docker**: - -``` - docker-compose down -``` - or if you wish to keep your changes and just stop the containers, which will be restarted on the next `up` command - -``` - docker-compose kill -``` diff --git a/docs/releases.md b/docs/releases.md new file mode 100644 index 00000000000..51ba90eadfc --- /dev/null +++ b/docs/releases.md @@ -0,0 +1,62 @@ + + +[v0.6-preview](https://github.com/hyperledger/fabric/tree/v0.6) September 16, 2016 + +A developer preview release of the Hyperledger Fabric intended +to exercise the release logistics and stabilize a set of capabilities for +developers to try out. This will be the last release under the original +architecture. All subsequent releases will deliver on the +[v1.0 architecture](TODO). + +Key enhancements: + +* 8de58ed - NodeSDK doc changes -- FAB-146 +* 62d866d - Add flow control to SYNC_STATE_SNAPSHOT +* 4d97069 - Adding TLS changes to SDK +* e9d3ac2 - Node-SDK: add support for fabric events(block, chaincode, transactional) +* 7ed9533 - Allow deploying Java chaincode from remote git repositories +* 4bf9b93 - Move Docker-Compose files into their own folder +* ce9fcdc - Print ChaincodeName when deploy with CLI +* 4fa1360 - Upgrade go protobuf from 3-beta to 3 +* 4b13232 - Table implementation in java shim with example +* df741bc - Add support for dynamically registering a user with attributes +* 4203ea8 - Check for duplicates when adding peers to the chain +* 518f3c9 - Update docker openjdk image +* 47053cd - Add GetTxID function to Stub interface (FAB-306) +* ac182fa - Remove deprecated devops REST API +* ad4645d - Support hyperledger fabric build on ppc64le platform +* 21a4a8a - SDK now properly adding a peer with an invalid URL +* 1d8114f - Fix setting of watermark on restore from crash +* a98c59a - Upgrade go protobuff from 3-beta to 3 +* 937039c - DEVENV: Provide strong feedback when provisioning fails +* d74b1c5 - Make pbft broadcast timeout configurable +* 97ed71f - Java shim/chaincode project reorg, separate java docker env +* a76dd3d - Start container with HostConfig was deprecated since v1.10 and removed since v1.12 +* 8b63a26 - Add ability to unregister for events +* 3f5b2fa - Add automatic peer command detection +* 6daedfd - Re-enable sending of chaincode events +* b39c93a - Update Cobra and pflag vendor libraries +* dad7a9d - Reassign port numbers to 7050-7060 range + +[v0.5-developer-preview](https://github.com/hyperledger-archives/fabric/tree/v0.5-developer-preview) +June 17, 2016 + +A developer preview release of the Hyperledger Fabric intended +to exercise the release logistics and stabilize a set of capabilities for +developers to try out. + +Key features: + +Permissioned blockchain with immediate finality +Chaincode (aka smart contract) execution environments +Docker container (user chaincode) +In-process with peer (system chaincode) +Pluggable consensus with PBFT, NOOPS (development mode), SIEVE (prototype) +Event framework supports pre-defined and custom events +Client SDK (Node.js), basic REST APIs and CLIs +Known Key Bugs and work in progress + +* 1895 - Client SDK interfaces may crash if wrong parameter specified +* 1901 - Slow response after a few hours of stress testing +* 1911 - Missing peer event listener on the client SDK +* 889 - The attributes in the TCert are not encrypted. 
This work is still on-going diff --git a/docs/starter/fabric-starter-kit.md b/docs/starter/fabric-starter-kit.md new file mode 100755 index 00000000000..94d0d912ff1 --- /dev/null +++ b/docs/starter/fabric-starter-kit.md @@ -0,0 +1,171 @@ +# Fabric Starter Kit + +This section describes how to set up a self-contained environment for +application development with the Hyperledger Fabric. The setup +uses **Docker** to provide a controlled environment with all the necessary +Hyperledger fabric components to support a Node.js application built with +the fabric's Node.js SDK, and chaincode written in Go. + +There are three Docker images that, when run, will provide a basic +network environment. There is an image to run a single `peer`, one to run +the `membersrvc` and one to run both your Node.js application and your +chaincode. See [Application Developer's Overview](../nodeSDK/app-overview.md) on how the +components running within the containers will communicate. + +The starter kit comes with a sample Node.js application ready to execute and +sample chaincode. The starter kit will be running in chaincode developer mode. +In this mode, the chaincode is built and started prior to the application +making a call to deploy it. + +**Note:** The deployment of chaincode in network mode requires that the +Hyperledger Fabric Node.js SDK has access to the chaincode source code and all +of its dependencies, in order to properly build a deploy request. It also +requires that the `peer` have access to the Docker daemon to be able to build +and deploy the new Docker image that will run the chaincode. *This is a more +complicated configuration and not suitable for an introduction to the +Hyperledger fabric.* We recommend first running in chaincode development mode. + +## Further exploration + +If you wish, there are a number of chaincode examples nearby. +``` + cd ../../chaincode +``` +## Getting started + +**Note:** This sample was prepared using Docker for Mac 1.12.0 + +* Prerequisite software to install: + + * [Docker](https://www.docker.com/products/overview) + * docker-compose (may be packaged with Docker) + +* Copy our [docker-compose.yml](https://raw.githubusercontent.com/hyperledger/fabric/master/examples/sdk/node/docker-compose.yml) file to a local directory: + +``` + curl -o docker-compose.yml https://raw.githubusercontent.com/hyperledger/fabric/master/examples/sdk/node/docker-compose.yml +``` + The docker compose environment uses three docker images. Two are published to + DockerHub. For the third, we provide the source so that you can customize it + and inject your own application code during development. The following [Dockerfile](https://raw.githubusercontent.com/hyperledger/fabric/master/examples/sdk/node/Dockerfile) + is used to build the base **fabric-starter-kit** image and may be used as + a starting point for your own customizations. + +``` + curl -o Dockerfile https://raw.githubusercontent.com/hyperledger/fabric/master/examples/sdk/node/Dockerfile + docker build -t hyperledger/fabric-starter-kit:latest . +``` + +* Start the fabric network environment using docker-compose. From a terminal +session that has the working directory of where the above *docker-compose.yml* +is located, execute one of the following `docker-compose` commands.
+ + * to run as detached containers: + +``` + docker-compose up -d +``` + **note:** to see the logs for the `peer` container use the + `docker logs peer` command + + * to run in the foreground and see the log output in the current terminal + session: + +``` + docker-compose up +``` + + Both commands will start three docker containers; to view the container + status, try the `docker ps` command. The first time this is run the Docker + images will be downloaded. This may take 10 minutes or more depending on the + network connections of the system running the command. + +``` + docker ps +``` + You should see something like the following: + +``` + CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES + bb01a2fa96ef hyperledger/fabric-starter-kit "sh -c 'sleep 20; /op" About a minute ago Up 59 seconds starter + ec7572e65f12 hyperledger/fabric-peer "sh -c 'sleep 10; pee" About a minute ago Up About a minute peer + 118ef6da1709 hyperledger/fabric-membersrvc "membersrvc" About a minute ago Up About a minute membersrvc +``` + +* Start a terminal session in the **starter** container. This is where the +Node.js application is located. + + **note:** Be sure to wait 20 seconds after starting the network using the + `docker-compose up` command before executing the following command to allow + the network to initialize. + +``` + docker exec -it starter /bin/bash +``` + +* From the terminal session in the **starter** container execute the standalone +Node.js application. The docker terminal session should be in the working +directory of the sample application called **app.js** (*/opt/gopath/src/github.com/hyperledger/fabric/examples/sdk/node*). Execute +the following Node.js command to run the application. + +``` + node app +``` + In another terminal session on the host you can view the logs for the peer + by executing the following command (not in the docker shell above, in a new + terminal session of the real system) + +``` + docker logs peer +``` + +* If you wish to run your own Node.js application using the pre-built docker +images: + * use the directories in the `volumes` tag under **starter** in the + `docker-compose.yml` file as a place to share your programs from the host + system with the docker container. The first path is the top level system + (host system) and the second is created in the docker container. If you wish + to use a host location that is not under the `/Users` directory (`~` is + under `/Users`) then you must add that to the Docker file sharing + under Docker preferences. + +```yaml + volumes: + - ~/mytest:/user/mytest +``` + * copy or create and edit your application in the `~/mytest` directory as + stated in the `docker-compose.yml` `volumes` tag under **starter** container. + * run npm to install Hyperledger Fabric Node.js SDK in the `mytest` directory + +``` + npm install /opt/gopath/src/github.com/hyperledger/fabric/sdk/node +``` + * run the application from within the **starter** Docker container using the + commands + +``` + docker exec -it starter /bin/bash +``` + once in the shell, and assuming your Node.js application is called `app.js` + +``` + cd /user/mytest + node app +``` +* To shut down the environment, execute the following **docker-compose** command +in the directory where the *docker-compose.yml* is located. Any changes you made +to the sample application or deployment of a chaincode will be lost. Only +changes made to the shared area defined in the 'volumes' tag of the **starter** +container will persist.
This will shut down each of the containers and remove +them from Docker: + +``` + docker-compose down +``` + or if you wish to keep your changes and just stop the containers, which will + be restarted on the next `up` command + +``` + docker-compose kill +``` diff --git a/examples/chaincode/go/utxo/Dockerfile b/examples/chaincode/go/utxo/Dockerfile index 927b746931c..807632d9c30 100644 --- a/examples/chaincode/go/utxo/Dockerfile +++ b/examples/chaincode/go/utxo/Dockerfile @@ -1,4 +1,5 @@ -from hyperledger/fabric-baseimage +# FIXME: someone from the UTXO team will need to verify or rework this +FROM ubuntu:latest RUN apt-get update && apt-get install pkg-config autoconf libtool -y RUN cd /tmp && git clone https://github.com/bitcoin/secp256k1.git && cd secp256k1/ diff --git a/examples/sdk/node/docker-compose.yml b/examples/sdk/node/docker-compose.yml index 2000c854798..64302ade722 100644 --- a/examples/sdk/node/docker-compose.yml +++ b/examples/sdk/node/docker-compose.yml @@ -27,10 +27,11 @@ peer: links: - membersrvc -nodesdk: - container_name: nodesdk - image: hyperledger/fabric-node-sdk +starter: + container_name: starter + image: hyperledger/fabric-starter-kit volumes: + # tweak this to map a local development directory tree into the container - ~/mytest:/user/mytest environment: - MEMBERSRVC_ADDRESS=membersrvc:7054 diff --git a/images/base/.gitignore b/images/base/.gitignore deleted file mode 100644 index 94143827ed0..00000000000 --- a/images/base/.gitignore +++ /dev/null @@ -1 +0,0 @@ -Dockerfile diff --git a/images/base/Dockerfile.in b/images/base/Dockerfile.in deleted file mode 100644 index 3babb952831..00000000000 --- a/images/base/Dockerfile.in +++ /dev/null @@ -1,5 +0,0 @@ -FROM _DOCKER_BASE_ -COPY scripts /hyperledger/baseimage/scripts -RUN /hyperledger/baseimage/scripts/common/init.sh -RUN /hyperledger/baseimage/scripts/docker/init.sh -RUN /hyperledger/baseimage/scripts/common/setup.sh diff --git a/images/base/Makefile b/images/base/Makefile deleted file mode 100644 index be148e69668..00000000000 --- a/images/base/Makefile +++ /dev/null @@ -1,50 +0,0 @@ -NAME=hyperledger/fabric-baseimage -VERSION=$(shell cat ./release) -ARCH=$(shell uname -m) -DOCKER_TAG ?= $(ARCH)-$(VERSION) -VAGRANTIMAGE=packer_virtualbox-iso_virtualbox.box - -DOCKER_BASE_x86_64=ubuntu:trusty -DOCKER_BASE_s390x=s390x/ubuntu:xenial -DOCKER_BASE_ppc64le=ppc64le/ubuntu:xenial - -DOCKER_BASE=$(DOCKER_BASE_$(ARCH)) - -ifeq ($(DOCKER_BASE), ) -$(error "Architecture \"$(ARCH)\" is unsupported") -endif - - -# strips off the post-processors that try to upload artifacts to the cloud -packer-local.json: packer.json - jq 'del(."post-processors"[0][1]) | del(."post-processors"[1][1])' packer.json > $@ - -all: vagrant docker - -$(VAGRANTIMAGE): packer-local.json - BASEIMAGE_RELEASE=$(VERSION) \ - packer build -only virtualbox-iso packer-local.json - -Dockerfile: Dockerfile.in Makefile - @echo "# Generated from Dockerfile.in. DO NOT EDIT!" > $@ - @cat Dockerfile.in | \ - sed -e "s|_DOCKER_BASE_|$(DOCKER_BASE)|" >> $@ - -docker: Dockerfile release - @echo "Generating docker" - @docker build -t $(NAME):$(DOCKER_TAG) .
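For context on the images/base/Makefile being deleted above, here is a rough sketch of what its Dockerfile and docker targets expanded to on an x86_64 host, using the values visible in the removed Makefile and its release file (0.0.11). It is illustrative only; this workflow is retired by this change.

```
# Illustrative expansion of the removed targets on x86_64 (baseimage release 0.0.11):
# the _DOCKER_BASE_ placeholder in Dockerfile.in was replaced with the per-arch
# base image, and the result was built under an <arch>-<version> tag.
sed -e "s|_DOCKER_BASE_|ubuntu:trusty|" Dockerfile.in > Dockerfile
docker build -t hyperledger/fabric-baseimage:x86_64-0.0.11 .
```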
- -vagrant: $(VAGRANTIMAGE) remove release - vagrant box add -name $(NAME) $(VAGRANTIMAGE) - -push: - @echo "You will need your ATLAS_TOKEN set for this to succeed" - packer push -name $(NAME) packer.json - -remove: - -vagrant box remove --box-version 0 $(NAME) - -clean: remove - -rm $(VAGRANTIMAGE) - -rm Dockerfile - -rm packer-local.json diff --git a/images/base/README.md b/images/base/README.md deleted file mode 100644 index 6073455aca5..00000000000 --- a/images/base/README.md +++ /dev/null @@ -1,53 +0,0 @@ -# Baseimage Introduction -This directory contains the infrastructure for creating a new baseimage used as the basis for various functions within the Hyperledger workflow such as our Vagrant based development environment, chaincode compilation/execution, unit-testing, and even cluster simulation. It is based on ubuntu-14.04 with various opensource projects added such as golang, rocksdb, grpc, and node.js. The actual Hyperledger code is injected just-in-time before deployment. The resulting images are published to public repositories such as [atlas.hashicorp.com](https://atlas.hashicorp.com/hyperledger/boxes/fabric-baseimage) for consumption by Vagrant/developers and [hub.docker.com](https://hub.docker.com/r/hyperledger/fabric-baseimage/) for consumption by docker-based workflows. - -![Baseimage Architectural Overview](./images/packer-overview.png) - -The purpose of this baseimage is to act as a bridge between a raw ubuntu/trusty64 configuration and the customizations required for supporting a hyperledger environment. Some of the FOSS components that need to be added to Ubuntu do not have convenient native packages. Therefore, they are built from source. However, the build process is generally expensive (often taking in excess of 30 minutes) so it is fairly inefficient to JIT assemble these components on demand. - -Therefore, the expensive FOSS components are built into this baseimage once and subsequently cached on the public repositories so that workflows may simply consume the objects without requiring a local build cycle. - -# Intended Audience -This is only intended for release managers curating the base images on atlas and docker-hub. Typical developers may safely ignore this directory completely. - -Anyone wishing to customize their image is encouraged to do so via downstream means, such as the vagrant infrastructure in the root directory of this project or the Dockerfile. - -## Exceptions - -If a component is found to be both broadly applicable and expensive to build JIT, it may be a candidate for inclusion in a future baseimage. - -# Usage - -## Usage Pattern 1 - Local baseimage builds for testing a proposed change - -* "make vagrant" will build just the vagrant image and install it into the local environment as "hyperledger/fabric-baseimage:v0", making it suitable to local testing. - * To utilize the new base image in your local tests, run `vagrant destroy` then `USE_LOCAL_BASEIMAGE=true vagrant up`, also preface `vagrant ssh` as `USE_LOCAL_BASEIMAGE=true vagrant ssh` or simply export that variable, or Vagrant will fail to find the ssh key. 
-* "make docker" will build just the docker image and commit it to your local environment as "hyperledger/fabric-baseimage" - -## Usage Pattern 2 - Release manager promoting a new base image to the public repositories - -- Step 1: Decide on the version number to be used and update the packer.json template variables:release -- Step 2: Initiate a build - -Note: You will need credentials to the public repositories, as discussed in Uploading Permissions below. If you do not have these credentials, you are probably not an image release manager. Otherwise, discuss it on the Hyperledger slack to see if you should be added. - -### Hosted Build Method - -"make push" will push the build configuration to atlas for cloud-hosted building of the images. You only need to have the ATLAS_TOKEN defined for this to succeed, as the atlas build server will push the artifacts out to the respective hosts once the build completes. Therefore, the repository credentials are already cached on the build server and you only need credentials for the build-server itself. You can check the status of the build [here](https://atlas.hashicorp.com/hyperledger/build-configurations/baseimage/) - -### Local Build Method - -"make [all]" will generate both a vagrant and docker image and push them out to the cloud. This method requires both ATLAS and DOCKERHUB credentials since the artifacts are pushed directly to the hosting providers from your build machine. - -## Uploading Permissions - -The system relies on several environment variables to establish credentials with the hosting repositories: - -* ATLAS_TOKEN - used to push both vagrant images and packer templates to atlas.hashicorp.com -* DOCKERHUB_[EMAIL|USERNAME|PASSWORD] - used to push docker images to hub.docker.com - -Note that if you only plan on pushing the build to the atlas packer build service, you only need the ATLAS_TOKEN set as the dockerhub interaction will occur from the atlas side of the process where the docker credentials are presumably already configured. - -## Versioning - -Vagrant boxes are only versioned when they are submitted to a repository. Vagrant does not support applying a version to a vagrant box via the `vagrant box add` command. Adding the box gives it an implicit version of 0. Setting `USE_LOCAL_BASEIMAGE=true` in the `vagrant up` command causes the Vagrant file in the the parent directory to pick version 0, instead of the default. 
diff --git a/images/base/http/preseed.cfg b/images/base/http/preseed.cfg deleted file mode 100644 index d3619ce76f7..00000000000 --- a/images/base/http/preseed.cfg +++ /dev/null @@ -1,34 +0,0 @@ -debconf debconf/frontend select Noninteractive -choose-mirror-bin mirror/http/proxy string -d-i clock-setup/utc boolean true -d-i clock-setup/utc-auto boolean true -d-i finish-install/reboot_in_progress note -d-i grub-installer/only_debian boolean true -d-i grub-installer/with_other_os boolean true -d-i partman-auto/choose_recipe select atomic -d-i partman-auto/method string regular -d-i partman/choose_partition select finish -d-i partman/confirm boolean true -d-i partman/confirm_nooverwrite boolean true -d-i partman/confirm_write_new_label boolean true - -# Set the kernel -d-i base-installer/kernel/override-image string linux-virtual - -# Default user -d-i passwd/user-fullname string vagrant -d-i passwd/username string vagrant -d-i passwd/user-password password vagrant -d-i passwd/user-password-again password vagrant -d-i passwd/username string vagrant - -# Minimum packages (see postinstall.sh) -d-i pkgsel/include string openssh-server -d-i pkgsel/install-language-support boolean false -d-i pkgsel/update-policy select none -d-i pkgsel/upgrade select none - -d-i time/zone string UTC -d-i user-setup/allow-password-weak boolean true -d-i user-setup/encrypt-home boolean false -tasksel tasksel/first multiselect standard, server diff --git a/images/base/images/packer-overview.graffle/data.plist b/images/base/images/packer-overview.graffle/data.plist deleted file mode 100644 index c6fc4f1b778..00000000000 --- a/images/base/images/packer-overview.graffle/data.plist +++ /dev/null @@ -1,1383 +0,0 @@ [contents omitted: 1,383 lines of OmniGraffle property-list XML, the source for images/packer-overview.png] diff --git a/images/base/images/packer-overview.graffle/image2.png b/images/base/images/packer-overview.graffle/image2.png deleted file mode 100644 index 7e5bdd51c01..00000000000 Binary files a/images/base/images/packer-overview.graffle/image2.png and /dev/null differ diff --git a/images/base/images/packer-overview.graffle/image3.png b/images/base/images/packer-overview.graffle/image3.png deleted file mode 100644 index
437a535b657..00000000000 Binary files a/images/base/images/packer-overview.graffle/image3.png and /dev/null differ diff --git a/images/base/images/packer-overview.graffle/image4.png b/images/base/images/packer-overview.graffle/image4.png deleted file mode 100644 index 045093e2b58..00000000000 Binary files a/images/base/images/packer-overview.graffle/image4.png and /dev/null differ diff --git a/images/base/images/packer-overview.graffle/image5.png b/images/base/images/packer-overview.graffle/image5.png deleted file mode 100644 index 0a367e5d9dc..00000000000 Binary files a/images/base/images/packer-overview.graffle/image5.png and /dev/null differ diff --git a/images/base/images/packer-overview.graffle/image6.png b/images/base/images/packer-overview.graffle/image6.png deleted file mode 100644 index efbf05abd22..00000000000 Binary files a/images/base/images/packer-overview.graffle/image6.png and /dev/null differ diff --git a/images/base/images/packer-overview.png b/images/base/images/packer-overview.png deleted file mode 100644 index 66c9087c63b..00000000000 Binary files a/images/base/images/packer-overview.png and /dev/null differ diff --git a/images/base/packer.json b/images/base/packer.json deleted file mode 100644 index 03f9f3a8dce..00000000000 --- a/images/base/packer.json +++ /dev/null @@ -1,105 +0,0 @@ -{ - "push": { - "name": "", - "vcs": true - }, - "variables": { - "artifact": "baseimage", - "release": "{{env `BASEIMAGE_RELEASE`}}", - "dockerhub_email": "{{env `DOCKERHUB_EMAIL`}}", - "dockerhub_username": "{{env `DOCKERHUB_USERNAME`}}", - "dockerhub_password": "{{env `DOCKERHUB_PASSWORD`}}" - }, - "provisioners": [ - { - "type": "shell", - "environment_vars": ["BASEIMAGE_RELEASE={{user `release`}}"], - "override": { - "virtualbox-iso": { - "scripts": [ - "scripts/common/init.sh", - "scripts/vagrant/init.sh", - "scripts/vagrant/virtualbox.sh", - "scripts/vagrant/vagrant.sh", - "scripts/common/setup.sh", - "scripts/vagrant/cleanup.sh", - "scripts/vagrant/zerodisk.sh" - ], - "execute_command": "echo 'vagrant'|sudo -S {{.Vars}} bash '{{.Path}}'" - } - } - } - ], - "builders": [ - { - "type": "virtualbox-iso", - "boot_command": [ - "", - "", - "", - "/install/vmlinuz", - " auto", - " console-setup/ask_detect=false", - " console-setup/layoutcode=us", - " console-setup/modelcode=pc105", - " debian-installer=en_US", - " fb=false", - " initrd=/install/initrd.gz", - " kbd-chooser/method=us", - " keyboard-configuration/layout=USA", - " keyboard-configuration/variant=USA", - " locale=en_US", - " netcfg/get_hostname=ubuntu-1404", - " netcfg/get_domain=vagrantup.com", - " noapic", - " preseed/url=http://{{ .HTTPIP }}:{{ .HTTPPort }}/preseed.cfg", - " -- ", - "" - ], - "headless": true, - "boot_wait": "10s", - "disk_size": 40960, - "guest_os_type": "Ubuntu_64", - "http_directory": "http", - "iso_checksum": "ca2531b8cd79ea5b778ede3a524779b9", - "iso_checksum_type": "md5", - "iso_url": "http://old-releases.ubuntu.com/releases/14.04.1/ubuntu-14.04.1-server-amd64.iso", - "ssh_username": "vagrant", - "ssh_password": "vagrant", - "ssh_port": 22, - "ssh_wait_timeout": "10000s", - "shutdown_command": "echo '/sbin/halt -h -p' > shutdown.sh; echo 'vagrant'|sudo -S bash 'shutdown.sh'", - "virtualbox_version_file": ".vbox_version", - "guest_additions_mode": "disable", - "hard_drive_interface": "sata", - "vboxmanage": [ - ["modifyvm", "{{.Name}}", "--vrde", "off"], - ["modifyvm", "{{.Name}}", "--pae", "off"], - ["modifyvm", "{{.Name}}", "--paravirtprovider", "legacy"] - ] - } - ], - "post-processors": [ - [ - { 
- "type": "vagrant", - "only": [ - "virtualbox-iso" - ], - "keep_input_artifact": false - }, - { - "type": "atlas", - "only": [ - "virtualbox-iso" - ], - "artifact": "hyperledger/{{user `artifact`}}", - "artifact_type": "vagrant.box", - "metadata": { - "provider": "virtualbox", - "version": "{{user `release`}}" - } - } - ] - ] -} diff --git a/images/base/release b/images/base/release deleted file mode 100644 index 2cfabea2f1e..00000000000 --- a/images/base/release +++ /dev/null @@ -1 +0,0 @@ -0.0.11 diff --git a/images/base/scripts/common/golang_crossCompileSetup.sh b/images/base/scripts/common/golang_crossCompileSetup.sh deleted file mode 100755 index 0d7468203d7..00000000000 --- a/images/base/scripts/common/golang_crossCompileSetup.sh +++ /dev/null @@ -1,28 +0,0 @@ -#!/bin/bash - -helpme() -{ - cat </etc/profile.d/goroot.sh -export GOROOT=$GOROOT -export GOPATH=$GOPATH -export PATH=\$PATH:$GOROOT/bin:$GOPATH/bin -EOF - - -# Install NodeJS - -if [ x$MACHINE = xs390x ] -then - apt-get install --yes nodejs -elif [ x$MACHINE = xppc64le ] -then - apt-get install --yes nodejs -else - NODE_VER=0.12.7 - NODE_PACKAGE=node-v$NODE_VER-linux-x64.tar.gz - TEMP_DIR=/tmp - SRC_PATH=$TEMP_DIR/$NODE_PACKAGE - - # First remove any prior packages downloaded in case of failure - cd $TEMP_DIR - rm -f node*.tar.gz - wget --quiet https://nodejs.org/dist/v$NODE_VER/$NODE_PACKAGE - cd /usr/local && sudo tar --strip-components 1 -xzf $SRC_PATH -fi - -# Install GRPC - -# ---------------------------------------------------------------- -# NOTE: For instructions, see https://github.com/google/protobuf -# -# ---------------------------------------------------------------- - -# First install protoc -cd /tmp -wget --quiet https://github.com/google/protobuf/archive/v3.0.2.tar.gz -tar xpzf v3.0.2.tar.gz -cd protobuf-3.0.2 -apt-get install -y autoconf automake libtool curl make g++ unzip -apt-get install -y build-essential -./autogen.sh -# NOTE: By default, the package will be installed to /usr/local. However, on many platforms, /usr/local/lib is not part of LD_LIBRARY_PATH. -# You can add it, but it may be easier to just install to /usr instead. -# -# To do this, invoke configure as follows: -# -# ./configure --prefix=/usr -# -#./configure -./configure --prefix=/usr - -if [ x$MACHINE = xs390x ] -then - echo FIXME: protobufs wont compile on 390, missing atomic call -else - make - make check - make install -fi -export LD_LIBRARY_PATH=/usr/local/lib:$LD_LIBRARY_PATH -cd ~/ - -# Install rocksdb -apt-get install -y libsnappy-dev zlib1g-dev libbz2-dev -cd /tmp -git clone https://github.com/facebook/rocksdb.git -cd rocksdb -git checkout tags/v4.1 -if [ x$MACHINE = xs390x ] -then - echo There were some bugs in 4.1 for z/p, dev stream has the fix, living dangereously, fixing in place - sed -i -e "s/-march=native/-march=z196/" build_tools/build_detect_platform - sed -i -e "s/-momit-leaf-frame-pointer/-DDUMBDUMMY/" Makefile -elif [ x$MACHINE = xppc64le ] -then - echo There were some bugs in 4.1 for z/p, dev stream has the fix, living dangereously, fixing in place. - echo Below changes are not required for newer releases of rocksdb. 
- sed -ibak 's/ifneq ($(MACHINE),ppc64)/ifeq (,$(findstring ppc64,$(MACHINE)))/g' Makefile -fi - -PORTABLE=1 make shared_lib -INSTALL_PATH=/usr/local make install-shared -ldconfig -cd ~/ - -# Make our versioning persistent -echo $BASEIMAGE_RELEASE > /etc/hyperledger-baseimage-release - -# clean up our environment -apt-get -y autoremove -apt-get clean -rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* diff --git a/images/base/scripts/docker/init.sh b/images/base/scripts/docker/init.sh deleted file mode 100755 index 4e1e5c95c50..00000000000 --- a/images/base/scripts/docker/init.sh +++ /dev/null @@ -1,4 +0,0 @@ -#!/bin/bash - -apt-get update -apt-get install -y wget diff --git a/images/base/scripts/vagrant/cleanup.sh b/images/base/scripts/vagrant/cleanup.sh deleted file mode 100644 index ddbc4b0b493..00000000000 --- a/images/base/scripts/vagrant/cleanup.sh +++ /dev/null @@ -1,15 +0,0 @@ -#!/bin/bash - -# Removing leftover leases and persistent rules -echo "cleaning up dhcp leases" -rm /var/lib/dhcp/* - -# Make sure Udev doesn't block our network -echo "cleaning up udev rules" -rm /etc/udev/rules.d/70-persistent-net.rules -mkdir /etc/udev/rules.d/70-persistent-net.rules -rm -rf /dev/.udev/ -rm /lib/udev/rules.d/75-persistent-net-generator.rules - -echo "Adding a 2 sec delay to the interface up, to make the dhclient happy" -echo "pre-up sleep 2" >> /etc/network/interfaces diff --git a/images/base/scripts/vagrant/init.sh b/images/base/scripts/vagrant/init.sh deleted file mode 100755 index 3c10c5a5b39..00000000000 --- a/images/base/scripts/vagrant/init.sh +++ /dev/null @@ -1,27 +0,0 @@ -#!/bin/bash - -set -e -set -x - -# Install headers so that we may build the vbox drivers layer -apt-get install -y build-essential zlib1g-dev libssl-dev libreadline-gplv2-dev unzip - -sed -i -e '/Defaults\s\+env_reset/a Defaults\texempt_group=sudo' /etc/sudoers -sed -i -e 's/%sudo ALL=(ALL:ALL) ALL/%sudo ALL=NOPASSWD:ALL/g' /etc/sudoers - -# Tweak sshd to prevent DNS resolution (speed up logins) -echo 'UseDNS no' >> /etc/ssh/sshd_config - -# Remove 5s grub timeout to speed up booting -cat < /etc/default/grub -# If you change this file, run 'update-grub' afterwards to update -# /boot/grub/grub.cfg. 
- -GRUB_DEFAULT=0 -GRUB_TIMEOUT=0 -GRUB_DISTRIBUTOR=`lsb_release -i -s 2> /dev/null || echo Debian` -GRUB_CMDLINE_LINUX_DEFAULT="quiet" -GRUB_CMDLINE_LINUX="debian-installer=en_US" -EOF - -update-grub diff --git a/images/base/scripts/vagrant/vagrant.sh b/images/base/scripts/vagrant/vagrant.sh deleted file mode 100755 index d43ab6351c1..00000000000 --- a/images/base/scripts/vagrant/vagrant.sh +++ /dev/null @@ -1,13 +0,0 @@ -#!/bin/bash - -set -e -set -x - -# Vagrant specific -date > /etc/vagrant_box_build_time - -# Installing vagrant keys -mkdir -pm 700 /home/vagrant/.ssh -wget --no-check-certificate 'https://raw.github.com/mitchellh/vagrant/master/keys/vagrant.pub' -O /home/vagrant/.ssh/authorized_keys -chmod 0600 /home/vagrant/.ssh/authorized_keys -chown -R vagrant /home/vagrant/.ssh diff --git a/images/base/scripts/vagrant/virtualbox.sh b/images/base/scripts/vagrant/virtualbox.sh deleted file mode 100755 index a8b2aeb1531..00000000000 --- a/images/base/scripts/vagrant/virtualbox.sh +++ /dev/null @@ -1,5 +0,0 @@ -#!/bin/bash - -set -x - -apt-get install --yes virtualbox-guest-utils diff --git a/images/base/scripts/vagrant/zerodisk.sh b/images/base/scripts/vagrant/zerodisk.sh deleted file mode 100644 index 35370d64a3d..00000000000 --- a/images/base/scripts/vagrant/zerodisk.sh +++ /dev/null @@ -1,10 +0,0 @@ -#!/bin/bash - -# Zero out the free space to save space in the final image: -dd if=/dev/zero of=/EMPTY bs=1M -rm -f /EMPTY - -# Sync to ensure that the delete completes before this moves on. -sync -sync -sync diff --git a/images/src/Dockerfile.in b/images/src/Dockerfile.in index 5b94527b6e4..507220741e5 100644 --- a/images/src/Dockerfile.in +++ b/images/src/Dockerfile.in @@ -1,2 +1,2 @@ -FROM hyperledger/fabric-baseimage:latest +FROM hyperledger/fabric-baseimage:_BASE_TAG_ ADD gopath.tar.bz2 $GOPATH/src/github.com/hyperledger/fabric diff --git a/membersrvc/Dockerfile b/membersrvc/Dockerfile deleted file mode 100644 index f19aab67568..00000000000 --- a/membersrvc/Dockerfile +++ /dev/null @@ -1,9 +0,0 @@ -from hyperledger/fabric-baseimage:latest -# Copy GOPATH src and install Peer -RUN mkdir -p /var/hyperledger/db -WORKDIR $GOPATH/src/github.com/hyperledger/fabric/ -COPY . . -WORKDIR membersrvc -RUN pwd -RUN CGO_CFLAGS=" " CGO_LDFLAGS="-lrocksdb -lstdc++ -lm -lz -lbz2 -lsnappy" go install && cp $GOPATH/src/github.com/hyperledger/fabric/membersrvc/membersrvc.yaml $GOPATH/bin -# RUN CGO_CFLAGS=" " CGO_LDFLAGS="-lrocksdb -lstdc++ -lm -lz -lbz2 -lsnappy" go install diff --git a/membersrvc/ca/ca_test.yaml b/membersrvc/ca/ca_test.yaml index 2dcc8bb2637..3b3e68a7587 100644 --- a/membersrvc/ca/ca_test.yaml +++ b/membersrvc/ca/ca_test.yaml @@ -240,9 +240,9 @@ chaincode: # This is the basis for the Golang Dockerfile. Additional commands will be appended depedendent upon the chaincode specification. Dockerfile: | - from hyperledger/fabric-baseimage - COPY src $GOPATH/src - WORKDIR $GOPATH + FROM hyperledger/fabric-ccenv:$(ARCH)-$(PROJECT_VERSION) + COPY src $GOPATH/src + WORKDIR $GOPATH #timeout in millisecs for starting up a container and waiting for Register to come through. 
1sec should be plenty for chaincode unit tests startuptimeout: 1000 diff --git a/mkdocs.yml b/mkdocs.yml index b81da566fdc..a9586af14fa 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -3,12 +3,15 @@ site_url: http://hyperledger-fabric.readthedocs.io theme: readthedocs repo_url: http://gerrit.hyperledger.org/r/fabric site_description: 'Welcome to the Hyperledger fabric documentation' +strict: true +theme_dir: 'docs/custom_theme' pages: - Home: index.md - Glossary: glossary.md - Protocol Spec: protocol-spec.md - Usecases: biz/usecases.md +- System Chaincode: SystemChaincode-noop.md - Installation and setup: - Chaincode or Application Developer Setup: Setup/Chaincode-setup.md @@ -22,9 +25,9 @@ pages: - Chaincode APIs: API/ChaincodeAPI.md - Core API: API/CoreAPI.md - CA API: API/MemberServicesAPI.md - - System Chaincode: SystemChaincodes/noop.md - Fabric Developer: + - v1.0 Preview: abstract_v1.md - Contributing: CONTRIBUTING.md - Getting an Account: Gerrit/lf-account.md - Gerrit: Gerrit/gerrit.md @@ -34,7 +37,7 @@ pages: - Maintainers: MAINTAINERS.md - Reviewing: Gerrit/reviewing.md - Changes: Gerrit/changes.md - - Style guides: +- Style guides: - Golang: Style-guides/go-style.md - FAQ: @@ -49,6 +52,19 @@ pages: - Attributes: tech/attributes.md - Best Practices: tech/best-practices.md +- NodeSDK: + - App-developer-env-setup: nodeSDK/app-developer-env-setup.md + - App-Overview: nodeSDK/app-overview.md + - Node-SDK-guide: nodeSDK/node-sdk-guide.md + - Node-SDK-indepth: nodeSDK/node-sdk-indepth.md + - Sample-Standalone-app: nodeSDK/sample-standalone-app.md + - Sample-web-app: nodeSDK/sample-web-app.md + +- Starter Kit: + - Starter Kit: starter/fabric-starter-kit.md + +- Releases: releases.md + markdown_extensions: - extra - tables diff --git a/peer/core.yaml b/peer/core.yaml index 034fd04f9b9..cfb071626dd 100644 --- a/peer/core.yaml +++ b/peer/core.yaml @@ -282,10 +282,9 @@ chaincode: # This is the basis for the Golang Dockerfile. Additional commands will # be appended depedendent upon the chaincode specification. Dockerfile: | - from hyperledger/fabric-baseimage - #from utxo:0.1.0 - COPY src $GOPATH/src - WORKDIR $GOPATH + FROM hyperledger/fabric-ccenv:$(ARCH)-$(PROJECT_VERSION) + COPY src $GOPATH/src + WORKDIR $GOPATH car: @@ -416,7 +415,7 @@ security: enabled: false # TCerts pool configuration. Multi-thread pool can also be configured - # by multichannel option switching concurrency in communication with TCA. + # by multichannel option switching concurrency in communication with TCA. multithreading: enabled: false multichannel: false diff --git a/protos/devops.proto b/protos/devops.proto index 4c578609bb3..9f8cf4b067c 100644 --- a/protos/devops.proto +++ b/protos/devops.proto @@ -50,10 +50,8 @@ service Devops { // Execute a transaction with a specific binding rpc EXP_ExecuteWithBinding(ExecuteWithBinding) returns (Response) {} - } - // Secret is a temporary object to establish security with the Devops. 
// A better solution using certificate will be introduced later message Secret { @@ -69,7 +67,7 @@ message SigmaInput { message ExecuteWithBinding { ChaincodeInvocationSpec chaincodeInvocationSpec = 1; - bytes binding = 2; + bytes binding = 2; } message SigmaOutput { @@ -78,7 +76,6 @@ message SigmaOutput { bytes asn1Encoding = 3; } - message BuildResult { enum StatusCode { diff --git a/scripts/provision/common.sh b/scripts/provision/common.sh deleted file mode 100755 index 0efe7e63570..00000000000 --- a/scripts/provision/common.sh +++ /dev/null @@ -1,8 +0,0 @@ -#!/bin/bash - -# Add any logic that is common to both the peer and docker environments here - -apt-get update -qq - -# Used by CHAINTOOL -apt-get install -y default-jre diff --git a/scripts/provision/docker.sh b/scripts/provision/docker.sh deleted file mode 100755 index 8ed21a7ca14..00000000000 --- a/scripts/provision/docker.sh +++ /dev/null @@ -1,118 +0,0 @@ -#!/bin/bash - -# --------------------------------------------------------------------------- -# Install the hyperledger/fabric-baseimage docker environment -# --------------------------------------------------------------------------- -# -# There are some interesting things to note here: -# -# 1) Note that we take the slightly unorthodox route of _not_ publishing -# a "latest" tag to dockerhub. Rather, we only publish specifically -# versioned images and we build the notion of "latest" here locally -# during provisioning. This is because the notion of always -# pulling the latest/greatest from the net doesn't really apply to us; -# we always want a coupling between the fabric and the docker environment. -# At the same time, requiring each and every Dockerfile to pull a specific -# version adds overhead to the Dockerfile generation logic. Therefore, -# we employ a hybrid solution that capitalizes on how docker treats the -# "latest" tag. That is, untagged references implicitly assume the tag -# "latest" (good for simple Dockerfiles), but will satisfy the tag from -# the local cache before going to the net (good for helping us control -# what "latest" means locally) -# -# A good blog entry covering the mechanism being exploited may be found here: -# -# http://container-solutions.com/docker-latest-confusion -# -# 2) A benefit of (1) is that we now have a convenient vehicle for performing -# JIT customizations of our docker image during provisioning just like we -# do for vagrant. For example, we can install new packages in docker within -# this script. We will capitalize on this in future patches. -# -# 3) Note that we do some funky processing of the environment (see "printenv" -# and "ENV" components below). Whats happening is we are providing a vehicle -# for allowing the baseimage to include environmental definitions using -# standard linux mechanisms (e.g. /etc/profile.d). The problem is that -# docker-run by default runs a non-login/non-interactive /bin/dash shell -# which omits any normal /etc/profile or ~/.bashrc type processing, including -# environment variable definitions. So what we do is we force the execution -# of an interactive shell and extract the defined environment variables -# (via "printenv") and then re-inject them (using Dockerfile::ENV) in a -# manner that will make them visible to a non-interactive DASH shell. -# -# This helps for things like defining things such as the GOPATH. -# -# An alternative would be to bake any Dockerfile::ENV items in during -# baseimage creation, but packer lacks the capability to do so, so this -# is a compromise. 
-# --------------------------------------------------------------------------- - -NAME=hyperledger/fabric-baseimage -RELEASE=`uname -m`-$1 -DOCKERHUB_NAME=$NAME:$RELEASE - -CURDIR=`dirname $0` - -docker inspect $DOCKERHUB_NAME 2>&1 > /dev/null -if [ "$?" == "0" ]; then - echo "BUILD-CACHE: exists!" - BASENAME=$DOCKERHUB_NAME -else - echo "BUILD-CACHE: Pulling \"$DOCKERHUB_NAME\" from dockerhub.." - docker pull $DOCKERHUB_NAME - docker inspect $DOCKERHUB_NAME 2>&1 > /dev/null - if [ "$?" == "0" ]; then - echo "BUILD-CACHE: Success!" - BASENAME=$DOCKERHUB_NAME - else - echo "BUILD-CACHE: WARNING - Build-cache unavailable, attempting local build" - (cd $CURDIR/../../images/base && make docker DOCKER_TAG=localbuild) - if [ "$?" != "0" ]; then - echo "ERROR: Build-cache could not be compiled locally" - exit -1 - fi - BASENAME=$NAME:localbuild - fi -fi - -# Ensure that we have the baseimage we are expecting -docker inspect $BASENAME 2>&1 > /dev/null -if [ "$?" != "0" ]; then - echo "ERROR: Unable to obtain a baseimage" - exit -1 -fi - -# any further errors should be fatal -set -e - -TMP=`mktemp -d` -DOCKERFILE=$TMP/Dockerfile - -LOCALSCRIPTS=$TMP/scripts -REMOTESCRIPTS=/hyperledger/scripts/provision - -mkdir -p $LOCALSCRIPTS -cp -R $CURDIR/* $LOCALSCRIPTS - -# extract the FQN environment and run our common.sh to create the :latest tag -cat < $DOCKERFILE -FROM $BASENAME -`for i in \`docker run -i $BASENAME /bin/bash -l -c printenv\`; -do - echo ENV $i -done` -COPY scripts $REMOTESCRIPTS -RUN $REMOTESCRIPTS/common.sh -RUN chmod a+rw -R /opt/gopath - -EOF - -[ ! -z "$http_proxy" ] && DOCKER_ARGS_PROXY="$DOCKER_ARGS_PROXY --build-arg http_proxy=$http_proxy" -[ ! -z "$https_proxy" ] && DOCKER_ARGS_PROXY="$DOCKER_ARGS_PROXY --build-arg https_proxy=$https_proxy" -[ ! -z "$HTTP_PROXY" ] && DOCKER_ARGS_PROXY="$DOCKER_ARGS_PROXY --build-arg HTTP_PROXY=$HTTP_PROXY" -[ ! -z "$HTTPS_PROXY" ] && DOCKER_ARGS_PROXY="$DOCKER_ARGS_PROXY --build-arg HTTPS_PROXY=$HTTPS_PROXY" -[ ! -z "$no_proxy" ] && DOCKER_ARGS_PROXY="$DOCKER_ARGS_PROXY --build-arg no_proxy=$no_proxy" -[ ! 
-z "$NO_PROXY" ] && DOCKER_ARGS_PROXY="$DOCKER_ARGS_PROXY --build-arg NO_PROXY=$NO_PROXY" -docker build $DOCKER_ARGS_PROXY -t $NAME:latest $TMP - -rm -rf $TMP diff --git a/scripts/provision/host.sh b/scripts/provision/host.sh deleted file mode 100755 index e5a8aa22f48..00000000000 --- a/scripts/provision/host.sh +++ /dev/null @@ -1,58 +0,0 @@ -#!/bin/bash - -CURDIR=`dirname $0` - -$CURDIR/common.sh - -# Install docker-compose -curl -L https://github.com/docker/compose/releases/download/1.5.2/docker-compose-`uname -s`-`uname -m` > /usr/local/bin/docker-compose -chmod +x /usr/local/bin/docker-compose - -# Install Python, pip, behave, nose -# -# install python-dev and libyaml-dev to get compiled speedups -apt-get install --yes python-dev -apt-get install --yes libyaml-dev - -apt-get install --yes python-setuptools -apt-get install --yes python-pip -pip install --upgrade pip -pip install behave -pip install nose - -# updater-server, update-engine, and update-service-common dependencies (for running locally) -pip install -I flask==0.10.1 python-dateutil==2.2 pytz==2014.3 pyyaml==3.10 couchdb==1.0 flask-cors==2.0.1 requests==2.4.3 - -# Python grpc package for behave tests -# Required to update six for grpcio -pip install --ignore-installed six -pip install --upgrade 'grpcio==0.13.1' - -# install ruby and apiaryio -#apt-get install --yes ruby ruby-dev gcc -#gem install apiaryio - -# Install Tcl prerequisites for busywork -apt-get install --yes tcl tclx tcllib - -# Install NPM for the SDK -apt-get install --yes npm - -# Install JDK 1.8 for Java chaincode development -add-apt-repository ppa:openjdk-r/ppa -y -apt-get update && apt-get install openjdk-8-jdk -y - -# Download Gradle and create sym link -wget https://services.gradle.org/distributions/gradle-2.12-bin.zip -P /tmp --quiet -unzip -q /tmp/gradle-2.12-bin.zip -d /opt && rm /tmp/gradle-2.12-bin.zip -ln -s /opt/gradle-2.12/bin/gradle /usr/bin - -# Download maven for supporting maven build in java chaincode -MAVEN_VERSION=3.3.9 -mkdir -p /usr/share/maven /usr/share/maven/ref -curl -fsSL http://apache.osuosl.org/maven/maven-3/$MAVEN_VERSION/binaries/apache-maven-$MAVEN_VERSION-bin.tar.gz \ - | tar -xzC /usr/share/maven --strip-components=1 \ - && ln -s /usr/share/maven/bin/mvn /usr/bin/mvn - -# Set the default JDK to 1.8 -update-java-alternatives -s java-1.8.0-openjdk-amd64 diff --git a/sdk/node/Makefile b/sdk/node/Makefile index 4a5cecac0d3..7637d6912ab 100644 --- a/sdk/node/Makefile +++ b/sdk/node/Makefile @@ -79,4 +79,4 @@ clean: @echo "[CLEAN]" -rm -rf node_modules -rm -rf doc - -find lib | grep -v "protos/google" | grep -v "hash.js" | xargs rm + -find lib -type f | grep -v "protos/google" | grep -v "hash.js" | xargs rm diff --git a/sdk/node/bin/run-unit-tests.sh b/sdk/node/bin/run-unit-tests.sh index a892f5cad85..9e6f332903c 100755 --- a/sdk/node/bin/run-unit-tests.sh +++ b/sdk/node/bin/run-unit-tests.sh @@ -82,6 +82,7 @@ runTests() { runAssetMgmtTests runAssetMgmtWithRolesTests runAssetMgmtWithDynamicRolesTests + runEventTests echo "End running tests in network mode" } @@ -227,6 +228,18 @@ runAssetMgmtWithDynamicRolesTests() { echo "END running asset management with dynamic roles tests" } +runEventTests() { + echo "BEGIN running event-tests ..." + preExample eventsender mycc5 + node $UNITTEST/event-tests.js + if [ $? -ne 0 ]; then + echo "ERROR running event-tests!" 
+ NODE_ERR_CODE=1 + fi + postExample eventsender + echo "END running event-tests" +} + # start process # $1 is executable path with any args # $2 is the log file diff --git a/sdk/node/src/crypto.ts b/sdk/node/src/crypto.ts index 5b577d0d281..a905687d9a2 100644 --- a/sdk/node/src/crypto.ts +++ b/sdk/node/src/crypto.ts @@ -115,13 +115,13 @@ export class Crypto { * @params hashAlgorithm The hash algorithm ('SHA2' or 'SHA3') */ setHashAlgorithm(hashAlgorithm:string):void { - this.checkHashFunction(hashAlgorithm); + Crypto.checkHashFunction(hashAlgorithm); this.hashAlgorithm = hashAlgorithm; this.initialize(); } - generateNonce() { + static generateNonce() { return crypto.randomBytes(NonceSize); } @@ -305,11 +305,11 @@ export class Crypto { return decryptedBytes; } - aesKeyGen() { + static aesKeyGen() { return crypto.randomBytes(AESKeyLength); } - aesCFBDecryt(key, encryptedBytes) { + static aesCFBDecrypt(key, encryptedBytes) { var iv = crypto.randomBytes(IVLength); var aes = new aesjs.ModeOfOperation.cfb(key, iv, IVLength); @@ -336,13 +336,13 @@ export class Crypto { var decryptedBytes, unpaddedBytes; - decryptedBytes = this.CBCDecrypt(key, bytes); - unpaddedBytes = this.PKCS7UnPadding(decryptedBytes); + decryptedBytes = Crypto.CBCDecrypt(key, bytes); + unpaddedBytes = Crypto.PKCS7UnPadding(decryptedBytes); return unpaddedBytes; }; - aes256GCMDecrypt(key:Buffer, ct:Buffer) { + static aes256GCMDecrypt(key:Buffer, ct:Buffer) { let decipher = crypto.createDecipheriv('aes-256-gcm', key, ct.slice(0, GCMStandardNonceSize)); decipher.setAuthTag(ct.slice(ct.length - GCMTagSize)); let dec = decipher.update( @@ -361,7 +361,7 @@ export class Crypto { if (!info) info = ""; - var key = this.hkdf2(bytesToBits(new Buffer(ikm)), keyBitLength, bytesToBits(salt), info, this.hashFunctionKeyDerivation); + var key = Crypto.hkdf2(bytesToBits(new Buffer(ikm)), keyBitLength, bytesToBits(salt), info, this.hashFunctionKeyDerivation); return bitsToBytes(key); @@ -394,7 +394,7 @@ export class Crypto { throw new Error("Illegal level: " + this.securityLevel + " - must be either 256 or 384"); } - private checkHashFunction(hashAlgorithm: string) { + private static checkHashFunction(hashAlgorithm: string) { if (!_isString(hashAlgorithm)) throw new Error("Illegal Hash function family: " + hashAlgorithm + " - must be either SHA2 or SHA3"); @@ -405,7 +405,7 @@ export class Crypto { private initialize() { this.checkSecurityLevel(this.securityLevel); - this.checkHashFunction(this.hashAlgorithm); + Crypto.checkHashFunction(this.hashAlgorithm); this.suite = this.hashAlgorithm.toLowerCase() + '-' + this.securityLevel; if (this.securityLevel == CURVE_P_256_Size) { @@ -454,7 +454,7 @@ export class Crypto { * @param {Object} [Hash=sjcl.hash.sha256] The hash function to use. * @return {bitArray} derived key. 
*/ - private hkdf2(ikm, keyBitLength, salt, info, Hash) { + private static hkdf2(ikm, keyBitLength, salt, info, Hash) { var hmac, key, i, hashLen, loops, curOut, ret = []; // Hash = Hash || sjcl.hash.sha256; @@ -496,7 +496,7 @@ export class Crypto { return sjcl.bitArray.clamp(ret, keyBitLength); } - private CBCDecrypt(key, bytes) { + private static CBCDecrypt(key, bytes) { debug('key length: ', key.length); debug('bytes length: ', bytes.length); var iv = bytes.slice(0, BlockSize); @@ -524,7 +524,6 @@ export class Crypto { start += BlockSize; end += BlockSize; } - ; decryptedBytes = Buffer.concat(decryptedBlocks); } @@ -539,7 +538,7 @@ export class Crypto { }; - private PKCS7UnPadding(bytes) { + private static PKCS7UnPadding(bytes) { //last byte is the number of padded bytes var padding = bytes.readUInt8(bytes.length - 1); diff --git a/sdk/node/src/hfc.ts b/sdk/node/src/hfc.ts index 00e1f97447e..002fa512e66 100644 --- a/sdk/node/src/hfc.ts +++ b/sdk/node/src/hfc.ts @@ -1010,7 +1010,7 @@ export class Member { if (err) return cb(err); self.enrollment = enrollment; // Generate queryStateKey - self.enrollment.queryStateKey = self.chain.cryptoPrimitives.generateNonce(); + self.enrollment.queryStateKey = crypto.Crypto.generateNonce() // Save state self.saveState(function (err) { @@ -1208,7 +1208,7 @@ export class TransactionContext extends events.EventEmitter { this.chain = member.getChain(); this.memberServices = this.chain.getMemberServices(); this.tcert = tcert; - this.nonce = this.chain.cryptoPrimitives.generateNonce(); + this.nonce = crypto.Crypto.generateNonce(); this.complete = false; this.timeoutId = null; } @@ -1526,7 +1526,7 @@ export class TransactionContext extends events.EventEmitter { var stateKey; if (transaction.pb.getType() == _fabricProto.Transaction.Type.CHAINCODE_DEPLOY) { // The request is for a deploy - stateKey = new Buffer(self.chain.cryptoPrimitives.aesKeyGen()); + stateKey = new Buffer(crypto.Crypto.aesKeyGen()); } else if (transaction.pb.getType() == _fabricProto.Transaction.Type.CHAINCODE_INVOKE ) { // The request is for an execute // Empty state key @@ -1603,7 +1603,7 @@ export class TransactionContext extends events.EventEmitter { ); debug('Decrypt Result [%s]', ct.toString('hex')); - return this.chain.cryptoPrimitives.aes256GCMDecrypt(key, ct); + return crypto.Crypto.aes256GCMDecrypt(key, ct); } /** @@ -2842,15 +2842,16 @@ export function newFileKeyValStore(dir:string):KeyValStore { /** * The ChainCodeCBE is used internal to the EventHub to hold chaincode event registration callbacks. 
*/ -class ChainCodeCBE { +export class ChainCodeCBE { + // chaincode id ccid: string; - eventname: string; - payload: Uint8Array; + // event name regex filter + eventNameFilter: RegExp; + // callback function to invoke on successful filter match cb: Function; - constructor(ccid: string,eventname: string,payload: Uint8Array, cb: Function) { + constructor(ccid: string, eventNameFilter: string, cb: Function) { this.ccid = ccid; - this.eventname = eventname; - this.payload = payload; + this.eventNameFilter = new RegExp(eventNameFilter); this.cb = cb; } } @@ -2879,7 +2880,7 @@ export class EventHub { this.chaincodeRegistrants = new HashTable(); this.blockRegistrants = new Set(); this.txRegistrants = new HashTable(); - this.peeraddr = "localhost:7053"; + this.peeraddr = null; this.connected = false; } @@ -2893,6 +2894,7 @@ export class EventHub { public connect() { if (this.connected) return; + if (!this.peeraddr) throw Error("Must set peer address before connecting."); this.events = grpc.load(__dirname + "/protos/events.proto" ).protos; this.client = new this.events.Events(this.peeraddr,grpc.credentials.createInsecure()); this.call = this.client.chat(); @@ -2902,11 +2904,15 @@ export class EventHub { let eh = this; // for callback context this.call.on('data', function(event) { if ( event.Event == "chaincodeEvent" ) { - var cbe = eh.chaincodeRegistrants.get(event.chaincodeEvent.chaincodeID + "/" + event.chaincodeEvent.eventName); - if ( cbe ) { - cbe.payload = event.chaincodeEvent.payload; - cbe.cb(cbe); - } + var cbtable = eh.chaincodeRegistrants.get(event.chaincodeEvent.chaincodeID); + if( !cbtable ) { + return; + } + cbtable.forEach(function (cbe) { + if ( cbe.eventNameFilter.test(event.chaincodeEvent.eventName)) { + cbe.cb(event.chaincodeEvent); + } + }); } else if ( event.Event == "block") { eh.blockRegistrants.forEach(function(cb){ cb(event.block); @@ -2928,19 +2934,35 @@ export class EventHub { this.connected = false; } - public registerChaincodeEvent(ccid: string, eventname: string, callback: Function){ + public registerChaincodeEvent(ccid: string, eventname: string, callback: Function): ChainCodeCBE { if (!this.connected) return; - let cb = new ChainCodeCBE(ccid, eventname, null, callback); - let register = { register: { events: [ { eventType: "CHAINCODE", chaincodeRegInfo:{ chaincodeID: ccid , eventName: eventname }} ] }}; - this.chaincodeRegistrants.put(ccid + "/" + eventname, cb); - this.call.write(register); + let cb = new ChainCodeCBE(ccid, eventname, callback); + let cbtable = this.chaincodeRegistrants.get(ccid); + if ( !cbtable ) { + cbtable = new Set(); + this.chaincodeRegistrants.put(ccid, cbtable); + cbtable.add(cb); + let register = { register: { events: [ { eventType: "CHAINCODE", chaincodeRegInfo:{ chaincodeID: ccid , eventName: "" }} ] }}; + this.call.write(register); + } else { + cbtable.add(cb); + } + return cb; } - public unregisterChaincodeEvent(ccid: string, eventname: string){ + public unregisterChaincodeEvent(cbe: ChainCodeCBE){ if (!this.connected) return; - var unregister = { unregister: { events: [ { eventType: "CHAINCODE", chaincodeRegInfo:{ chaincodeID: ccid, eventName: eventname }} ] }}; - this.chaincodeRegistrants.remove(ccid + "/" + eventname); - this.call.write(unregister); + let cbtable = this.chaincodeRegistrants.get(cbe.ccid); + if ( !cbtable ) { + debug("No event registration for ccid %s ", cbe.ccid); + return; + } + cbtable.delete(cbe); + if( cbtable.size <= 0 ) { + var unregister = { unregister: { events: [ { eventType: "CHAINCODE", chaincodeRegInfo:{ 
chaincodeID: cbe.ccid, eventName: "" }} ] }}; + this.chaincodeRegistrants.remove(cbe.ccid); + this.call.write(unregister); + } } public registerBlockEvent(callback:Function){ diff --git a/sdk/node/test/unit/event-tests.js b/sdk/node/test/unit/event-tests.js new file mode 100644 index 00000000000..ad045b57ebf --- /dev/null +++ b/sdk/node/test/unit/event-tests.js @@ -0,0 +1,516 @@ +/** + * Copyright London Stock Exchange 2016 All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +var hfc = require('../..'); +var test = require('tape'); +var util = require('util'); +var fs = require('fs'); + +// +// Create a test chain +// + +var chain = hfc.newChain("testChain"); + +// +// Configure the test chain +// +// Set the directory for the local file-based key value store, point to the +// address of the membership service, and add an associated peer node. +// +// If the "tlsca.cert" file exists then the client-sdk will +// try to connect to the member services using TLS. +// The "tlsca.cert" is supposed to contain the root certificate (in PEM format) +// to be used to authenticate the member services certificate. +// + +chain.setKeyValStore(hfc.newFileKeyValStore('/tmp/keyValStore')); +if (fs.existsSync("tlsca.cert")) { + chain.setMemberServicesUrl("grpcs://localhost:7054", fs.readFileSync('tlsca.cert')); +} else { + chain.setMemberServicesUrl("grpc://localhost:7054"); +} +chain.addPeer("grpc://localhost:7051"); +chain.eventHubConnect("localhost:7053"); + +process.on('exit', function (){ + chain.eventHubDisconnect(); +}); + +// +// Set the chaincode deployment mode to either developent mode (user runs chaincode) +// or network mode (code package built and sent to the peer). +// + +var mode = process.env['DEPLOY_MODE']; +console.log("$DEPLOY_MODE: " + mode); +if (mode === 'dev') { + chain.setDevMode(true); +} else { + chain.setDevMode(false); +} + +// +// Configure test users +// +// Set the values required to register a user with the certificate authority. +// + +test_user1 = { + name: "WebApp_user1", + role: 1, // Client + affiliation: "bank_a" +}; + +// +// Declare variables to store the test user Member objects returned after +// registration and enrollment as they will be used across multiple tests. +// + +var test_user_Member1; + +// +// Declare test variables that will be used to store chaincode values used +// across multiple tests. +// + +// Path to the local directory containing the chaincode project under $GOPATH +var testChaincodePath = "github.com/eventsender/"; + +// Chaincode hash that will be filled in by the deployment operation or +// chaincode name that will be referenced in development mode. +var testChaincodeName = "mycc5"; + +// testChaincodeID will store the chaincode ID value after deployment. 
+var testChaincodeID; + +function getUser(name, cb) { + chain.getUser(name, function (err, user) { + if (err) return cb(err); + if (user.isEnrolled()) return cb(null, user); + // User is not enrolled yet, so perform both registration and enrollment + // The chain registrar is already set inside 'Set chain registrar' test + var registrationRequest = { + enrollmentID: name, + affiliation: "bank_a" + }; + user.registerAndEnroll(registrationRequest, function (err) { + if (err) cb(err, null) + cb(null, user) + }); + }); +} + +// +// Enroll the WebAppAdmin member and set as registrar then register +// and enroll new user with certificate authority +// This test is a prerequisite to further tests +// + +test('Set up chain prerequisites', function (t) { + t.plan(5); + + // Get the WebAppAdmin member + chain.getMember("WebAppAdmin", function (err, WebAppAdmin) { + if (err) { + t.fail("Failed to get WebAppAdmin member " + " ---> " + err); + t.end(err); + process.exit(1); + } else { + t.pass("Successfully got WebAppAdmin member" /*+ " ---> " + JSON.stringify(crypto)*/); + + // Enroll the WebAppAdmin member with the certificate authority using + // the one time password hard coded inside the membersrvc.yaml. + pw = "DJY27pEnl16d"; + WebAppAdmin.enroll(pw, function (err, crypto) { + if (err) { + t.fail("Failed to enroll WebAppAdmin member " + " ---> " + err); + t.end(err); + process.exit(1); + } else { + t.pass("Successfully enrolled WebAppAdmin member" /*+ " ---> " + JSON.stringify(crypto)*/); + + // Confirm that the WebAppAdmin token has been created in the key value store + path = chain.getKeyValStore().dir + "/member." + WebAppAdmin.getName(); + + fs.exists(path, function (exists) { + if (exists) { + t.pass("Successfully stored client token" /*+ " ---> " + WebAppAdmin.getName()*/); + } else { + t.fail("Failed to store client token for " + WebAppAdmin.getName() + " ---> " + err); + t.end(err); + // Exit the test script after a failure + process.exit(1); + } + chain.setRegistrar(WebAppAdmin); + // Register and enroll test_user + getUser(test_user1.name, function (err, user) { + if (err) { + t.fail("Failed to get " + test_user1.name + " ---> ", err); + t.end(err); + // Exit the test script after a failure + process.exit(1); + } else { + test_user_Member1 = user; + + t.pass("Successfully registered and enrolled " + test_user_Member1.getName()); + + // Confirm that the user token has been created in the key value store + path = chain.getKeyValStore().dir + "/member." + test_user1.name; + fs.exists(path, function (exists) { + if (exists) { + t.pass("Successfully stored client token" /*+ " ---> " + test_user1.name*/); + } else { + t.fail("Failed to store client token for " + test_user1.name + " ---> " + err); + t.end(err); + // Exit the test script after a failure + process.exit(1); + } + }); + } + }); + }); + } + }); + } + }); +}); + +// +// Create and issue a chaincode deploy request by the test user, who was +// registered and enrolled in the UT above. Deploy a testing chaincode from +// a local directory in the user's $GOPATH. 
+// + +test('Deploy a chaincode by enrolled user', function (t) { + t.plan(1); + + // Construct the deploy request + var deployRequest = { + // Function to trigger + fcn: "init", + // Arguments to the initializing function + args: [] + }; + + if (mode === 'dev') { + // Name required for deploy in development mode + deployRequest.chaincodeName = testChaincodeName; + } else { + // Path (under $GOPATH) required for deploy in network mode + deployRequest.chaincodePath = testChaincodePath; + } + + // Trigger the deploy transaction + var deployTx = test_user_Member1.deploy(deployRequest); + + // the deploy complete is triggered as a result of a fabric deploy + // complete event automatically when a event source is connected + deployTx.on('complete', function (results) { + // Deploy request completed successfully + console.log(util.format("deploy results: %j", results)); + // Set the testChaincodeID for subsequent tests + testChaincodeID = results.chaincodeID; + console.log("testChaincodeID:" + testChaincodeID); + t.pass(util.format("Successfully deployed chaincode: request=%j, response=%j", deployRequest, results)); + }); + deployTx.on('error', function (err) { + // Deploy request failed + t.fail(util.format("Failed to deploy chaincode: request=%j, error=%j", deployRequest, err)); + // Exit the test script after a failure + process.exit(1); + }); +}); + +// +// Issue a chaincode invoke to generate event and listen for the event +// by registering with chaincode id and event name +// + +test('Invoke chaincode and have it generate an event', function (t) { + t.plan(2); + + var evtstring = "event-test"; + // Construct the invoke request + var invokeRequest = { + // Name (hash) required for invoke + chaincodeID: testChaincodeID, + // Function to trigger + fcn: "invoke", + // Parameters for the invoke function + args: [evtstring] + }; + var eh = chain.getEventHub(); + var duration = chain.getInvokeWaitTime() * 1000; + var timedout = true; + var timeoutId = null; + + // register for chaincode event + var regid = eh.registerChaincodeEvent(testChaincodeID, "^evtsender$", function(event) { + timedout = false; + if (timeoutId) { + clearTimeout(timeoutId); + } + t.equal(event.payload.toString(), "Event 0," + evtstring, "Successfully received expected chaincode event payload"); + eh.unregisterChaincodeEvent(regid); + }); + // Trigger the invoke transaction + var invokeTx = test_user_Member1.invoke(invokeRequest); + // set timout on event sent by chaincode invoke + timeoutId = setTimeout(function() { + if(timedout) { + eh.unregisterChaincodeEvent(regid); + t.fail("Failed to receive chaincode event"); + process.exit(1); + } + }, + duration); + + // Print the invoke results + invokeTx.on('complete', function (results) { + // Invoke transaction submitted successfully + t.pass(util.format("Successfully completed chaincode invoke transaction: request=%j, response=%j", invokeRequest, results)); + }); + invokeTx.on('error', function (err) { + // Invoke transaction submission failed + t.fail(util.format("Failed to submit chaincode invoke transaction: request=%j, error=%j", invokeRequest, err)); + // Exit the test script after a failure + process.exit(1); + }); +}); +// +// Issue a chaincode invoke to generate event and listen for the event +// on 2 registrations +// + +test('Invoke chaincode, have it generate an event, and receive event on 2 registrations', function (t) { + t.plan(3); + + var evtstring = "event-test"; + // Construct the invoke request + var invokeRequest = { + // Name (hash) required for invoke + 
chaincodeID: testChaincodeID, + // Function to trigger + fcn: "invoke", + // Parameters for the invoke function + args: [evtstring] + }; + var eh = chain.getEventHub(); + var duration = chain.getInvokeWaitTime() * 1000; + var timedout = true; + var timeoutId = null; + var eventcount = 0; + + // register for chaincode event + var regid1 = eh.registerChaincodeEvent(testChaincodeID, "^evtsender$", function(event) { + eventcount++; + if (eventcount > 1) { + if (timeoutId) { + clearTimeout(timeoutId); + } + } + t.equal(event.payload.toString(), "Event 1," + evtstring, "Successfully received expected chaincode event payload"); + eh.unregisterChaincodeEvent(regid1); + }); + // register for chaincode event + var regid2 = eh.registerChaincodeEvent(testChaincodeID, "^evtsender$", function(event) { + eventcount++; + if (eventcount > 1) { + if (timeoutId) { + clearTimeout(timeoutId); + } + } + t.equal(event.payload.toString(), "Event 1," + evtstring, "Successfully received expected chaincode event payload"); + eh.unregisterChaincodeEvent(regid2); + }); + // Trigger the invoke transaction + var invokeTx = test_user_Member1.invoke(invokeRequest); + // set timout on event sent by chaincode invoke + timeoutId = setTimeout(function() { + if(eventcount > 1) { + eh.unregisterChaincodeEvent(regid1); + eh.unregisterChaincodeEvent(regid2); + t.fail("Failed to receive chaincode event"); + process.exit(1); + } + }, + duration); + + // Print the invoke results + invokeTx.on('complete', function (results) { + // Invoke transaction submitted successfully + t.pass(util.format("Successfully completed chaincode invoke transaction: request=%j, response=%j", invokeRequest, results)); + }); + invokeTx.on('error', function (err) { + // Invoke transaction submission failed + t.fail(util.format("Failed to submit chaincode invoke transaction: request=%j, error=%j", invokeRequest, err)); + // Exit the test script after a failure + process.exit(1); + }); +}); + +// +// Issue a chaincode invoke to generate event and listen for the event +// by registering with chaincode id and wildcarded event name +// + +test('Generate chaincode event and receive it with wildcard', function (t) { + t.plan(2); + + var evtstring = "event-test"; + // Construct the invoke request + var invokeRequest = { + // Name (hash) required for invoke + chaincodeID: testChaincodeID, + // Function to trigger + fcn: "invoke", + // Parameters for the invoke function + args: [evtstring] + }; + var eh = chain.getEventHub(); + var duration = chain.getInvokeWaitTime() * 1000; + var timedout = true; + var timeoutId = null; + + // register for chaincode event with wildcard event name + var regid = eh.registerChaincodeEvent(testChaincodeID, ".*", function(event) { + timedout = false; + if (timeoutId) { + clearTimeout(timeoutId); + } + t.equal(event.payload.toString(), "Event 2," + evtstring, "Successfully received expected chaincode event payload"); + eh.unregisterChaincodeEvent(regid); + }); + // Trigger the invoke transaction + var invokeTx = test_user_Member1.invoke(invokeRequest); + // set timout on event sent by chaincode invoke + timeoutId = setTimeout(function() { + if(timedout) { + eh.unregisterChaincodeEvent(regid); + t.fail("Failed to receive chaincode event"); + process.exit(1); + } + }, + duration); + + // Print the invoke results + invokeTx.on('complete', function (results) { + // Invoke transaction submitted successfully + t.pass(util.format("Successfully completed chaincode invoke transaction: request=%j, response=%j", invokeRequest, results)); + }); + 
invokeTx.on('error', function (err) { + // Invoke transaction submission failed + t.fail(util.format("Failed to submit chaincode invoke transaction: request=%j, error=%j", invokeRequest, err)); + // Exit the test script after a failure + process.exit(1); + }); +}); + +// +// Issue a chaincode invoke to generate event and listen for the event +// by registering with chaincode id and a bogus event name +// + +test('Generate an event that fails to be received', function (t) { + t.plan(2); + + var evtstring = "event-test"; + // Construct the invoke request + var invokeRequest = { + // Name (hash) required for invoke + chaincodeID: testChaincodeID, + // Function to trigger + fcn: "invoke", + // Parameters for the invoke function + args: [evtstring] + }; + var eh = chain.getEventHub(); + var duration = chain.getInvokeWaitTime() * 1000; + var timedout = true; + var timeoutId = null; + + // register for chaincode event with bogus event name + var regid = eh.registerChaincodeEvent(testChaincodeID, "bogus", function(event) { + timedout = false; + if (timeoutId) { + clearTimeout(timeoutId); + } + t.fail("Received chaincode event from bogus registration"); + eh.unregisterChaincodeEvent(regid); + process.exit(1); + }); + // Trigger the invoke transaction + var invokeTx = test_user_Member1.invoke(invokeRequest); + // set timout on event sent by chaincode invoke + timeoutId = setTimeout(function() { + if(timedout) { + eh.unregisterChaincodeEvent(regid); + t.pass("Failed to receive chaincode event"); + } + }, + duration); + + // Print the invoke results + invokeTx.on('complete', function (results) { + // Invoke transaction submitted successfully + t.pass(util.format("Successfully completed chaincode invoke transaction: request=%j, response=%j", invokeRequest, results)); + }); + invokeTx.on('error', function (err) { + // Invoke transaction submission failed + t.fail(util.format("Failed to submit chaincode invoke transaction: request=%j, error=%j", invokeRequest, err)); + // Exit the test script after a failure + process.exit(1); + }); +}); + +// +// +// Create and issue a chaincode query request by the test user, who was +// registered and enrolled in the UT above. Query a chaincode for +// number of events generated. +// + +test('Query chaincode state for number of events sent', function (t) { + t.plan(1); + + // Construct the query request + var queryRequest = { + // Name (hash) required for query + chaincodeID: testChaincodeID, + // Function to trigger + fcn: "query", + // Existing state variable to retrieve + args: [""] + }; + + // Trigger the query transaction + var queryTx = test_user_Member1.query(queryRequest); + + // Print the query results + queryTx.on('complete', function (results) { + var result = JSON.parse(results.result.toString()); + var count = parseInt(result.NoEvents); + t.equal(count, 4, "Successfully queried correct number of events generated."); + chain.eventHubDisconnect(); + }); + queryTx.on('error', function (err) { + // Query failed + t.fail(util.format("Failed to query chaincode state: request=%j, error=%j", queryRequest, err)); + t.end(err); + }); +}); + diff --git a/tools/busywork/bin/busy b/tools/busywork/bin/busy index 242c639a6c1..c7d87091d09 100755 --- a/tools/busywork/bin/busy +++ b/tools/busywork/bin/busy @@ -1,13 +1,13 @@ #!/usr/bin/tclsh # Copyright IBM Corp. 2016. All Rights Reserved. -# +# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. @@ -19,18 +19,18 @@ set usage { The 'busy' script runs commands on one or more peers in a busywork network, printing the salient portion of the response on stdout. 'busy' can be seen as -an easier-to-use version of the Hyperledger fabric command-line-interface -(CLI) in that 'busy' takes care of various kinds of bookkeeping and -boilerplate code behind the scenes. 'busy' is probably best used as a tool for -small scipts, or for issuing a few interactive commands to a peer -network. Alhough complex test scripts could be implemented entirely as 'busy' -calls, the performance of a test implemented this way would likely suffer from -the process-creation overhead of each 'busy' invocation. +an easier-to-use, enhanced version of the Hyperledger fabric +command-line-interface (CLI) in that 'busy' takes care of various kinds of +bookkeeping and boilerplate code behind the scenes. 'busy' is probably best +used as a tool for small scipts, or for issuing a few interactive commands to +a peer network. Alhough complex test scripts could be implemented entirely as +'busy' calls, the performance of a test implemented this way would likely +suffer from the process-creation overhead of each 'busy' invocation. If the 'busy' command targets a single peer (see below) then the response from -that peer is printed. If multiple peers are targeted, then the results are -collected and printed as a JSON object, where each result is keyed by the peer -name, e.g., +or for that peer is printed. If multiple peers are targeted, then the results +are collected and printed as a JSON object, where each result is keyed by the +peer name, e.g., { "vp0" : , @@ -39,8 +39,8 @@ name, e.g., "vp" : response> } -Use -json to force results from operations on a single peer to print as a JSON -object. +Include the -json option to force results from operations on a single peer to +print as a JSON object. 'busy' is only supported for peer networks described by a 'network' file in the BUSYWORK_HOME directory. The to target are named by the peer IDs @@ -76,6 +76,9 @@ The following command and argument forms are supported: ping + pid + ps + The 'network' and 'chaincodes' commands simply print the current 'network' and 'chaincodes' files respectively from the implied $BUSYWORK_HOME. @@ -110,6 +113,17 @@ queries fail. If the ping query succeeds then the output of the ping queries is returned. This function currently assumes that the chaincode implments a 'ping' query function with no parameters. +The 'pid' command simply returns the PID of each of the . + +The 'ps' command is used to obtain 'ps' information from one or more +peers. For each peer implied by the specification, the result returned +is the result from executing 'ps' as follows, where is the PID of each +of the : + + ps -p -ww --noheader -o + +Note that leading/trailing whitespace is removed from the result. + Examples: busy chaincodes @@ -124,6 +138,9 @@ Examples: busy ping "*" cc2 + busy pid vp0 + busy ps vp0 etime,cputime + Optional arguments, with defaults after the colon: -h | -help | --help : N/A @@ -149,12 +166,12 @@ Optional arguments, with defaults after the colon: The -waitFor option is supported for the 'deploy', 'invoke' and 'ping' commands only. 
The semantics are explained below in the section headed - "Semantics of -waitFor" + "Semantics of -waitFor" -json : See below Select -json to force even single-peer operations to print as a JSON - object, rather than simply as a value. This opiton is ignored for the + object, rather than simply as a value. This option is ignored for the 'chaincodes' and 'network' commands. @@ -191,6 +208,14 @@ proc singletonCommand {cmd} { } +proc fixedArgs {cmd nArgs} { + + if {[llength [parms args]] != $nArgs} { + errorExit "The '$cmd' command expects $nArgs arguments." + } +} + + proc chaincodes {} { singletonCommand chaincodes @@ -294,7 +319,7 @@ proc invoke {} { "Deployed IDs are $a(ids)" } set name $a($ccId.name) - + if {![null [parms waitFor]]} { set heights \ [mapeach address [parms restAddresses] { @@ -322,7 +347,7 @@ proc invoke {} { } } - + proc query {} { waitForNotOK query @@ -344,7 +369,7 @@ proc query {} { errorExit } set name $a($ccId.name) - + parms results \ [mapeach address [parms restAddresses] { return [fabric::query $address [parms user] $name $function $args] @@ -369,7 +394,7 @@ proc ping {} { errorExit } set name $a($ccId.name) - + proc _ping {name} { set results {} foreach address [parms restAddresses] { @@ -395,7 +420,33 @@ proc ping {} { } } - + +proc pid {} { + + waitForNotOK pid + fixedArgs pid 0 + + parms results [mapeach peer [parms peers] { + return [parms network.peer.$peer.pid] + }] +} + + +proc ps {} { + + waitForNotOK ps + fixedArgs ps 1 + + parms results [mapeach peer [parms peers] { + set pid [parms network.peer.$peer.pid] + if {[catch {exec ps -p $pid -ww --noheader -o [parms args]} result]} { + errorExit "Exec of 'ps' failed : $result" + } + return [string trim $result]; # Remove leading/trailing whitespace + }] +} + + ############################################################################ # The script ############################################################################ @@ -410,11 +461,11 @@ setLoggingLevel {} warn set options { {enum {-h -help --help} parms(help) 0 p_help} {key -home parms(home) {}} - {bool -user parms(user) {} p_user} + {key -user parms(user) {} p_user} {key -waitFor parms(waitFor) {}} {bool -json parms(json) 0} } - + mapKeywordArgs $argv $options parms(other) if {$p_help} { @@ -430,7 +481,7 @@ parms command [first [parms other]] switch [parms command] { chaincodes {chaincodes} network {network} -} +} parms peers [busywork::peersFromSpec [second [parms other]]] parms args [restn [parms other] 2] @@ -459,6 +510,8 @@ switch [parms command] { invoke {invoke} query {query} ping {ping} + pid {pid} + ps {ps} default {errorExit "Unrecognized command: [parms command]"} } @@ -489,9 +542,3 @@ if {[parms json] || [expr {[llength [parms results]] > 1}]} { puts [first [parms results]] } - - - - - - diff --git a/tools/busywork/bin/pprofClient b/tools/busywork/bin/pprofClient index 888c06f6a23..0e8df692184 100755 --- a/tools/busywork/bin/pprofClient +++ b/tools/busywork/bin/pprofClient @@ -1,13 +1,13 @@ #!/usr/bin/tclsh # Copyright IBM Corp. 2016. All Rights Reserved. -# +# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
@@ -66,7 +66,7 @@ of the profile files - they are named by the host:port of the profiling port, and given sequence numbers to make them unique. When this script is run against busywork networks, the final act of the script is to create symbolic links that substitute the host:port portion of the file names with the peer -names (vp[0,...N]). +names (vp[0,...N]). Optional arguments, with defaults after the colon: @@ -140,7 +140,7 @@ set options { {key -peers parms(peers) {} p_peers} {key -port parms(port) 6060} } - + mapKeywordArgs $argv $options parms(other) if {$p_help} { @@ -153,12 +153,12 @@ if {[parms quiet]} { } else { setLoggingLevel {} note } - + parms home [busywork::home [parms home]] # Handle implicit vs. explict peers. With implicit peers we can give the files # recognizable names. With explicit peers we can only use the host:port as the -# names. +# names. if {$p_peers} { @@ -175,34 +175,34 @@ if {$p_peers} { parms profileAddresses [parms ids] } else { - + # Implicit peers - + if {[catch {busywork::networkToArray ::parms network.} msg]} { errorExit $msg } - + switch [llength [parms other]] { - + 1 { # All peers - + parms service [parms other] parms ids [parms network.peer.ids] parms profileAddresses [parms network.peer.profileAddresses] } 2 { # Subset of peers - + set spec [first [parms other]] parms service [second [parms other]] - + if {[catch {enumerate $spec} result]} { errorExit \ "Error parsing peer specification : $result" } set nPeers [llength [parms network.peer.ids]] - + parms ids \ [mapeach n $result { if {$n > $nPeers} { @@ -214,7 +214,7 @@ if {$p_peers} { [mapeach n $result { return [lindex [parms network.peer.profileAddresses] $n] }] - + } default { puts $usage @@ -283,7 +283,8 @@ if {[waitPIDs $pids]} { # find the profile files that the tool just created and link the names given # by the tool with names based on the peer IDs. The Go pprof tool does not # support providing names for the profile files - it names them based on their -# http addresses using the form 'pprof..'. +# http addresses using the form 'pprof.[.].'. The form with the [.] was introduced in Go 1.7. # The renaming-via-links being done here is not perfect in every case, and can # cause confusion (but NOT lost files) if profiles from multiple runs are @@ -297,31 +298,41 @@ if {!$p_peers} { set idMap($target) $id } foreach file [glob pprof.*] { - if {[regexp {^pprof.([^:]+:\d+)(.*)(\d\d\d)(.*)} \ + set executable "" + if {[regexp {^pprof\.([^:]+:\d+)\.(.*)(\d\d\d)(.*)} \ $file match host a b c]} { - if {[info exists idMap($host)]} { - if {$p_tag} { - set b [parms tag] - } - set reName pprof.$host$a$b$c - set newName pprof.$idMap($host)$a$b$c - if {![file exists $newName]} { + + # We can't disambiguate the . form from a + # generic with a regular expression. So we need to + # check to see if one of the hosts we're interested in is a + # terminal suffix of the 'host' pulled form the regex match. 
+ + foreach target [parms profileAddresses] { + if {[string match *$target $host]} { if {$p_tag} { - if {[catch {exec mv $file $reName} why]} { - errorExit "Can't rename $file to $reName : $why" - } - if {[catch {exec ln -sf $reName $newName} code]} { - errorExit \ - "Error linking $file to $newName : $code" - } - note {} "$newName -> $reName (from $file)" - } else { - if {[catch {exec ln -sf $file $newName} code]} { - errorExit \ - "Error linking $file to $newName : $code" + set b [parms tag] + } + set reName pprof.$host.$a$b$c + set newName pprof.$idMap($target).$a$b$c + if {![file exists $newName]} { + if {$p_tag} { + if {[catch {exec mv $file $reName} why]} { + errorExit "Can't rename $file to $reName : $why" + } + if {[catch {exec ln -sf $reName $newName} code]} { + errorExit \ + "Error linking $file to $newName : $code" + } + note {} "$newName -> $reName (from $file)" + } else { + if {[catch {exec ln -sf $file $newName} code]} { + errorExit \ + "Error linking $file to $newName : $code" + } + note {} "$newName -> $file" } - note {} "$newName -> $file" - } + } + break } } } diff --git a/tools/busywork/bin/userModeNetwork b/tools/busywork/bin/userModeNetwork index bdeab1ce601..9bdfc8e0a03 100755 --- a/tools/busywork/bin/userModeNetwork +++ b/tools/busywork/bin/userModeNetwork @@ -65,6 +65,11 @@ Optional arguments, with defaults after the colon: the 'membersrvc' server. Peer login credentials are obtained from the fabric/membersrvc.yaml file. +-privacy | -noPrivacy : -noPrivacy + + Controls whether privacy is enabled. This option only has effect if + -security is enabled. + -noops | -batch : -batch Use one of these options to select the consensus mode. The default @@ -194,6 +199,7 @@ set options { {key -home parms(home) {}} {key -interface parms(interface) {} p_interface} {bool {-security -noSecurity} parms(security) 0} + {bool {-privacy -noPrivacy} parms(privacy) 0} {enum {-noops -batch} parms(consensus) -batch} {bool {-profile -noProfile} parms(profile) 1} {enum {-pristine -clean -dirty} parms(clean) -pristine} @@ -360,6 +366,7 @@ puts $config " \"host\": \"local\"," puts $config " \"date\": \"[timestamp]\"," puts $config " \"createdBy\": \"userModeNetwork\"," puts $config " \"security\": \"[? [parms security] true false]\"," +puts $config " \"privacy\": \"[? [parms privacy] true false]\"," puts $config " \"consensus\": \"$CONSENSUS_TO_MODE([parms consensus])\"," puts $config " \"peerProfileServer\": \"[? [parms profile] true false]\"," @@ -454,6 +461,7 @@ foreach clause $peerMap { set ::env(CORE_SECURITY_ENROLLID) $user set ::env(CORE_SECURITY_ENROLLSECRET) $password set ::env(CORE_SECURITY_ENABLED) true + set ::env(CORE_SECURITY_PRIVACY) [? [parms privacy] true false] } else { set ::env(CORE_SECURITY_ENABLED) false } diff --git a/tools/busywork/counters/driver b/tools/busywork/counters/driver index 23af5a0cb0f..7015cbc7991 100755 --- a/tools/busywork/counters/driver +++ b/tools/busywork/counters/driver @@ -513,6 +513,20 @@ parms txDelay [durationToMs [parms txDelay]] parms peerDelay [durationToMs [parms peerDelay]] parms netDelay [durationToMs [parms netDelay]] +# We can collect run-time statistics if 1) We are running locally on Linux, +# and 2) if the driver was started from a busywork 'network' file. 
+ +if {[parms remote] || + ![null [parms explicitPeers]] || + [catch {exec uname} os] || + ($os ne "Linux")} { + errorExit "'[parms remote]' '[null [parms explicitPeers]]' '$os'" + parms collectStats 0 +} else { + parms collectStats 1 +} + + ############################################################################ # Setup ############################################################################ @@ -904,6 +918,15 @@ proc clientRoutine {i_logger} { } +# Get peer stats before the client fork + +if {[parms collectStats]} { + foreach peer [parms network.peer.ids] pid [parms network.peer.pids] { + procPIDStat $pid ::parms stat.before.$peer. + } +} + + # Fork clients. The parent continues the script once all clients have exited; # clients run their driver routine and exit. @@ -946,14 +969,15 @@ if {$p_pprofClient} { # event of errors until the final agreement check. note {} "Waiting (indefinitely) for subprocesses to complete" -set t [time {set errors [waitPIDs $pids]} 1] +set startMs [clock milliseconds] +set errors [waitPIDs $pids] +set issueSec [expr ([clock milliseconds] - $startMs) / 1000.0] if {!$errors} { - set seconds [expr {[lindex $t 0] / 1e6}] - set rate [format %.2f [expr {[parms totalTransactions] / $seconds}]] + set rate [format %.2f [expr {[parms totalTransactions] / $issueSec}]] note {} \ - "Transaction rate : $rate per second " \ - "([parms totalTransactions] / $seconds)" + "Issue + interlock TX rate : $rate per second " \ + "([parms totalTransactions] / $issueSec)" } if {$p_pprofClient} { @@ -964,6 +988,33 @@ if {$p_pprofClient} { } } +# Print peer stats + +if {[parms collectStats]} { + if {[catch {exec getconf CLK_TCK} CLK_TCK]} { + error {} "Can't getconf CLK_TCK: $CLK_TCK" + } else { + set CLK_TCK $CLK_TCK.0; # Poor man's float() + note {} "Peer statistics (excluding initialization and deployment)" + note {} " Peer: CPU (sec) = User + System: Utilization: Threads" + note {} " -----------------------------------------------------" + foreach peer [parms network.peer.ids] pid [parms network.peer.pids] { + procPIDStat $pid ::parms stat.after.$peer. 
+ set user \ + [expr {([parms stat.after.$peer.utime] - \ + [parms stat.before.$peer.utime]) / $CLK_TCK}] + set sys \ + [expr {([parms stat.after.$peer.stime] - \ + [parms stat.before.$peer.stime]) / $CLK_TCK}] + set cpu [expr {$user + $sys}] + set util [expr {$cpu * 100.0 / $issueSec}] + set threads [parms stat.after.$peer.num_threads] + note {} [format "%8s %8.2f %8.2f %8.2f %8.0f%% %10d" \ + $peer $cpu $user $sys $util $threads] + } + } +} + if {$errors && ![parms force]} { errorExit "Aborting due to client errors" } @@ -987,6 +1038,13 @@ if {[parms interlock] && ![parms noops]} { set heights [removeDuplicates $originalHeights] if {[llength $heights] != 1} { note {} " Observed block heights: $originalHeights" + } else { + note {} " Consensus block height: $heights" + set finalSec [expr ([clock milliseconds] - $::startMs) / 1000.0] + set rate [format %.2f [expr {[parms totalTransactions] / $finalSec}]] + note {} \ + "Fully committed TX rate : $rate per second " \ + "([parms totalTransactions] / $finalSec)" } return [expr {[llength $heights] == 1}] } diff --git a/tools/busywork/tcl/fabric.tcl b/tools/busywork/tcl/fabric.tcl index afe11a8dd6d..3a9b1e168fa 100644 --- a/tools/busywork/tcl/fabric.tcl +++ b/tools/busywork/tcl/fabric.tcl @@ -25,7 +25,7 @@ package provide fabric 0.0 namespace eval ::fabric {} ############################################################################ -# devops i_peer i_method i_query {i_retry 0} +# chaincode i_peer i_method i_query {i_retry 0} # Make a REST API 'devops' query. The i_peer is the full host:port # address. The i_method must be 'deploy', 'invoke' or 'query'. @@ -40,12 +40,12 @@ namespace eval ::fabric {} # exits. If the HTTP access fails then the call will exit with Tcl error{} and # the caller will presumably catch{} the error and do whatever is appropriate. 
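The doc comment above describes the renamed ::fabric::chaincode{} helper: every deploy/invoke/query is now POSTed to the peer's single '/chaincode' REST endpoint as a JSON-RPC 2.0 request (see the templates below), and the response's result.status must be "OK" before result.message is returned. A minimal TypeScript sketch of that request/response handling; the peer address, port and chaincode name are illustrative placeholders, and the retry loop is omitted:

    import * as http from "http";

    // Minimal JSON-RPC 2.0 call against a peer's /chaincode REST endpoint,
    // mirroring the payload shape and status handling of ::fabric::chaincode.
    function chaincode(peer: string, method: "deploy" | "invoke" | "query",
                       ccName: string, args: string[], user = ""): Promise<string> {
      const body = JSON.stringify({
        jsonrpc: "2.0",
        method,                              // "deploy", "invoke" or "query"
        params: {
          type: 1,                           // GOLANG chaincode
          chaincodeID: { name: ccName },     // or { path: ... } for network-mode deploy
          ctorMsg: { args },                 // function name first, then its arguments
          secureContext: user,               // enrollment ID when security is enabled
        },
        id: 1,
      });
      const [host, port] = peer.split(":");
      return new Promise((resolve, reject) => {
        const req = http.request(
          { host, port, path: "/chaincode", method: "POST",
            headers: { "Content-Type": "application/json" } },
          (res) => {
            let data = "";
            res.on("data", (chunk) => (data += chunk));
            res.on("end", () => {
              const result = JSON.parse(data).result;
              // Anything other than status "OK" is an error, as in the Tcl helper.
              if (result && result.status === "OK") resolve(result.message);
              else reject(new Error("chaincode call failed: " + data));
            });
          });
        req.on("error", reject);
        req.end(body);
      });
    }

    // e.g. chaincode("localhost:7050", "query", "mycc", ["query", "a"]).then(console.log);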
-proc ::fabric::devops {i_peer i_method i_query {i_retry 0}} { +proc ::fabric::chaincode {i_peer i_method i_query {i_retry 0}} { for {set retry [math:::max $i_retry 0]} {$retry >= 0} {incr retry -1} { if {[catch { - ::http::geturl http://$i_peer/devops/$i_method -query $i_query + ::http::geturl http://$i_peer/chaincode -query $i_query } token]} { if {$i_retry < 0} { http::cleanup $token @@ -54,7 +54,7 @@ proc ::fabric::devops {i_peer i_method i_query {i_retry 0}} { if {$retry > 0} { if {$retry == $i_retry} { warn fabric \ - "fabric::devops/$i_method $i_peer : " \ + "fabric::chaincode $i_peer $i_method: " \ "Retrying after catastrophic HTTP error" } http::cleanup $token @@ -62,13 +62,13 @@ proc ::fabric::devops {i_peer i_method i_query {i_retry 0}} { } if {($retry == 0) && ($i_retry != 0)} { err fabric \ - "fabric::devops/$i_method $i_peer : " \ + "fabric::chaincode $i_peer $i_method: " \ "Retry limit ($i_retry) hit after " \ "catastrophic HTTP error : Aborting" } http::cleanup $token errorExit \ - "fabric::devops/$i_method $i_peer : ::http::geturl failed\n" \ + "fabric::chaincode $i_peer $i_method: ::http::geturl failed\n" \ $::errorInfo } @@ -80,12 +80,12 @@ proc ::fabric::devops {i_peer i_method i_query {i_retry 0}} { if {$retry > 0} { if {$retry == $i_retry} { warn fabric \ - "fabric::devops/$i_method $i_peer : " \ + "fabric::chaincode $i_peer $i_method: " \ "Retrying after HTTP error return" } if {($retry == 0) && ($i_retry != 0)} { err fabric \ - "fabric::devops/$i_method $i_peer : " \ + "fabric::chaincode $i_peer $i_method: " \ "Retry limit ($i_retry) hit after " \ "HTTP error return : Aborting" } @@ -93,48 +93,52 @@ proc ::fabric::devops {i_peer i_method i_query {i_retry 0}} { continue } err fabric \ - "FABRIC '$i_method' transaction to $i_peer failed " \ + "fabric::chaincode '$i_method' transaction to $i_peer failed " \ "with ncode = '[http::ncode $token]'; Aborting\n" httpErrorExit $token } set response [http::data $token] - set err [catch { + set fail [catch { set parse [json::json2dict $response] - set ok [dict get $parse OK] - switch $i_method { - deploy - - invoke { - set result [dict get $parse message] + set result [dict get $parse result] + set status [dict get $result status] + if {$status ne "OK"} { + error "Status not OK" } - query { - set result $ok - } - default { - error "Unrecognized method $i_method" - } - } - }] + set message [dict get $result message] + } err] - if {$err} { - err fabric \ - "FABRIC '$i_method' response from $i_peer " \ - "is malformed/unexpected" - httpErrorExit $token + if {$fail} { + + set msg \ + [concat \ + "fabric::chaincode '$i_method' response from $i_peer " \ + "is malformed/unexpected: $err"] + + if {$i_retry < 0} { + http::cleanup $token + error $msg + + } else { + + err fabric $msg + httpErrorExit $token + } } http::cleanup $token if {($i_retry >= 0) && ($retry != $i_retry)} { note fabric \ - "fabric::devops/$i_method $i_peer : " \ + "fabric::chaincode $i_peer $i_method: " \ "Success after [expr {$i_retry - $retry}] HTTP retries" } break } - return $result + return $message } @@ -144,17 +148,17 @@ proc ::fabric::devops {i_peer i_method i_query {i_retry 0}} { # Deploy a GOLANG chaincode to the network. The i_peer is the full network # address (host:port) of the REST API port of the peer. If i_user is # non-empty then this will be a secure transaction. The constructor will apply -# i_fn to i_args. Note that i_args is a normal Tcl list. 
This routine will -# convert i_args into a JSON array, wrapping each element of i_args in double -# quotes. i_fn will also be properly quoted. +# i_fn to i_args. -# See ::fabric::devops{} for a discussion of the 'i_retry' parameter. +# See ::fabric::chaincode{} for a discussion of the 'i_retry' parameter. proc ::fabric::deploy {i_peer i_user i_chaincode i_fn i_args {i_retry 0}} { set template { - { - "type" : "GOLANG", + "jsonrpc" : "2.0", + "method" : "deploy", + "params" : { + "type": 1, "chaincodeID" : { "path" : "$i_chaincode" }, @@ -162,16 +166,17 @@ proc ::fabric::deploy {i_peer i_user i_chaincode i_fn i_args {i_retry 0}} { "args" : [$args] }, "secureContext": "$i_user" - } + }, + "id": 1 } set args [argify $i_fn $i_args] - set query [subst -nocommand $template] - - return [devops $i_peer deploy $query $i_retry] + set query [list [subst -nocommand $template]] + return [chaincode $i_peer deploy $query $i_retry] } + ############################################################################ # devModeDeploy i_peer i_user i_chaincode i_fn i_args {i_retry 0} @@ -179,13 +184,15 @@ proc ::fabric::deploy {i_peer i_user i_chaincode i_fn i_args {i_retry 0}} { # mode. Here, the i_chaincode is a user-specified name. All of the other # arguments are otherwise the same as for deploy{}. -# See ::fabric::devops{} for a discussion of the 'i_retry' parameter. +# See ::fabric::chaincode{} for a discussion of the 'i_retry' parameter. proc ::fabric::devModeDeploy {i_peer i_user i_chaincode i_fn i_args {i_retry 0}} { set template { - { - "type" : "GOLANG", + "jsonrpc" : "2.0", + "method" : "deploy", + "params" : { + "type": 1, "chaincodeID" : { "name" : "$i_chaincode" }, @@ -193,14 +200,14 @@ proc ::fabric::devModeDeploy {i_peer i_user i_chaincode i_fn i_args {i_retry 0}} "args" : [$args] }, "secureContext": "$i_user" - } + }, + "id": 1 } set args [argify $i_fn $i_args] - set query [subst -nocommand $template] + set query [list [subst -nocommand $template]] return [devops $i_peer deploy $query $i_retry] - } ############################################################################ @@ -209,33 +216,32 @@ proc ::fabric::devModeDeploy {i_peer i_user i_chaincode i_fn i_args {i_retry 0}} # Invoke a GOLANG chaincode on the network. The i_peer is the full network # address (host:port) of the REST API port of the peer. If i_user is non-empty # then this will be a secure transaction. The i_chaincodeName is the hash used -# to identify the chaincode. The invocation will apply i_fn to i_args. Note -# that i_args is a normal Tcl list. This routine will convert i_args into a -# JSON array, wrapping each element of i_args in double quotes. i_fn will also -# be properly quoted. +# to identify the chaincode. The invocation will apply i_fn to i_args. -# See ::fabric::devops{} for a discussion of the 'i_retry' parameter. +# See ::fabric::chaincode{} for a discussion of the 'i_retry' parameter. 
proc ::fabric::invoke {i_peer i_user i_chaincodeName i_fn i_args {i_retry 0}} { set template { - { - "chaincodeSpec" : {"type" : "GOLANG", - "chaincodeID" : { - "name" : "$i_chaincodeName" - }, - "ctorMsg" : { - "args" : [$args] - }, - "secureContext": "$i_user" - } - } + "jsonrpc" : "2.0", + "method" : "invoke", + "params" : { + "type": 1, + "chaincodeID" : { + "name" : "$i_chaincodeName" + }, + "ctorMsg" : { + "args" : [$args] + }, + "secureContext": "$i_user" + }, + "id": 1 } set args [argify $i_fn $i_args] - set query [subst -nocommand $template] + set query [list [subst -nocommand $template]] - return [devops $i_peer invoke $query $i_retry] + return [chaincode $i_peer invoke $query $i_retry] } @@ -245,34 +251,32 @@ proc ::fabric::invoke {i_peer i_user i_chaincodeName i_fn i_args {i_retry 0}} { # Query a GOLANG chaincode on the network. The i_peer is the full network # address (host:port) of the REST API port of the peer. If i_user is non-empty # then this will be a secure transaction. The i_chaincodeName is the hash used -# to identify the chaincode. The query will apply i_fn to i_args. Note that -# i_args is a normal Tcl list. This routine will convert i_args into a JSON -# array, wrapping each element of i_args in double quotes. i_fn will also be -# properly quoted. The query result (currently assumed to be a string) is -# returned. +# to identify the chaincode. The query will apply i_fn to i_args. -# See ::fabric::devops{} for a discussion of the 'i_retry' parameter. +# See ::fabric::chaincode{} for a discussion of the 'i_retry' parameter. proc ::fabric::query {i_peer i_user i_chaincodeName i_fn i_args {i_retry 0}} { set template { - { - "chaincodeSpec" : {"type" : "GOLANG", - "chaincodeID" : { - "name" : "$i_chaincodeName" - }, - "ctorMsg" : { - "args" : [$args] - }, - "secureContext": "$i_user" - } - } + "jsonrpc" : "2.0", + "method" : "query", + "params" : { + "type": 1, + "chaincodeID" : { + "name" : "$i_chaincodeName" + }, + "ctorMsg" : { + "args" : [$args] + }, + "secureContext": "$i_user" + }, + "id": 1 } set args [argify $i_fn $i_args] - set query [subst -nocommand $template] + set query [list [subst -nocommand $template]] - return [devops $i_peer query $query $i_retry] + return [chaincode $i_peer query $query $i_retry] } @@ -298,11 +302,11 @@ proc ::fabric::height {i_peer {i_retry 0}} { "$i_peer /chain: ::http::geturl failed " \ "with $i_retry retries : $token" } - + if {[http::ncode $token] != 200} { - + # Failure - + if {$retry > 0} { if {$retry == $i_retry} { warn fabric \ @@ -311,12 +315,12 @@ proc ::fabric::height {i_peer {i_retry 0}} { http::cleanup $token continue } - + err fabric \ "$i_peer /chain; REST API call failed with $i_retry retries" httpErrorExit $token } - + if {[catch {json::json2dict [http::data $token]} parse]} { err fabric "$i_peer /chain: JSON response does not parse: $parse" httpErrorExit $token @@ -430,7 +434,7 @@ proc ::fabric::caLogin {i_peer i_user i_secret} { ############################################################################ # argify i_fn i_args -# Convert old-style fn + args pair into a list of quoted base64 arguments with +# Convert old-style fn + args pair into a list of quoted arguments with # commas to satisfy the most recent JSON format of the REST API. This needs to # be done as a string (rather than as a list), otherwise it will be {} quoted # when substituted. 
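As the revised comment above notes, argify{} now renders the function name and arguments as a comma-separated run of plain double-quoted strings instead of base64-encoded values (the hunk below shows the change). A tiny TypeScript equivalent, for illustration only; note it also escapes embedded quotes, which the Tcl sketch does not:

    // Equivalent of the revised ::fabric::argify: prepend the function name to
    // the argument list and join the elements as quoted JSON strings. The
    // pre-patch version base64-encoded each element instead.
    function argify(fn: string, args: string[]): string {
      return [fn, ...args].map((a) => JSON.stringify(a)).join(",");
    }

    // argify("invoke", ["a", "1"])  ->  '"invoke","a","1"'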
@@ -438,11 +442,11 @@ proc ::fabric::caLogin {i_peer i_user i_secret} { proc ::fabric::argify {i_fn i_args} { set args [concat $i_fn $i_args] - set args64 "" + set jsonargs "" set comma "" foreach arg $args { - set args64 "$args64$comma\"[binary encode base64 $arg]\"" + set jsonargs "$jsonargs$comma\"$arg\"" set comma , } - return $args64 + return $jsonargs } diff --git a/tools/busywork/tcl/os.tcl b/tools/busywork/tcl/os.tcl index 32891b64ed9..b2c8af544b1 100644 --- a/tools/busywork/tcl/os.tcl +++ b/tools/busywork/tcl/os.tcl @@ -1,13 +1,13 @@ # os.tcl - Utilities related to the operating system functions # Copyright IBM Corp. 2016. All Rights Reserved. -# +# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. @@ -43,7 +43,7 @@ proc waitPIDs {pids {digest {}}} { set _digest($pid.why) $why set _digest($pid.rc) $rc set _digest($pid.ok) [expr {($why eq "EXIT") && ($rc == 0)}] - + switch $why { EXIT { if {$rc != 0} { @@ -67,3 +67,97 @@ proc waitPIDs {pids {digest {}}} { } return $rv } + + +############################################################################ +# procPIDStat i_pid o_array {i_prefix ""} + +# This procedure parses the result of executing + +# cat /proc/[i_pid]/stat + +# and stores the results in the array o_array keyed by name. See man proc(5) +# under /proc/[pid]/stat for an interpretation of the fields. The names are +# taken directly from the man page. The optional i_prefix can be specified to +# allow stats for multiple PIDs to be stored in the same array, using an +# indexing scheme chosen by the caller. + +# NB: This procedure only works on Linux, and will fail on other operating +# systems. + +# BUGS: Field 2 (comm) is the command name in parenthesis. If the file name of +# the command includes white space this will throw off the current parser. + +# Note: Copy/edited taken directly from the man page, which uses 1-based +# addressing. +array unset ::procPIDStatMap +foreach {index key} { + 1 pid + 2 comm + 3 state + 4 ppid + 5 pgrp + 6 session + 7 tty_nr + 8 tpgid + 9 flags + 10 minflt + 11 cminflt + 12 majflt + 13 cmajflt + 14 utime + 15 stime + 16 cutime + 17 cstime + 18 priority + 19 nice + 20 num_threads + 21 itrealvalue + 22 starttime + 23 vsize + 24 rss + 25 rsslim + 26 startcode + 27 endcode + 28 startstack + 29 kstkesp + 30 kstkeip + 31 signal + 32 blocked + 33 sigignore + 34 sigcatch + 35 wchan + 36 nswap + 37 cnswap + 38 exit_signal + 39 processor + 40 rt_priority + 41 policy + 42 delayacct_blkio_ticks + 43 guest_time + 44 cguest_time + 45 start_data + 46 end_data + 47 start_brk + 48 arg_start + 49 arg_end + 50 env_start + 51 env_end + 52 exit_code +} { + set ::procPIDStatMap([expr {$index - 1}]) $key +} + + +proc procPIDStat {i_pid o_array {i_prefix ""}} { + + upvar $o_array a + + if {[catch {exec cat /proc/$i_pid/stat} stat]} { + errorExit "Can not cat /proc/$i_pid/stat: $stat" + } + + foreach index [array names ::procPIDStatMap] { + set a($i_prefix$::procPIDStatMap($index)) [lindex $stat $index] + } +}
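The new procPIDStat{} helper above, together with its use in the counters driver, amounts to sampling /proc/<pid>/stat before and after the run and converting the utime/stime tick counts into CPU seconds and a utilization percentage. A minimal sketch of the same sampling in TypeScript; Linux-only, and the fixed clock-tick constant is an assumption (the driver queries 'getconf CLK_TCK' instead):

    import { readFileSync } from "fs";

    // Assumed clock-tick rate; the driver obtains this via `getconf CLK_TCK`.
    const CLK_TCK = 100;

    // Pull utime, stime and num_threads (fields 14, 15 and 20 in proc(5),
    // 1-based) from /proc/<pid>/stat and convert ticks to seconds.
    function peerCpuSeconds(pid: number) {
      const stat = readFileSync(`/proc/${pid}/stat`, "utf8").trim().split(" ");
      // NB: like the Tcl parser, this naive split breaks if the command name
      // in field 2 ("comm") contains whitespace.
      const utime = Number(stat[13]) / CLK_TCK;   // user time, seconds
      const stime = Number(stat[14]) / CLK_TCK;   // system time, seconds
      const threads = Number(stat[19]);           // num_threads
      return { utime, stime, cpu: utime + stime, threads };
    }

    // Utilization over a run, as the driver reports it: sample before and
    // after, then (after.cpu - before.cpu) * 100 / elapsedSeconds.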