-- | Simple backend based on the TCP transport which offers node discovery
-- based on UDP multicast. This is a zero-configuration backend designed to
-- get you going with Cloud Haskell quickly without imposing any structure
-- on your application.
--
-- To simplify getting started we provide special support for /master/ and
-- /slave/ nodes (see 'startSlave' and 'startMaster'). Use of these functions
-- is completely optional; you can use the local backend without making use
-- of the predefined master and slave nodes.
--
-- [Minimal example]
--
-- > import System.Environment (getArgs)
-- > import Control.Distributed.Process
-- > import Control.Distributed.Process.Node (initRemoteTable)
-- > import Control.Distributed.Process.Backend.SimpleLocalnet
-- >
-- > master :: Backend -> [NodeId] -> Process ()
-- > master backend slaves = do
-- >   -- Do something interesting with the slaves
-- >   liftIO . putStrLn $ "Slaves: " ++ show slaves
-- >   -- Terminate the slaves when the master terminates (this is optional)
-- >   terminateAllSlaves backend
-- >
-- > main :: IO ()
-- > main = do
-- >   args <- getArgs
-- >
-- >   case args of
-- >     ["master", host, port] -> do
-- >       backend <- initializeBackend host port initRemoteTable
-- >       startMaster backend (master backend)
-- >     ["slave", host, port] -> do
-- >       backend <- initializeBackend host port initRemoteTable
-- >       startSlave backend
--
-- [Compiling and Running]
--
-- Save to @example.hs@ and compile using
--
-- > ghc -threaded example.hs
--
-- Fire up some slave nodes (for the example, we run them on a single machine):
--
-- > ./example slave localhost 8080 &
-- > ./example slave localhost 8081 &
-- > ./example slave localhost 8082 &
-- > ./example slave localhost 8083 &
--
-- And start the master node:
--
-- > ./example master localhost 8084
--
-- which should then output:
--
-- > Slaves: [nid://localhost:8083:0,nid://localhost:8082:0,nid://localhost:8081:0,nid://localhost:8080:0]
--
-- at which point the slaves should exit.
--
-- To run the example on multiple machines, you
-- could run
--
-- > ./example slave 198.51.100.1 8080 &
-- > ./example slave 198.51.100.2 8080 &
-- > ./example slave 198.51.100.3 8080 &
-- > ./example slave 198.51.100.4 8080 &
--
-- on four different machines (with IP addresses 198.51.100.1..4), and run the
-- master on a fifth node (or on any of the four machines that run the slave
-- nodes).
--
-- It is important that every node has a unique (hostname, port number) pair,
-- and that the hostname you use to initialize the node can be resolved by
-- peer nodes. In other words, if you start a node and pass hostname @localhost@
-- then peer nodes won't be able to reach it because @localhost@ will resolve
-- to a different IP address for them.
{-# OPTIONS_GHC -fno-warn-orphans #-}
module Control.Distributed.Process.Backend.SimpleLocalnet
  ( -- * Initialization
    Backend(..)
  , initializeBackend
    -- * Slave nodes
  , startSlave
  , terminateSlave
  , findSlaves
  , terminateAllSlaves
    -- * Master nodes
  , startMaster
  ) where

import System.IO (fixIO)
import Data.Maybe (catMaybes)
import Data.Binary (Binary(get, put), getWord8, putWord8)
import Data.Accessor (Accessor, accessor, (^:), (^.))
import Data.Set (Set)
import qualified Data.Set as Set (insert, empty, toList)
import Data.Foldable (forM_)
import Data.Typeable (Typeable)
import Control.Applicative ((<$>))
import Control.Exception (throw)
import Control.Monad (forever, forM)
import Control.Monad.IO.Class (liftIO)
import Control.Concurrent (forkIO, threadDelay, ThreadId)
import Control.Concurrent.MVar (MVar, newMVar, readMVar, modifyMVar_)
import Control.Distributed.Process
  ( RemoteTable
  , NodeId
  , Process
  , WhereIsReply(..)
  , whereis
  , whereisRemoteAsync
  , registerRemote
  , getSelfPid
  , register
  , expect
  , nsendRemote
  , receiveWait
  , matchIf
  , processNodeId
  )
import qualified Control.Distributed.Process.Node as Node
  ( LocalNode
  , newLocalNode
  , localNodeId
  , runProcess
  )
import qualified Network.Transport.TCP as NT (createTransport, defaultTCPParameters)
import qualified Network.Transport as NT (Transport)
import qualified Network.Socket as N (HostName, ServiceName, SockAddr)
import Control.Distributed.Process.Backend.SimpleLocalnet.Internal.Multicast (initMulticast)
-- | Local backend
data Backend = Backend {
    -- | Create a new local node
    newLocalNode :: IO Node.LocalNode
    -- | @findPeers t@ broadcasts a /who's there?/ message on the local
    -- network, waits @t@ msec, and then collects and returns the answers.
    -- You can use this to dynamically discover peer nodes.
  , findPeers :: Int -> IO [NodeId]
    -- | Make sure that all log messages are printed by the logger on the
    -- current node
  , redirectLogsHere :: Process ()
  }

-- | Internal mutable backend state, shared between the API functions and
-- the peer discovery daemon through an 'MVar'.
data BackendState = BackendState {
    _localNodes     :: [Node.LocalNode]  -- ^ All nodes created by 'newLocalNode'
  , _peers          :: Set NodeId        -- ^ Peers discovered so far
  , discoveryDaemon :: ThreadId          -- ^ Thread running 'peerDiscoveryDaemon'
  }

-- | Initialize the backend
--
-- Creates the TCP transport, joins the discovery multicast group, and forks
-- the peer discovery daemon.
initializeBackend :: N.HostName -> N.ServiceName -> RemoteTable -> IO Backend
initializeBackend host port rtable = do
  mTransport   <- NT.createTransport host port NT.defaultTCPParameters
  (recv, send) <- initMulticast "224.0.0.99" 9999 1024
  -- Tie the knot with 'fixIO': the state stored in the MVar references the
  -- daemon's ThreadId, while the daemon itself is given the MVar. The lazy
  -- pattern is essential so the state record can be built before the
  -- ThreadId is available.
  (_, backendState) <- fixIO $ \ ~(tid, _) -> do
    backendState <- newMVar BackendState
      { _localNodes     = []
      , _peers          = Set.empty
      , discoveryDaemon = tid
      }
    tid' <- forkIO $ peerDiscoveryDaemon backendState recv send
    return (tid', backendState)
  case mTransport of
    Left err -> throw err
    Right transport ->
      -- 'backend' is defined recursively: 'redirectLogsHere' needs the
      -- backend itself (to call 'findPeers'). Laziness makes this safe.
      let backend = Backend
            { newLocalNode     = apiNewLocalNode transport rtable backendState
            , findPeers        = apiFindPeers send backendState
            , redirectLogsHere = apiRedirectLogsHere backend
            }
      in return backend

-- | Create a new local node and record it in the backend state so the
-- discovery daemon can announce it to peers.
apiNewLocalNode :: NT.Transport
                -> RemoteTable
                -> MVar BackendState
                -> IO Node.LocalNode
apiNewLocalNode transport rtable backendState = do
  localNode <- Node.newLocalNode transport rtable
  modifyMVar_ backendState $ return . (localNodes ^: (localNode :))
  return localNode

-- | Peer discovery: broadcast a request, wait @delay@ microseconds, then
-- return every peer recorded by the discovery daemon so far.
apiFindPeers :: (PeerDiscoveryMsg -> IO ())
             -> MVar BackendState
             -> Int
             -> IO [NodeId]
apiFindPeers send backendState delay = do
  send PeerDiscoveryRequest
  threadDelay delay
  Set.toList . (^. peers) <$> readMVar backendState

-- | Multicast messages exchanged during peer discovery. Not exported.
data PeerDiscoveryMsg =
    PeerDiscoveryRequest
  | PeerDiscoveryReply NodeId

instance Binary PeerDiscoveryMsg where
  put PeerDiscoveryRequest     = putWord8 0
  put (PeerDiscoveryReply nid) = putWord8 1 >> put nid
  get = do
    header <- getWord8
    case header of
      0 -> return PeerDiscoveryRequest
      1 -> PeerDiscoveryReply <$> get
      _ -> fail "PeerDiscoveryMsg.get: invalid"
-- | Respond to peer discovery requests sent by other nodes
--
-- Runs forever: on a request, replies with the 'NodeId' of every local node;
-- on a reply, records the peer in the backend state.
peerDiscoveryDaemon :: MVar BackendState
                    -> IO (PeerDiscoveryMsg, N.SockAddr)
                    -> (PeerDiscoveryMsg -> IO ())
                    -> IO ()
peerDiscoveryDaemon backendState recv send = forever go
  where
    go = do
      (msg, _) <- recv
      case msg of
        PeerDiscoveryRequest -> do
          nodes <- (^. localNodes) <$> readMVar backendState
          forM_ nodes $ send . PeerDiscoveryReply . Node.localNodeId
        PeerDiscoveryReply nid ->
          modifyMVar_ backendState $ return . (peers ^: Set.insert nid)

--------------------------------------------------------------------------------
-- Back-end specific primitives                                               --
--------------------------------------------------------------------------------

-- | Make sure that all log messages are printed by the logger on this node
--
-- A no-op when no "logger" process is registered locally; otherwise registers
-- the local logger as "logger" on every discovered peer.
apiRedirectLogsHere :: Backend -> Process ()
apiRedirectLogsHere backend = do
  mLogger <- whereis "logger"
  forM_ mLogger $ \logger -> do
    nids <- liftIO $ findPeers backend 1000000
    forM_ nids $ \nid -> registerRemote nid "logger" logger

--------------------------------------------------------------------------------
-- Slaves                                                                     --
--------------------------------------------------------------------------------

-- | Messages to slave nodes
--
-- This datatype is not exposed; instead, we expose primitives for dealing
-- with slaves.
data SlaveControllerMsg =
    SlaveTerminate
  deriving (Typeable, Show)

instance Binary SlaveControllerMsg where
  put SlaveTerminate = putWord8 0
  get = do
    header <- getWord8
    case header of
      0 -> return SlaveTerminate
      _ -> fail "SlaveControllerMsg.get: invalid"

-- | Calling 'startSlave' sets up a new local node and then waits. You start
-- processes on the slave by calling 'spawn' from other nodes.
--
-- This function does not return.
-- The only way to exit the slave is to CTRL-C
-- the process or call 'terminateSlave' from another node.
startSlave :: Backend -> IO ()
startSlave backend = do
  node <- newLocalNode backend
  Node.runProcess node slaveController

-- | The slave controller interprets 'SlaveControllerMsg's
slaveController :: Process ()
slaveController = do
  pid <- getSelfPid
  register "slaveController" pid
  go
  where
    -- Blocks in 'expect' until a control message arrives; returning from
    -- 'go' terminates the slave's controlling process.
    go = do
      msg <- expect
      case msg of
        SlaveTerminate -> return ()

-- | Terminate the slave at the given node ID
terminateSlave :: NodeId -> Process ()
terminateSlave nid = nsendRemote nid "slaveController" SlaveTerminate

-- | Find slave nodes
--
-- A peer counts as a slave iff it has a registered "slaveController" process.
findSlaves :: Backend -> Process [NodeId]
findSlaves backend = do
  nodes <- liftIO $ findPeers backend 1000000
  -- Fire off asynchronous requests for the slave controller
  forM_ nodes $ \nid -> whereisRemoteAsync nid "slaveController"
  -- Wait for the replies (one per peer, in whatever order they arrive)
  catMaybes <$> forM nodes (\_ ->
    receiveWait
      [ matchIf (\(WhereIsReply label _) -> label == "slaveController")
                (\(WhereIsReply _ mPid) -> return (processNodeId <$> mPid))
      ])

-- | Terminate all slaves
--
-- The trailing delay gives the termination messages a chance to be delivered
-- before the caller (typically the master) shuts down.
terminateAllSlaves :: Backend -> Process ()
terminateAllSlaves backend = do
  slaves <- findSlaves backend
  forM_ slaves terminateSlave
  liftIO $ threadDelay 1000000

--------------------------------------------------------------------------------
-- Master nodes                                                               --
--------------------------------------------------------------------------------

-- | 'startMaster' finds all slaves currently available on the local network
-- (which should therefore be started first), redirects all log messages to
-- itself, and then calls the specified process, passing the list of slave
-- nodes.
--
-- Terminates when the specified process terminates.
-- If you want to terminate
-- the slaves when the master terminates, you should manually call
-- 'terminateAllSlaves'.
startMaster :: Backend -> ([NodeId] -> Process ()) -> IO ()
startMaster backend proc = do
  node <- newLocalNode backend
  Node.runProcess node $ do
    slaves <- findSlaves backend
    redirectLogsHere backend
    proc slaves

--------------------------------------------------------------------------------
-- Accessors                                                                  --
--------------------------------------------------------------------------------

-- | Accessor for the list of nodes created on this backend.
localNodes :: Accessor BackendState [Node.LocalNode]
localNodes = accessor _localNodes (\ns st -> st { _localNodes = ns })

-- | Accessor for the set of peers discovered so far.
peers :: Accessor BackendState (Set NodeId)
peers = accessor _peers (\ps st -> st { _peers = ps })