#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>
#include <vector>
#include <mpi.h>

#include "constants.h"
#include "Master.h"
#include "Environment.h"
#include "BulletObject.h"
#include "SimpleAgent.h"

#ifndef USE_REMOTE_GUI
#define USE_REMOTE_GUI 0
#endif

#if USE_REMOTE_GUI
#include "RemoteGui.h"
#else
#include "LocalGui.h"
#endif

using std::vector;

/// Parameters handed to the GUI thread
typedef struct {
    int argc;
    char **argv;
    Environment *env;
} threadParam;

int rank, nprocs = MAX_PROCS;
MPI_Status status;
Master *master;
pthread_t guiThread, masterThread;

/// Called when the GUI shuts down: spin until the master object exists,
/// ask it to stop, then park this thread for good.
void cleanup()
{
    while (!master)
        ;
    master->stop();
    while (1)
        ;
}

/// Thread body that runs the (local or remote) GUI on top of the environment
void *guiThreadCode(void *args)
{
    threadParam *param = (threadParam*) args;
    char **argv = param->argv;
    int argc = param->argc;
    Environment *env = param->env;

#if USE_REMOTE_GUI
    if (argc != 2) {
        fprintf(stderr, "Usage: %s <port>\n", argv[0]);
        cleanup();
        return NULL;
    }
    RemoteGui *rg = new RemoteGui(env);
    rg->init(atoi(param->argv[1]));
    rg->run();
    cleanup();
#else
    atexit(cleanup);
    int ret = glutmain(argc, argv, env);
    if (ret < 0) {
        fprintf(stderr, "Error creating LocalGui\n");
        cleanup();
        return NULL;
    }
#endif
    return NULL;
}

static void runMaster(int argc, char **argv)
{
    /// Initialize the virtual world environment
    Environment *env = new Environment();
    env->setupEnvironment();
    env->createMagicCarpet(WIDTH, HEIGHT);

    /// Pack gui-thread parameters in a structure
    threadParam param;
    param.argv = argv;
    param.argc = argc;
    param.env = env;

    /// Run pthread for the gui object
    pthread_create(&guiThread, NULL, guiThreadCode, &param);

    master = new Master(env, nprocs);
    master->discoverAgents();
    master->createProviders();
    master->sendAll();

    MPI_Barrier(MPI_COMM_WORLD);
    master->run();
}

static void runSlave()
{
    int size;
    vector<Sensor*> sensorList;
    vector<Actuator*> actuatorList;
    vector<CommChannel*> channelList;

    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    // send hello-message: mpi-thread rank
    MPI_Send(&rank, 1, MPI_INT, MASTER_ID, HELLO_MSG, MPI_COMM_WORLD);

    // wait to receive from the master the number of channels to be created
    MPI_Recv(&size, 1, MPI_INT, MASTER_ID, CHANNEL_MSG, MPI_COMM_WORLD, &status);

    // allocate a buffer large enough for `size` packed channel descriptors
    // (four ints and three floats each)
    int intSize, floatSize;
    MPI_Pack_size(1, MPI_INT, MPI_COMM_WORLD, &intSize);
    MPI_Pack_size(1, MPI_FLOAT, MPI_COMM_WORLD, &floatSize);
    int bufSize = size * (4 * intSize + 3 * floatSize);
    char *buffer = (char*) malloc(bufSize);

    // wait to receive the list of channel parameters
    MPI_Recv(buffer, bufSize, MPI_PACKED, MASTER_ID, CHANNEL_MSG, MPI_COMM_WORLD, &status);

    // debug info
    int n_sensors = 0, n_actuators = 0, n_channels = 0;
    int i;
    int position = 0;
    //printf("RECV : %d - ", rank);

    for (i = 0; i < size; i++) {
        Sensor *x;
        Actuator *y;
        CommChannel *z;
        Channel *channel;
        channelData cparam;

        // unpack one channel descriptor: tag, type, orientation,
        // destination rank, then the three angle parameters
        MPI_Unpack(buffer, bufSize, &position, &cparam.tag, 1, MPI_INT, MPI_COMM_WORLD);
        MPI_Unpack(buffer, bufSize, &position, &cparam.type, 1, MPI_INT, MPI_COMM_WORLD);
        MPI_Unpack(buffer, bufSize, &position, &cparam.orient, 1, MPI_INT, MPI_COMM_WORLD);
        MPI_Unpack(buffer, bufSize, &position, &cparam.dest, 1, MPI_INT, MPI_COMM_WORLD);
        MPI_Unpack(buffer, bufSize, &position, &cparam.minAngle, 1, MPI_FLOAT, MPI_COMM_WORLD);
        MPI_Unpack(buffer, bufSize, &position, &cparam.maxAngle, 1, MPI_FLOAT, MPI_COMM_WORLD);
        MPI_Unpack(buffer, bufSize, &position, &cparam.angle, 1, MPI_FLOAT, MPI_COMM_WORLD);
        //printf("%d ", cparam.tag);

        switch (cparam.type) {
        case SENSOR:
            // sensors report back to the master (source of the last recv)
            channel = new MPIChannel(status.MPI_SOURCE, cparam.tag, MPI_COMM_WORLD);
            x = new Sensor(channel, &cparam);
            sensorList.push_back(x);
            n_sensors++;
            break;
        case ACTUATOR:
            channel = new MPIChannel(status.MPI_SOURCE, cparam.tag, MPI_COMM_WORLD);
            y = new Actuator(channel, &cparam);
            actuatorList.push_back(y);
            n_actuators++;
            break;
        case COMM_CHANNEL:
            // agent-to-agent channels connect directly to the peer rank
            channel = new MPIChannel(cparam.dest, cparam.tag, MPI_COMM_WORLD);
            z = new CommChannel(channel, cparam.orient);
            channelList.push_back(z);
            n_channels++;
            break;
        default:
            break;
        }
    }
    free(buffer);

    SimpleAgent *agent = new SimpleAgent(sensorList, actuatorList, channelList);

    int ret;
    MPI_Recv(&ret, 1, MPI_INT, MASTER_ID, CHANNEL_MSG, MPI_COMM_WORLD, &status);
    printf("Agent %d [%d] is running with %d sensors, %d actuators and %d channels\n",
           rank, ret, n_sensors, n_actuators, n_channels);

    MPI_Barrier(MPI_COMM_WORLD);
    agent->run();
}

int main(int argc, char **argv)
{
    /// MPI initial stuff
    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &nprocs);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    /// Distribute the tasks
    if (rank == MASTER_ID) {
        printf("Starting SimeoEngine....\n");
        runMaster(argc, argv);
    } else {
        runSlave();
    }

    MPI_Finalize();
    return 0;
}
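/*
 * Launch sketch, not taken from the project's build scripts: the binary
 * name "simeo" and the port number below are illustrative assumptions.
 * Rank MASTER_ID builds the environment and drives the simulation, and
 * every other rank becomes a SimpleAgent, so a world with four agents
 * needs five processes:
 *
 *   mpirun -np 5 ./simeo          # local GUI (built with USE_REMOTE_GUI=0)
 *   mpirun -np 5 ./simeo 9000     # remote GUI listening on port 9000
 *
 * In remote-GUI mode only the master's GUI thread reads the port argument;
 * the agent ranks ignore argv entirely.
 */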