Does anybody see anything wrong with the Winsock code below? It's a wrapper class around the synchronous socket model... See below for how I use it...
class Socket
{
public:
Socket() {};
Socket(char *pIPAddress, int pPort, int pTimeOut = 0) throw(WinsockException);
Socket(SOCKET pSocket) : sock(pSocket) {};
~Socket() {};
void write(char * pBuffer,int pBufferLength) throw(WinsockException);
int read(char *pBuffer, int pBufferLength) throw(WinsockException);
void closeSocket();
int getTimeOut() { return(timeOut); };
protected:
void setTimeOut(int pTimeOut) { timeOut = pTimeOut; };
private:
sockaddr_in serverAddress;
SOCKET sock;
int timeOut;
};
// Required include files.
#include <winsock2.h>
// Application include files.
#include "socket.h"
// Creates a TCP socket, optionally applies send/receive timeouts, and
// connects to pIPAddress:pPort.  Throws WinsockException on any failure.
// NOTE(review): WSAStartup() must have succeeded before this runs; nothing
// in this file shows where that happens -- confirm it is called once per
// process, or every Winsock call here fails with WSANOTINITIALISED.
Socket::Socket(char *pIPAddress, int pPort, int pTimeOut)
{
    int errorCode = 0;
    setTimeOut(pTimeOut);
    if((sock = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP)) == INVALID_SOCKET)
    {
        errorCode = WSAGetLastError();
        throw WinsockException("socket() failed.", errorCode);
    }
    // BUG FIX: the original condition was
    //     (optionValue != 0) || (optionValue != NULL)
    // which is a tautology (always true), so the timeout branch executed
    // even when no timeout was requested.  A single != 0 test is intended.
    int optionValue = getTimeOut();
    if(optionValue != 0)
    {
        // SO_SNDTIMEO / SO_RCVTIMEO take the timeout in milliseconds (DWORD/int).
        // Set the socket send timeout.
        if(setsockopt(sock, SOL_SOCKET, SO_SNDTIMEO, (char *)&optionValue, sizeof(optionValue)) == SOCKET_ERROR)
        {
            errorCode = WSAGetLastError();
            closesocket(sock);   // BUG FIX: don't leak the handle on throw
            throw WinsockException("setsockopt() failed.", errorCode);
        }
        // Set the socket read timeout.
        if(setsockopt(sock, SOL_SOCKET, SO_RCVTIMEO, (char *)&optionValue, sizeof(optionValue)) == SOCKET_ERROR)
        {
            errorCode = WSAGetLastError();
            closesocket(sock);   // BUG FIX: don't leak the handle on throw
            throw WinsockException("setsockopt() failed.", errorCode);
        }
    }
    memset(&serverAddress, 0, sizeof(serverAddress));
    serverAddress.sin_family = AF_INET;
    serverAddress.sin_addr.s_addr = inet_addr(pIPAddress);
    // BUG FIX: inet_addr() returns INADDR_NONE for a malformed dotted-quad;
    // the original silently went on to connect() to 255.255.255.255.
    if(serverAddress.sin_addr.s_addr == INADDR_NONE)
    {
        closesocket(sock);
        throw WinsockException("inet_addr() failed: invalid IP address.", 0);
    }
    serverAddress.sin_port = htons((u_short)pPort);
    if(connect(sock, (struct sockaddr *) &serverAddress, sizeof(serverAddress)) == SOCKET_ERROR)
    {
        errorCode = WSAGetLastError();
        closesocket(sock);   // BUG FIX: avoid a leaked, half-open handle
        throw WinsockException("connect() failed.", errorCode);
    }
}
// Sends the full pBufferLength bytes from pBuffer.
// BUG FIX: on a stream socket send() may transmit FEWER bytes than asked
// (especially with SO_SNDTIMEO set); the original ignored the byte count
// and could silently drop the tail of a message -- a plausible cause of
// "claims connected but no data arrives".  Loop until everything is sent.
// Throws WinsockException if any send() call fails.
void Socket::write(char * pBuffer,int pBufferLength)
{
    int totalSent = 0;
    while(totalSent < pBufferLength)
    {
        int bytesSent = send(sock, pBuffer + totalSent, pBufferLength - totalSent, 0);
        if(bytesSent == SOCKET_ERROR)
        {
            int errorCode = WSAGetLastError();
            throw WinsockException("send() failed.", errorCode);
        }
        totalSent += bytesSent;
    }
}
// Receives up to pBufferLength bytes directly into the caller's buffer and
// returns the number of bytes received.
// BUG FIX: the original allocated `new char[pBufferLength]` on every call,
// never deleted it (leaked on each call AND on each throw), then memcpy'd
// into pBuffer.  Receiving straight into pBuffer removes the leak and the
// redundant copy without changing the interface.
// Throws WinsockException if recv() fails or the peer closed the connection.
int Socket::read(char *pBuffer, int pBufferLength)
{
    int bytesReceived = recv(sock, pBuffer, pBufferLength, 0);
    if(bytesReceived == SOCKET_ERROR)
    {
        int errorCode = WSAGetLastError();
        throw WinsockException("recv() failed.", errorCode);
    }
    if(bytesReceived == 0)
    {
        // Graceful close by the peer: WSAGetLastError() is meaningless here
        // (it returns a stale code), so report the condition explicitly.
        throw WinsockException("recv(): connection closed by peer.", 0);
    }
    return(bytesReceived);
}
void Socket::closeSocket()
{
closesocket(sock);
}
// Global socket
// NOTE(review): gSock is shared mutable state touched by several threads with
// no synchronization at all.  The assignment in thread 1 races with the
// read/write in threads 2 and 3: if they run before (or during) the
// assignment, they operate on whatever handle value gSock happens to hold.
// The compiler-generated copy assignment just copies the raw SOCKET handle,
// and that copy is not atomic.  This race is a plausible cause of symptom #1
// ("claims it is connected, but no data goes through").  Guard gSock with a
// mutex/critical section, or signal the reader/writer threads only after the
// connection is established.
Socket gSock;
// In thread 1 (Connection)
{
Socket mySock("126.122.16.97", 99);
gSock = mySock;
// NOTE(review): mySock's (empty) destructor runs here; gSock keeps the same
// handle value, so the connection stays open -- but two Socket objects
// briefly referred to one handle.
}
// In thread 2 (Read)
{
gSock.read(...);
}
// In thread 3 (Write)
{
gSock.write(...);
}
// In some other thread or function
// NOTE(review): closing from yet another thread while read()/write() are in
// flight is also unsynchronized -- the handle can be closed (and even reused
// by the OS) under them.
{
gSock.closeSocket();
}
Reading and writing work fine... The problem arises when connecting and closing the socket...
1- Sometimes the socket will claim it is connected, but no data seems to be going through to the server computer located somewhere around the world...
2- The server still thinks it is connected after gSock.closeSocket(), not allowing me to reconnect until it is reset...
Does this have to do with any of the code I have written above?