網上的Windows環境下截屏的源代碼很多,但是看到的都是只能適應單顯示器,無法截取桌面擴展到雙顯示器情況下的完整屏幕。經過查找資料和摸索,發現問題的關鍵就在於正確獲得截屏的長寬尺寸。通常,計算屏幕實際大小可以用:
// Query the desktop window's rectangle (logical coordinates, so the result
// is affected by the Windows display-scaling factor — see the text below).
HWND hWnd = GetDesktopWindow();
RECT re;
GetWindowRect(hWnd, &re);  // fix: the original passed an undeclared `window`
int width = re.right, height = re.bottom;
上面的代碼可以獲得單顯示器縮放比例100%情況下的屏幕分辨率,但這只是虛尺寸,所以若是用戶將屏幕縮放比例設置爲超出100%,則會導致截屏區域的寬和高計算出錯,因而無法截取到完整的屏幕。代碼得改成如下所示:
// Returns the physical (DPI-unscaled) resolution of the primary monitor.
// GetWindowRect on the desktop reports logical pixels, which shrink when the
// user sets display scaling above 100%; the current display mode from
// EnumDisplaySettings is not affected by scaling.
// On any API failure, width and height are set to 0.
void getPhysicalResolution(int& width, int& height)
{
    // Locate the monitor hosting the desktop window (the primary monitor).
    HWND hWnd = GetDesktopWindow();
    HMONITOR hMonitor = MonitorFromWindow(hWnd, MONITOR_DEFAULTTONEAREST);
    // Get the monitor's device name, needed to query its display mode.
    MONITORINFOEX miex;
    ZeroMemory(&miex, sizeof(miex));
    miex.cbSize = sizeof(miex);
    if (!GetMonitorInfo(hMonitor, &miex))
    {
        width = height = 0;  // could not identify the monitor
        return;
    }
    // The current display mode carries the physical pixel dimensions.
    DEVMODE dm;
    ZeroMemory(&dm, sizeof(dm));
    dm.dmSize = sizeof(dm);
    dm.dmDriverExtra = 0;
    if (!EnumDisplaySettings(miex.szDevice, ENUM_CURRENT_SETTINGS, &dm))
    {
        width = height = 0;  // no current display mode available
        return;
    }
    width = static_cast<int>(dm.dmPelsWidth);
    height = static_cast<int>(dm.dmPelsHeight);
}
上述代碼可以不受Windows顯示縮放比例的影響,獲得屏幕的正確物理分辨率。
但是上述代碼只能獲取主屏的分辨率,無法獲得桌面擴展到第二顯示器後的完整桌面物理尺寸。解決的方法是調用Windows API中的EnumDisplayMonitors函數,枚舉連接到系統的所有顯示器,然後計算所有顯示器的分辨率累加和,從而得到總的桌面分辨率。代碼如下:
// Per-monitor data collected while enumerating displays (filled in by the
// EnumDisplayMonitors callback below).
typedef struct __tagMonitorProperty
{
public:
// Physical resolution (pixels) of this monitor's current display mode.
long width, height;
// Top-left corner of the monitor inside the virtual desktop
// (DEVMODE::dmPosition); the primary monitor sits at {0, 0}.
long x, y;
// Device context handed to the enumeration callback (may be NULL).
HDC hdcMonitor;
// Monitor handle from the enumeration.
HMONITOR hMonitor;
// Device name from MONITORINFOEX::szDevice, e.g. "\\.\DISPLAY1".
string monitorName;
// True when this monitor is the primary display.
bool primaryScreenFlag;
} MonitorProperty;
// EnumDisplayMonitors callback: appends one monitor's physical size and
// virtual-desktop position to the vector<MonitorProperty> passed via dwData.
// Always returns TRUE so the enumeration visits every monitor; monitors whose
// info cannot be queried are skipped rather than recorded with garbage.
BOOL CALLBACK monitorEnumProc(HMONITOR hMonitor, HDC hdcMonitor, LPRECT lprcMonitor, LPARAM dwData)
{
    vector<MonitorProperty> *monitorProperties = (vector<MonitorProperty> *)dwData;
    MonitorProperty monitorProperty;
    monitorProperty.hMonitor = hMonitor;
    monitorProperty.hdcMonitor = hdcMonitor;
    // Get the device name; required below for EnumDisplaySettings.
    MONITORINFOEX miex;
    ZeroMemory(&miex, sizeof(miex));
    miex.cbSize = sizeof(miex);
    if (!GetMonitorInfo(hMonitor, &miex))
        return TRUE;  // skip this monitor but keep enumerating
    monitorProperty.monitorName = miex.szDevice;
    // dwFlags is a bit mask — test the bit instead of comparing with ==.
    monitorProperty.primaryScreenFlag = (miex.dwFlags & MONITORINFOF_PRIMARY) != 0;
    // The current display mode yields the physical (scaling-independent)
    // size and the monitor's position inside the virtual desktop.
    DEVMODE dm;
    ZeroMemory(&dm, sizeof(dm));
    dm.dmSize = sizeof(dm);
    dm.dmDriverExtra = 0;
    if (!EnumDisplaySettings(miex.szDevice, ENUM_CURRENT_SETTINGS, &dm))
        return TRUE;  // no mode information; skip this monitor
    monitorProperty.width = dm.dmPelsWidth;
    monitorProperty.height = dm.dmPelsHeight;
    monitorProperty.x = dm.dmPosition.x;
    monitorProperty.y = dm.dmPosition.y;
    monitorProperties->push_back(monitorProperty);
    return TRUE;
}
// Computes the physical pixel size of the whole (possibly multi-monitor)
// desktop as the bounding box of every enumerated monitor.
// Unlike the "last enumerated monitor" heuristic, this is correct for any
// layout and any number of monitors, including monitors placed above or to
// the left of the primary one (whose dmPosition coordinates are negative)
// — the enumeration order of EnumDisplayMonitors is not guaranteed.
// On enumeration failure, width and height are set to 0.
void getMultiMonitorPhysicalSize(long& width, long& height)
{
    vector<MonitorProperty> monitorProperties;
    EnumDisplayMonitors(NULL, NULL, monitorEnumProc, (LPARAM)&monitorProperties);
    if (monitorProperties.empty())
    {
        width = height = 0;  // avoid indexing an empty vector
        return;
    }
    // Fold every monitor rectangle into one bounding box.
    long left = monitorProperties[0].x;
    long top = monitorProperties[0].y;
    long right = left + monitorProperties[0].width;
    long bottom = top + monitorProperties[0].height;
    for (const MonitorProperty& mp : monitorProperties)
    {
        left = (mp.x < left) ? mp.x : left;
        top = (mp.y < top) ? mp.y : top;
        right = (mp.x + mp.width > right) ? mp.x + mp.width : right;
        bottom = (mp.y + mp.height > bottom) ? mp.y + mp.height : bottom;
    }
    width = right - left;
    height = bottom - top;
}
以上代碼的依據在於:Windows環境下,當桌面擴展到副顯示器上之後,副顯示器DEVMODE結構中的dmPosition結構中,其x、y取值都是從主顯示器的寬度和高度之後開始計算。例如,主顯示器的分辨率是1920*1080,若副顯示器桌面是橫向擴展,則副顯示器的x點座標是1920,y點座標仍舊是0。若副顯示器是縱向擴展(能縱向擴展桌面嗎?這一點我是猜的),則副顯示器x座標爲0,y座標爲1080。基於這一點,只要知道枚舉出來的最後一個顯示器的{x, y}座標,加上這個顯示器的寬度和高度,就可以得到多顯示器組合起來的擴展後桌面的整體寬度和高度。需要說明的是,家裏只有兩個顯示器,所以目前只驗證了雙顯示器下上述算法的正確性,沒條件驗證更多顯示器情況下的情況。
得到了完整桌面的寬度和高度,截屏的事情就好辦了。截屏代碼如下所示:
// Captures the entire (multi-monitor) desktop into a 32-bit BMP file.
// screenshotFilename: path of the .bmp file to create (overwritten if present).
void catchScreen(char *screenshotFilename)
{
    long width, height;
    getMultiMonitorPhysicalSize(width, height);
    ostringstream oss;
    oss << "capturing screen - width: " << width << ", height: " << height;
    runtimeLogger.write(oss.str(), 0, 0, 0);
    // 32 bpp -> 4 bytes per pixel; 32-bit DIB rows need no padding.
    const long imageSize = width * height * 4L;
    vector<char> buf(imageSize);  // RAII: freed on every exit path
    HWND hDesktopWindow = GetDesktopWindow();
    HDC displayDeviceContext = GetDC(hDesktopWindow);
    HDC memoryDeviceContext = CreateCompatibleDC(0);
    HBITMAP hbm = CreateCompatibleBitmap(displayDeviceContext, width, height);
    // Keep the DC's stock bitmap so it can be restored before cleanup.
    HGDIOBJ oldBitmap = SelectObject(memoryDeviceContext, hbm);
    // Source and destination are the same size, so no scaling is needed:
    // BitBlt is the right call (the original used StretchBlt pointlessly).
    BitBlt(memoryDeviceContext, 0, 0, width, height, displayDeviceContext, 0, 0, SRCCOPY);
    // Describe the DIB we want back: 32-bit, bottom-up (positive height),
    // uncompressed. Zero-init so biClrUsed etc. are not garbage.
    BITMAPINFO bi;
    ZeroMemory(&bi, sizeof(bi));
    bi.bmiHeader.biSize = sizeof(bi.bmiHeader);
    bi.bmiHeader.biWidth = width;
    bi.bmiHeader.biHeight = height;  // positive -> bottom-up, as BMP expects
    bi.bmiHeader.biPlanes = 1;
    bi.bmiHeader.biBitCount = 32;
    bi.bmiHeader.biCompression = BI_RGB;
    GetDIBits(memoryDeviceContext, hbm, 0, height, buf.data(), &bi, DIB_RGB_COLORS);
    // BMP file header: "BM" magic, total size, pixel-data offset (54 bytes).
    BITMAPFILEHEADER bif;
    ZeroMemory(&bif, sizeof(bif));   // also clears bfReserved1/bfReserved2
    bif.bfType = MAKEWORD('B', 'M'); // 0x4D42, little-endian "BM"
    bif.bfSize = imageSize + sizeof(BITMAPFILEHEADER) + sizeof(BITMAPINFOHEADER);
    bif.bfOffBits = sizeof(BITMAPFILEHEADER) + sizeof(BITMAPINFOHEADER);
    BITMAPINFOHEADER bii;
    ZeroMemory(&bii, sizeof(bii));
    bii.biSize = sizeof(BITMAPINFOHEADER);
    bii.biWidth = width;
    bii.biHeight = height;
    bii.biPlanes = 1;
    bii.biBitCount = 32;
    bii.biCompression = BI_RGB;
    bii.biSizeImage = imageSize;
    ofstream ofs(screenshotFilename, ofstream::binary | ofstream::out);
    ofs.write((const char *)&bif, sizeof bif);
    ofs.write((const char *)&bii, sizeof bii);
    ofs.write(buf.data(), imageSize);
    // Release GDI resources: restore the stock bitmap, then delete ours.
    SelectObject(memoryDeviceContext, oldBitmap);
    DeleteObject(hbm);               // fixes the HBITMAP leak in the original
    DeleteDC(memoryDeviceContext);
    ReleaseDC(hDesktopWindow, displayDeviceContext);
}