Add rendered and content rectangle properties to VideoOutput QML.

So you can align controls with an edge of the rendered area.  There are
also some convenience functions for mapping source coordinates to and from
destination coordinates. Also tweaked the updateGeometry function to
bail out if no input parameters have changed, rather than doing some
more boring maths.

Added a unit test for a lot of the class, too.

Change-Id: I943bb45e4e56356247d5d6deb5d446222edcb7ca
Reviewed-by: Dmytro Poplavskiy <dmytro.poplavskiy@nokia.com>
This commit is contained in:
Michael Goddard
2012-01-20 15:51:22 +10:00
committed by Qt by Nokia
parent 3b00730eca
commit bad94a5329
6 changed files with 1039 additions and 24 deletions

View File

@@ -164,6 +164,7 @@ QDeclarativeVideoOutput::QDeclarativeVideoOutput(QQuickItem *parent) :
QQuickItem(parent),
m_sourceType(NoSource),
m_fillMode(PreserveAspectFit),
m_geometryDirty(true),
m_orientation(0)
{
setFlag(ItemHasContents, true);
@@ -306,6 +307,18 @@ static inline bool qIsDefaultAspect(int o)
return (o % 180) == 0;
}
/*
 * Return the orientation normalized to the range 0-359.
 */
static inline int qNormalizedOrientation(int o)
{
    // The double modulo folds any value into [0, 360); the inner +360
    // compensates for C++'s negative result when o itself is negative.
    return ((o % 360) + 360) % 360;
}
/*!
\qmlproperty enumeration VideoOutput::fillMode
@@ -331,6 +344,7 @@ void QDeclarativeVideoOutput::setFillMode(FillMode mode)
return;
m_fillMode = mode;
m_geometryDirty = true;
update();
emit fillModeChanged(mode);
@@ -346,52 +360,87 @@ void QDeclarativeVideoOutput::_q_updateNativeSize(const QVideoSurfaceFormat &for
if (m_nativeSize != size) {
m_nativeSize = size;
m_geometryDirty = true;
setImplicitWidth(size.width());
setImplicitHeight(size.height());
emit sourceRectChanged();
}
}
/* Based on fill mode and our size, figure out the source/dest rects */
void QDeclarativeVideoOutput::_q_updateGeometry()
{
// NOTE(review): this span looks like a rendered diff with the +/- markers
// stripped -- both the pre-change members (m_boundingRect, m_sourceRect)
// and their replacements (m_renderedRect, m_contentRect,
// m_sourceTextureRect) are assigned below with the same values.  Verify
// against the applied patch before treating this as compilable code.
QRectF rect(0, 0, width(), height());
// Bail out early when neither the item size nor any geometry input
// (fill mode, orientation, native size -- tracked via m_geometryDirty)
// has changed since the last computation.
if (!m_geometryDirty && m_lastSize == rect)
return;
// Remember the previous content rect so the change signal is only
// emitted when it actually moves.
QRectF oldContentRect(m_contentRect);
m_geometryDirty = false;
m_lastSize = rect;
if (m_nativeSize.isEmpty()) {
//this is necessary for item to receive the
//first paint event and configure video surface.
m_boundingRect = rect;
m_sourceRect = QRectF(0, 0, 1, 1);
m_renderedRect = rect;
m_contentRect = rect;
m_sourceTextureRect = QRectF(0, 0, 1, 1);
} else if (m_fillMode == Stretch) {
// Stretch: the entire frame fills the entire item.
m_boundingRect = rect;
m_sourceRect = QRectF(0, 0, 1, 1);
m_renderedRect = rect;
m_contentRect = rect;
m_sourceTextureRect = QRectF(0, 0, 1, 1);
} else if (m_fillMode == PreserveAspectFit) {
// Fit: scale the native size to fit inside the item, centered;
// the whole source frame is visible.
QSizeF size = m_nativeSize;
size.scale(rect.size(), Qt::KeepAspectRatio);
m_boundingRect = QRectF(0, 0, size.width(), size.height());
m_boundingRect.moveCenter(rect.center());
m_renderedRect = QRectF(0, 0, size.width(), size.height());
m_renderedRect.moveCenter(rect.center());
m_contentRect = m_renderedRect;
m_sourceRect = QRectF(0, 0, 1, 1);
m_sourceTextureRect = QRectF(0, 0, 1, 1);
} else if (m_fillMode == PreserveAspectCrop) {
// Crop: the item is fully covered.  m_contentRect is the scaled
// frame (can extend beyond the item); the texture rect selects
// the sub-region of the source that is actually visible.
m_boundingRect = rect;
m_renderedRect = rect;
QSizeF size = rect.size();
size.scale(m_nativeSize, Qt::KeepAspectRatio);
QSizeF scaled = m_nativeSize;
scaled.scale(rect.size(), Qt::KeepAspectRatioByExpanding);
m_sourceRect = QRectF(
0, 0, size.width() / m_nativeSize.width(), size.height() / m_nativeSize.height());
m_sourceRect.moveCenter(QPointF(0.5, 0.5));
m_contentRect = QRectF(QPointF(), scaled);
m_contentRect.moveCenter(rect.center());
if (qIsDefaultAspect(m_orientation)) {
m_sourceTextureRect = QRectF((-m_contentRect.left()) / m_contentRect.width(),
(-m_contentRect.top()) / m_contentRect.height(),
rect.width() / m_contentRect.width(),
rect.height() / m_contentRect.height());
} else {
// 90/270 degrees: the texture rect axes are swapped.
m_sourceTextureRect = QRectF((-m_contentRect.top()) / m_contentRect.height(),
(-m_contentRect.left()) / m_contentRect.width(),
rect.height() / m_contentRect.height(),
rect.width() / m_contentRect.width());
}
}
if (m_contentRect != oldContentRect)
emit contentRectChanged();
}
/*!
\qmlproperty int VideoOutput::orientation
Some sources of video frames have a strict orientation associated with them (for example,
the camera viewfinder), so that rotating the video output (for example via a portrait or
landscape user interface change) should leave the rendered video the same.
In some cases the source video stream requires a certain
orientation to be correct. This includes
sources like a camera viewfinder, where the displayed
viewfinder should match reality, no matter what rotation
the rest of the user interface has.
If you transform this element you may need to apply an adjustment to the
orientation via this property. This value uses degrees as the units, and must be
a multiple of 90 degrees.
This property allows you to apply a rotation (in steps
of 90 degrees) to compensate for any user interface
rotation, with positive values in the anti-clockwise direction.
The orientation change will also affect the mapping
of coordinates from source to viewport.
*/
int QDeclarativeVideoOutput::orientation() const
{
@@ -404,6 +453,10 @@ void QDeclarativeVideoOutput::setOrientation(int orientation)
if (orientation % 90)
return;
// If there's no actual change, return
if (m_orientation == orientation)
return;
// If the new orientation is the same effect
// as the old one, don't update the video node stuff
if ((m_orientation % 360) == (orientation % 360)) {
@@ -412,6 +465,8 @@ void QDeclarativeVideoOutput::setOrientation(int orientation)
return;
}
m_geometryDirty = true;
// Otherwise, a new orientation
// See if we need to change aspect ratio orientation too
bool oldAspect = qIsDefaultAspect(m_orientation);
@@ -424,12 +479,244 @@ void QDeclarativeVideoOutput::setOrientation(int orientation)
setImplicitWidth(m_nativeSize.width());
setImplicitHeight(m_nativeSize.height());
// Source rectangle does not change for orientation
}
update();
emit orientationChanged();
}
/*!
    \qmlproperty rectangle VideoOutput::contentRect

    This property holds the item coordinates of the area that
    would contain video to render. With certain fill modes,
    this rectangle will be larger than the visible area of this
    element.

    This property is useful when other coordinates are specified
    in terms of the source dimensions - this applies for relative
    (normalized) frame coordinates in the range of 0 to 1.0.

    Areas of the item outside this rectangle will be transparent.

    \sa mapRectToItem(), mapPointToItem()
*/
QRectF QDeclarativeVideoOutput::contentRect() const
{
// Kept up to date by _q_updateGeometry(): the unclipped destination
// rectangle in item pixel coordinates.
return m_contentRect;
}
/*!
    \qmlproperty rectangle VideoOutput::sourceRect

    This property holds the area of the source video
    content that is considered for rendering.  The
    values are in source pixel coordinates.

    Note that typically the top left corner of this rectangle
    will be \c {0,0} while the width and height will be the
    width and height of the input content.

    The orientation setting does not affect this rectangle.
*/
QRectF QDeclarativeVideoOutput::sourceRect() const
{
    // Undo the width/height swap applied for 90/270 degree
    // orientations before reporting the size.
    QSizeF reported(m_nativeSize);
    if (!qIsDefaultAspect(m_orientation))
        reported.transpose();
    return QRectF(QPointF(), reported); // XXX ignores viewport
}
/*!
    \qmlmethod mapNormalizedPointToItem

    Given normalized coordinates \a point (that is, each
    component in the range of 0 to 1.0), return the mapped point
    that it corresponds to (in item coordinates).
    This mapping is affected by the orientation.

    Depending on the fill mode, this point may lie outside the rendered
    rectangle.
*/
QPointF QDeclarativeVideoOutput::mapNormalizedPointToItem(const QPointF &point) const
{
// Scale the normalized offsets by the content rect dimensions; for
// 90/270 degree orientations the axes are swapped.
qreal dx = point.x();
qreal dy = point.y();
if (qIsDefaultAspect(m_orientation)) {
dx *= m_contentRect.width();
dy *= m_contentRect.height();
} else {
dx *= m_contentRect.height();
dy *= m_contentRect.width();
}
// Anchor at the content rect corner that the source origin maps to
// for each rotation, and apply the offsets with the matching signs.
switch (qNormalizedOrientation(m_orientation)) {
case 0:
default:
return m_contentRect.topLeft() + QPointF(dx, dy);
case 90:
return m_contentRect.bottomLeft() + QPointF(dy, -dx);
case 180:
return m_contentRect.bottomRight() + QPointF(-dx, -dy);
case 270:
return m_contentRect.topRight() + QPointF(-dy, dx);
}
}
/*!
    \qmlmethod mapNormalizedRectToItem

    Given a rectangle \a rectangle in normalized
    coordinates (that is, each component in the range of 0 to 1.0),
    return the mapped rectangle that it corresponds to (in item coordinates).
    This mapping is affected by the orientation.

    Depending on the fill mode, this rectangle may extend outside the
    rendered rectangle.
*/
QRectF QDeclarativeVideoOutput::mapNormalizedRectToItem(const QRectF &rectangle) const
{
    // Map the two defining corners independently; normalized() repairs
    // the corner ordering when the orientation flips an axis.
    const QPointF mappedTopLeft = mapNormalizedPointToItem(rectangle.topLeft());
    const QPointF mappedBottomRight = mapNormalizedPointToItem(rectangle.bottomRight());
    return QRectF(mappedTopLeft, mappedBottomRight).normalized();
}
/*!
    \qmlmethod mapPointToSource

    Given a point \a point in item coordinates, return the
    corresponding point in source coordinates.  This mapping is
    affected by the orientation.

    If the supplied point lies outside the rendered area, the returned
    point will be outside the source rectangle.
*/
QPointF QDeclarativeVideoOutput::mapPointToSource(const QPointF &point) const
{
    // Normalize first, then scale up by the source dimensions.
    // m_nativeSize is transposed in some orientations, so pick the
    // multipliers accordingly.
    const QPointF normalized = mapPointToSourceNormalized(point);
    if (qIsDefaultAspect(m_orientation))
        return QPointF(normalized.x() * m_nativeSize.width(),
                       normalized.y() * m_nativeSize.height());
    return QPointF(normalized.x() * m_nativeSize.height(),
                   normalized.y() * m_nativeSize.width());
}
/*!
    \qmlmethod mapRectToSource

    Given a rectangle \a rectangle in item coordinates, return the
    corresponding rectangle in source coordinates.  This mapping is
    affected by the orientation.

    If the supplied rectangle lies outside the rendered area, the
    returned rectangle will be outside the source rectangle.
*/
QRectF QDeclarativeVideoOutput::mapRectToSource(const QRectF &rectangle) const
{
    // Map opposite corners and normalize, since the orientation can
    // swap which corner ends up top-left.
    const QPointF corner1 = mapPointToSource(rectangle.topLeft());
    const QPointF corner2 = mapPointToSource(rectangle.bottomRight());
    return QRectF(corner1, corner2).normalized();
}
/*!
    \qmlmethod mapPointToSourceNormalized

    Given a point \a point in item coordinates, return the
    corresponding point in normalized source coordinates.  This mapping
    is affected by the orientation.

    If the supplied point lies outside the rendered area, the returned
    point will be outside the source rectangle.  No clamping is performed.
*/
QPointF QDeclarativeVideoOutput::mapPointToSourceNormalized(const QPointF &point) const
{
// An empty content rect would divide by zero below.
if (m_contentRect.isEmpty())
return QPointF();
// Normalize the item source point
qreal nx = (point.x() - m_contentRect.left()) / m_contentRect.width();
qreal ny = (point.y() - m_contentRect.top()) / m_contentRect.height();
const qreal one(1.0f);
// For now, the origin of the source rectangle is 0,0
// Inverse of the rotation applied in mapNormalizedPointToItem():
// swap/flip the normalized components per 90-degree step.
switch (qNormalizedOrientation(m_orientation)) {
case 0:
default:
return QPointF(nx, ny);
case 90:
return QPointF(one - ny, nx);
case 180:
return QPointF(one - nx, one - ny);
case 270:
return QPointF(ny, one - nx);
}
}
/*!
    \qmlmethod mapRectToSourceNormalized

    Given a rectangle \a rectangle in item coordinates, return the
    corresponding rectangle in normalized source coordinates.  This
    mapping is affected by the orientation.

    If the supplied rectangle lies outside the rendered area, the
    returned rectangle will be outside the source rectangle.  No
    clamping is performed.
*/
QRectF QDeclarativeVideoOutput::mapRectToSourceNormalized(const QRectF &rectangle) const
{
    // The orientation may swap the mapped corners; normalized() puts
    // the resulting rectangle back into canonical form.
    const QPointF first = mapPointToSourceNormalized(rectangle.topLeft());
    const QPointF second = mapPointToSourceNormalized(rectangle.bottomRight());
    return QRectF(first, second).normalized();
}
/*!
    \qmlmethod mapPointToItem

    Given a point \a point in source coordinates, return the
    corresponding point in item coordinates.  This mapping is
    affected by the orientation.

    Depending on the fill mode, this point may lie outside the
    rendered rectangle.
*/
QPointF QDeclarativeVideoOutput::mapPointToItem(const QPointF &point) const
{
    if (m_nativeSize.isEmpty())
        return QPointF();

    // Normalize against the source dimensions, then reuse the
    // normalized mapping.  m_nativeSize is transposed in some
    // orientations, so choose the divisors accordingly.
    qreal xDivisor = m_nativeSize.width();
    qreal yDivisor = m_nativeSize.height();
    if (!qIsDefaultAspect(m_orientation)) {
        xDivisor = m_nativeSize.height();
        yDivisor = m_nativeSize.width();
    }
    return mapNormalizedPointToItem(QPointF(point.x() / xDivisor, point.y() / yDivisor));
}
/*!
    \qmlmethod mapRectToItem

    Given a rectangle \a rectangle in source coordinates, return the
    corresponding rectangle in item coordinates.  This mapping is
    affected by the orientation.

    Depending on the fill mode, this rectangle may extend outside the
    rendered rectangle.
*/
QRectF QDeclarativeVideoOutput::mapRectToItem(const QRectF &rectangle) const
{
    // Map both corners and let normalized() fix any axis flip caused
    // by the orientation.
    const QPointF itemTopLeft = mapPointToItem(rectangle.topLeft());
    const QPointF itemBottomRight = mapPointToItem(rectangle.bottomRight());
    return QRectF(itemTopLeft, itemBottomRight).normalized();
}
QSGNode *QDeclarativeVideoOutput::updatePaintNode(QSGNode *oldNode, UpdatePaintNodeData *)
{
QSGVideoNode *videoNode = static_cast<QSGVideoNode *>(oldNode);
@@ -464,7 +751,7 @@ QSGNode *QDeclarativeVideoOutput::updatePaintNode(QSGNode *oldNode, UpdatePaintN
_q_updateGeometry();
// Negative rotations need lots of %360
videoNode->setTexturedRectGeometry(m_boundingRect, m_sourceRect, (360 + (m_orientation % 360)) % 360);
videoNode->setTexturedRectGeometry(m_renderedRect, m_sourceTextureRect, qNormalizedOrientation(m_orientation));
videoNode->setCurrentFrame(m_frame);
return videoNode;
}

View File

@@ -42,6 +42,8 @@
#ifndef QDECLARATIVEVIDEOOUTPUT_P_H
#define QDECLARATIVEVIDEOOUTPUT_P_H
#include <QtCore/QRectF>
#include <QtQuick/QQuickItem>
#include <QtMultimedia/qvideoframe.h>
@@ -66,6 +68,8 @@ class QDeclarativeVideoOutput : public QQuickItem
Q_PROPERTY(QObject* source READ source WRITE setSource NOTIFY sourceChanged)
Q_PROPERTY(FillMode fillMode READ fillMode WRITE setFillMode NOTIFY fillModeChanged)
Q_PROPERTY(int orientation READ orientation WRITE setOrientation NOTIFY orientationChanged)
Q_PROPERTY(QRectF sourceRect READ sourceRect NOTIFY sourceRectChanged)
Q_PROPERTY(QRectF contentRect READ contentRect NOTIFY contentRectChanged)
Q_ENUMS(FillMode)
public:
@@ -88,10 +92,24 @@ public:
int orientation() const;
void setOrientation(int);
QRectF sourceRect() const;
QRectF contentRect() const;
Q_INVOKABLE QPointF mapPointToItem(const QPointF &point) const;
Q_INVOKABLE QRectF mapRectToItem(const QRectF &rectangle) const;
Q_INVOKABLE QPointF mapNormalizedPointToItem(const QPointF &point) const;
Q_INVOKABLE QRectF mapNormalizedRectToItem(const QRectF &rectangle) const;
Q_INVOKABLE QPointF mapPointToSource(const QPointF &point) const;
Q_INVOKABLE QRectF mapRectToSource(const QRectF &rectangle) const;
Q_INVOKABLE QPointF mapPointToSourceNormalized(const QPointF &point) const;
Q_INVOKABLE QRectF mapRectToSourceNormalized(const QRectF &rectangle) const;
Q_SIGNALS:
void sourceChanged();
void fillModeChanged(QDeclarativeVideoOutput::FillMode);
void orientationChanged();
void sourceRectChanged();
void contentRectChanged();
protected:
QSGNode *updatePaintNode(QSGNode *, UpdatePaintNodeData *);
@@ -125,8 +143,12 @@ private:
QVideoFrame m_frame;
FillMode m_fillMode;
QSize m_nativeSize;
QRectF m_boundingRect;
QRectF m_sourceRect;
bool m_geometryDirty;
QRectF m_lastSize; // Cache of last size to avoid recalculating geometry
QRectF m_renderedRect; // Destination pixel coordinates, clipped
QRectF m_contentRect; // Destination pixel coordinates, unclipped
QRectF m_sourceTextureRect; // Source texture coordinates
int m_orientation;
QMutex m_frameMutex;

View File

@@ -4,6 +4,7 @@ SUBDIRS += \
qaudiodeviceinfo \
qaudioinput \
qaudiooutput \
qdeclarativevideooutput \
qmediaplayerbackend \
qcamerabackend \
qsoundeffect \

View File

@@ -0,0 +1,13 @@
CONFIG += testcase
TARGET = tst_qdeclarativevideooutput
QT += multimedia-private declarative testlib quick
CONFIG += no_private_qt_headers_warning
OTHER_FILES += \
../../../../src/imports/multimedia/qdeclarativevideooutput_p.h
SOURCES += \
tst_qdeclarativevideooutput.cpp
INCLUDEPATH += ../../../../src/imports/multimedia

View File

@@ -0,0 +1,692 @@
/****************************************************************************
**
** Copyright (C) 2012 Nokia Corporation and/or its subsidiary(-ies).
** All rights reserved.
** Contact: Nokia Corporation (qt-info@nokia.com)
**
** This file is part of the Qt Toolkit.
**
** $QT_BEGIN_LICENSE:LGPL$
** GNU Lesser General Public License Usage
** This file may be used under the terms of the GNU Lesser General Public
** License version 2.1 as published by the Free Software Foundation and
** appearing in the file LICENSE.LGPL included in the packaging of this
** file. Please review the following information to ensure the GNU Lesser
** General Public License version 2.1 requirements will be met:
** http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
**
** In addition, as a special exception, Nokia gives you certain additional
** rights. These rights are described in the Nokia Qt LGPL Exception
** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
**
** GNU General Public License Usage
** Alternatively, this file may be used under the terms of the GNU General
** Public License version 3.0 as published by the Free Software Foundation
** and appearing in the file LICENSE.GPL included in the packaging of this
** file. Please review the following information to ensure the GNU General
** Public License version 3.0 requirements will be met:
** http://www.gnu.org/copyleft/gpl.html.
**
** Other Usage
** Alternatively, this file may be used in accordance with the terms and
** conditions contained in a signed written agreement between you and Nokia.
**
**
**
**
**
** $QT_END_LICENSE$
**
****************************************************************************/
//TESTED_COMPONENT=plugins/declarative/multimedia
#include <QtTest/QtTest>
#include <QDeclarativeEngine>
#include <QDeclarativeComponent>
#include "qdeclarativevideooutput_p.h"
#include <qabstractvideosurface.h>
#include <qvideorenderercontrol.h>
#include <qmediaobject.h>
// Minimal QObject exposing a writable videoSurface property, mimicking
// a media-object source so VideoOutput will hand it a video surface.
class SurfaceHolder : public QObject
{
Q_OBJECT
Q_PROPERTY(QAbstractVideoSurface *videoSurface READ videoSurface WRITE setVideoSurface)
public:
SurfaceHolder(QObject *parent)
: QObject(parent)
, m_surface(0)
{
}
// Returns the surface assigned by VideoOutput (null until assigned).
QAbstractVideoSurface *videoSurface() const
{
return m_surface;
}
// Called by VideoOutput (via the property) to install or clear its surface.
void setVideoSurface(QAbstractVideoSurface *surface)
{
m_surface = surface;
}
void presentDummyFrame(const QSize &size);
private:
QAbstractVideoSurface *m_surface; // not owned; set/cleared by VideoOutput
};
// Starts the surface and puts a frame
// Uses the surface's first advertised pixel format; the dummy buffer
// assumes 4 bytes per pixel (size.width() * 4 stride).  Does nothing
// if no surface has been assigned or it advertises no formats.
void SurfaceHolder::presentDummyFrame(const QSize &size)
{
if (m_surface && m_surface->supportedPixelFormats().count() > 0) {
QVideoFrame::PixelFormat pixelFormat = m_surface->supportedPixelFormats().value(0);
QVideoSurfaceFormat format(size, pixelFormat);
QVideoFrame frame(size.width() * size.height() * 4, size, size.width() * 4, pixelFormat);
// Only start the surface once; subsequent frames reuse the format.
if (!m_surface->isActive())
m_surface->start(format);
m_surface->present(frame);
// Have to spin an event loop or two for the surfaceFormatChanged() signal
qApp->processEvents();
}
}
// Test fixture for QDeclarativeVideoOutput: property behavior
// (fillMode, orientation, sourceRect, contentRect), surface-source
// plumbing, and the coordinate mapping invokables.
class tst_QDeclarativeVideoOutput : public QObject
{
Q_OBJECT
public:
tst_QDeclarativeVideoOutput()
: m_mappingComponent(0)
, m_mappingOutput(0)
, m_mappingSurface(0)
{
// Needed so QSignalSpy/QVariant can carry the FillMode enum.
qRegisterMetaType<QDeclarativeVideoOutput::FillMode>();
}
~tst_QDeclarativeVideoOutput()
{
delete m_mappingOutput;
delete m_mappingSurface;
delete m_mappingComponent;
}
public slots:
void initTestCase();
private slots:
void fillMode();
void orientation();
void surfaceSource();
void sourceRect();
void contentRect();
void contentRect_data();
void mappingPoint();
void mappingPoint_data();
void mappingRect();
void mappingRect_data();
private:
QDeclarativeEngine m_engine;
QByteArray m_plainQML; // shared 150x100 VideoOutput QML snippet
// Variables used for the mapping test
QDeclarativeComponent *m_mappingComponent;
QObject *m_mappingOutput;
SurfaceHolder *m_mappingSurface;
// Helpers defined elsewhere: force a geometry pass, and invoke the
// rect->rect / point->point mapping methods by name.
void updateOutputGeometry(QObject *output);
QRectF invokeR2R(QObject *object, const char *signature, const QRectF &rect);
QPointF invokeP2P(QObject *object, const char *signature, const QPointF &point);
};
// Builds the shared QML snippet and the long-lived VideoOutput +
// surface pair used by the data-driven mapping tests: a 150x100 item
// fed a 200x100 frame.
void tst_QDeclarativeVideoOutput::initTestCase()
{
m_plainQML = \
"import QtQuick 2.0\n" \
"import QtMultimedia 5.0\n" \
"VideoOutput {" \
" width: 150;" \
" height: 100;" \
"}";
// We initialize the mapping vars here
m_mappingComponent = new QDeclarativeComponent(&m_engine);
m_mappingComponent->setData(m_plainQML, QUrl());
m_mappingSurface = new SurfaceHolder(this);
m_mappingOutput = m_mappingComponent->create();
QVERIFY(m_mappingOutput != 0);
m_mappingOutput->setProperty("source", QVariant::fromValue(static_cast<QObject*>(m_mappingSurface)));
m_mappingSurface->presentDummyFrame(QSize(200,100)); // this should start m_surface
updateOutputGeometry(m_mappingOutput);
}
Q_DECLARE_METATYPE(QDeclarativeVideoOutput::FillMode)
void tst_QDeclarativeVideoOutput::fillMode()
{
QDeclarativeComponent component(&m_engine);
component.setData(m_plainQML, QUrl());
QObject *videoOutput = component.create();
QVERIFY(videoOutput != 0);
QSignalSpy propSpy(videoOutput, SIGNAL(fillModeChanged(QDeclarativeVideoOutput::FillMode)));
// Default is preserveaspectfit
QCOMPARE(videoOutput->property("fillMode").value<QDeclarativeVideoOutput::FillMode>(), QDeclarativeVideoOutput::PreserveAspectFit);
QCOMPARE(propSpy.count(), 0);
videoOutput->setProperty("fillMode", QVariant(int(QDeclarativeVideoOutput::PreserveAspectCrop)));
QCOMPARE(videoOutput->property("fillMode").value<QDeclarativeVideoOutput::FillMode>(), QDeclarativeVideoOutput::PreserveAspectCrop);
QCOMPARE(propSpy.count(), 1);
videoOutput->setProperty("fillMode", QVariant(int(QDeclarativeVideoOutput::Stretch)));
QCOMPARE(videoOutput->property("fillMode").value<QDeclarativeVideoOutput::FillMode>(), QDeclarativeVideoOutput::Stretch);
QCOMPARE(propSpy.count(), 2);
videoOutput->setProperty("fillMode", QVariant(int(QDeclarativeVideoOutput::Stretch)));
QCOMPARE(videoOutput->property("fillMode").value<QDeclarativeVideoOutput::FillMode>(), QDeclarativeVideoOutput::Stretch);
QCOMPARE(propSpy.count(), 2);
delete videoOutput;
}
// Verifies the orientation property: default value, change notification,
// acceptance of values beyond 360 and negative multiples of 90, no
// re-emit for an unchanged value, and rejection of non-multiples of 90.
void tst_QDeclarativeVideoOutput::orientation()
{
QDeclarativeComponent component(&m_engine);
component.setData(m_plainQML, QUrl());
QObject *videoOutput = component.create();
QVERIFY(videoOutput != 0);
QSignalSpy propSpy(videoOutput, SIGNAL(orientationChanged()));
// Default orientation is 0
QCOMPARE(videoOutput->property("orientation").toInt(), 0);
QCOMPARE(propSpy.count(), 0);
videoOutput->setProperty("orientation", QVariant(90));
QCOMPARE(videoOutput->property("orientation").toInt(), 90);
QCOMPARE(propSpy.count(), 1);
videoOutput->setProperty("orientation", QVariant(180));
QCOMPARE(videoOutput->property("orientation").toInt(), 180);
QCOMPARE(propSpy.count(), 2);
videoOutput->setProperty("orientation", QVariant(270));
QCOMPARE(videoOutput->property("orientation").toInt(), 270);
QCOMPARE(propSpy.count(), 3);
// 360 is stored as-is (not normalized to 0)
videoOutput->setProperty("orientation", QVariant(360));
QCOMPARE(videoOutput->property("orientation").toInt(), 360);
QCOMPARE(propSpy.count(), 4);
// More than 360 should be fine
videoOutput->setProperty("orientation", QVariant(540));
QCOMPARE(videoOutput->property("orientation").toInt(), 540);
QCOMPARE(propSpy.count(), 5);
// Negative should be fine
videoOutput->setProperty("orientation", QVariant(-180));
QCOMPARE(videoOutput->property("orientation").toInt(), -180);
QCOMPARE(propSpy.count(), 6);
// Same value should not reemit
videoOutput->setProperty("orientation", QVariant(-180));
QCOMPARE(videoOutput->property("orientation").toInt(), -180);
QCOMPARE(propSpy.count(), 6);
// Non multiples of 90 should not work
videoOutput->setProperty("orientation", QVariant(-1));
QCOMPARE(videoOutput->property("orientation").toInt(), -180);
QCOMPARE(propSpy.count(), 6);
delete videoOutput;
}
// Exercises assigning a surface-holding source to the output: surface
// creation, per-format start/stop, and how the holder's surface pointer
// is replaced or cleared as outputs are created, reassigned, and
// destroyed.
void tst_QDeclarativeVideoOutput::surfaceSource()
{
QDeclarativeComponent component(&m_engine);
component.setData(m_plainQML, QUrl());
QObject *videoOutput = component.create();
QVERIFY(videoOutput != 0);
SurfaceHolder holder(this);
QCOMPARE(holder.videoSurface(), static_cast<QAbstractVideoSurface*>(0));
videoOutput->setProperty("source", QVariant::fromValue(static_cast<QObject*>(&holder)));
QVERIFY(holder.videoSurface() != 0);
// Now we could do things with the surface..
QList<QVideoFrame::PixelFormat> formats = holder.videoSurface()->supportedPixelFormats();
QVERIFY(formats.count() > 0);
// See if we can start and stop each pixel format (..)
foreach (QVideoFrame::PixelFormat format, formats) {
QVideoSurfaceFormat surfaceFormat(QSize(200,100), format);
QVERIFY(holder.videoSurface()->isFormatSupported(surfaceFormat)); // This does kind of depend on node factories
QVERIFY(holder.videoSurface()->start(surfaceFormat));
QVERIFY(holder.videoSurface()->surfaceFormat() == surfaceFormat);
QVERIFY(holder.videoSurface()->isActive());
holder.videoSurface()->stop();
QVERIFY(!holder.videoSurface()->isActive());
}
delete videoOutput;
// This should clear the surface
// (known defect at time of writing; tracked via QEXPECT_FAIL)
QEXPECT_FAIL("", "Surface not cleared on destruction", Continue);
QCOMPARE(holder.videoSurface(), static_cast<QAbstractVideoSurface*>(0));
// Also, creating two sources, setting them in order, and destroying the first
// should not zero holder.videoSurface()
videoOutput = component.create();
videoOutput->setProperty("source", QVariant::fromValue(static_cast<QObject*>(&holder)));
QAbstractVideoSurface *surface = holder.videoSurface();
QVERIFY(holder.videoSurface());
QObject *videoOutput2 = component.create();
QVERIFY(videoOutput2);
videoOutput2->setProperty("source", QVariant::fromValue(static_cast<QObject*>(&holder)));
QVERIFY(holder.videoSurface());
QVERIFY(holder.videoSurface() != surface); // Surface should have changed
surface = holder.videoSurface();
// Now delete first one
delete videoOutput;
QVERIFY(holder.videoSurface());
QVERIFY(holder.videoSurface() == surface); // Should not have changed surface
// Now create a second surface and assign it as the source
// The old surface holder should be zeroed
SurfaceHolder holder2(this);
videoOutput2->setProperty("source", QVariant::fromValue(static_cast<QObject*>(&holder2)));
QCOMPARE(holder.videoSurface(), static_cast<QAbstractVideoSurface*>(0));
QVERIFY(holder2.videoSurface() != 0);
// XXX May be worth adding tests that the surface activeChanged signals are sent appropriately
// to holder?
delete videoOutput2;
}
// Checks the sourceRect property: invalid before the first frame, set
// from the frame size afterwards, signalled exactly once, and
// unaffected by orientation or fill-mode changes.
void tst_QDeclarativeVideoOutput::sourceRect()
{
QDeclarativeComponent component(&m_engine);
component.setData(m_plainQML, QUrl());
QObject *videoOutput = component.create();
QVERIFY(videoOutput != 0);
SurfaceHolder holder(this);
QSignalSpy propSpy(videoOutput, SIGNAL(sourceRectChanged()));
videoOutput->setProperty("source", QVariant::fromValue(static_cast<QObject*>(&holder)));
// Before any frame arrives the rect is the invalid default.
QRectF invalid(0,0,-1,-1);
QCOMPARE(videoOutput->property("sourceRect").toRectF(), invalid);
holder.presentDummyFrame(QSize(200,100));
QCOMPARE(videoOutput->property("sourceRect").toRectF(), QRectF(0, 0, 200, 100));
QCOMPARE(propSpy.count(), 1);
// Another frame shouldn't cause a source rect change
holder.presentDummyFrame(QSize(200,100));
QCOMPARE(propSpy.count(), 1);
QCOMPARE(videoOutput->property("sourceRect").toRectF(), QRectF(0, 0, 200, 100));
// Changing orientation and stretch modes should not affect this
videoOutput->setProperty("orientation", QVariant(90));
updateOutputGeometry(videoOutput);
QCOMPARE(videoOutput->property("sourceRect").toRectF(), QRectF(0, 0, 200, 100));
videoOutput->setProperty("orientation", QVariant(180));
updateOutputGeometry(videoOutput);
QCOMPARE(videoOutput->property("sourceRect").toRectF(), QRectF(0, 0, 200, 100));
videoOutput->setProperty("orientation", QVariant(270));
updateOutputGeometry(videoOutput);
QCOMPARE(videoOutput->property("sourceRect").toRectF(), QRectF(0, 0, 200, 100));
videoOutput->setProperty("orientation", QVariant(-90));
updateOutputGeometry(videoOutput);
QCOMPARE(videoOutput->property("sourceRect").toRectF(), QRectF(0, 0, 200, 100));
videoOutput->setProperty("fillMode", QVariant(int(QDeclarativeVideoOutput::PreserveAspectCrop)));
updateOutputGeometry(videoOutput);
QCOMPARE(videoOutput->property("sourceRect").toRectF(), QRectF(0, 0, 200, 100));
videoOutput->setProperty("fillMode", QVariant(int(QDeclarativeVideoOutput::Stretch)));
updateOutputGeometry(videoOutput);
QCOMPARE(videoOutput->property("sourceRect").toRectF(), QRectF(0, 0, 200, 100));
videoOutput->setProperty("fillMode", QVariant(int(QDeclarativeVideoOutput::Stretch)));
updateOutputGeometry(videoOutput);
QCOMPARE(videoOutput->property("sourceRect").toRectF(), QRectF(0, 0, 200, 100));
delete videoOutput;
}
// Data-driven check that mapPointToItem/mapPointToSource (and their
// normalized variants) are exact inverses of each other for the
// fetched orientation and fill mode.  Uses the long-lived 150x100
// output fed a 200x100 frame in initTestCase().
void tst_QDeclarativeVideoOutput::mappingPoint()
{
QFETCH(QPointF, point);
QFETCH(int, orientation);
QFETCH(QDeclarativeVideoOutput::FillMode, fillMode);
QFETCH(QPointF, expected);
QVERIFY(m_mappingOutput);
m_mappingOutput->setProperty("orientation", QVariant(orientation));
m_mappingOutput->setProperty("fillMode", QVariant::fromValue(fillMode));
updateOutputGeometry(m_mappingOutput);
// Forward mapping must hit the expected item point, and the reverse
// mapping must return the original source point.
QPointF output = invokeP2P(m_mappingOutput, "mapPointToItem", point);
QPointF reverse = invokeP2P(m_mappingOutput, "mapPointToSource", output);
QCOMPARE(output, expected);
QCOMPARE(reverse, point);
// Now the normalized versions
// Source rectangle is 200x100
QPointF normal(point.x() / 200, point.y() / 100);
output = invokeP2P(m_mappingOutput, "mapNormalizedPointToItem", normal);
reverse = invokeP2P(m_mappingOutput, "mapPointToSourceNormalized", output);
QCOMPARE(output, expected);
QCOMPARE(reverse, normal);
}
void tst_QDeclarativeVideoOutput::mappingPoint_data()
{
QTest::addColumn<QPointF>("point");
QTest::addColumn<int>("orientation");
QTest::addColumn<QDeclarativeVideoOutput::FillMode>("fillMode");
QTest::addColumn<QPointF>("expected");
QDeclarativeVideoOutput::FillMode stretch = QDeclarativeVideoOutput::Stretch;
QDeclarativeVideoOutput::FillMode fit = QDeclarativeVideoOutput::PreserveAspectFit;
QDeclarativeVideoOutput::FillMode crop = QDeclarativeVideoOutput::PreserveAspectCrop;
// First make sure the component has processed the frame
QCOMPARE(m_mappingOutput->property("sourceRect").toRectF(), QRectF(0,0,200,100));
// 200x100 -> 150,100 stretch, 150x75 fit @ 12.5f, 200x100 @-25,0 crop
// Corners, then the center, then a point in the middle somewhere
QTest::newRow("s0-0") << QPointF(0,0) << 0 << stretch << QPointF(0,0);
QTest::newRow("s1-0") << QPointF(200,0) << 0 << stretch << QPointF(150,0);
QTest::newRow("s2-0") << QPointF(0,100) << 0 << stretch << QPointF(0,100);
QTest::newRow("s3-0") << QPointF(200,100) << 0 << stretch << QPointF(150,100);
QTest::newRow("s4-0") << QPointF(100,50) << 0 << stretch << QPointF(75,50);
QTest::newRow("s5-0") << QPointF(40,80) << 0 << stretch << QPointF(30,80);
QTest::newRow("f0-0") << QPointF(0,0) << 0 << fit << QPointF(0,12.5f);
QTest::newRow("f1-0") << QPointF(200,0) << 0 << fit << QPointF(150,12.5f);
QTest::newRow("f2-0") << QPointF(0,100) << 0 << fit << QPointF(0,87.5f);
QTest::newRow("f3-0") << QPointF(200,100) << 0 << fit << QPointF(150,87.5f);
QTest::newRow("f4-0") << QPointF(100,50) << 0 << stretch << QPointF(75,50);
QTest::newRow("f5-0") << QPointF(40,80) << 0 << stretch << QPointF(30,80);
QTest::newRow("c0-0") << QPointF(0,0) << 0 << crop << QPointF(-25,0);
QTest::newRow("c1-0") << QPointF(200,0) << 0 << crop << QPointF(175,0);
QTest::newRow("c2-0") << QPointF(0,100) << 0 << crop << QPointF(-25,100);
QTest::newRow("c3-0") << QPointF(200,100) << 0 << crop << QPointF(175,100);
QTest::newRow("c4-0") << QPointF(100,50) << 0 << stretch << QPointF(75,50);
QTest::newRow("c5-0") << QPointF(40,80) << 0 << stretch << QPointF(30,80);
// 90 degrees (anti clockwise)
QTest::newRow("s0-90") << QPointF(0,0) << 90 << stretch << QPointF(0,100);
QTest::newRow("s1-90") << QPointF(200,0) << 90 << stretch << QPointF(0,0);
QTest::newRow("s2-90") << QPointF(0,100) << 90 << stretch << QPointF(150,100);
QTest::newRow("s3-90") << QPointF(200,100) << 90 << stretch << QPointF(150,0);
QTest::newRow("s4-90") << QPointF(100,50) << 90 << stretch << QPointF(75,50);
QTest::newRow("s5-90") << QPointF(40,80) << 90 << stretch << QPointF(120,80);
QTest::newRow("f0-90") << QPointF(0,0) << 90 << fit << QPointF(50,100);
QTest::newRow("f1-90") << QPointF(200,0) << 90 << fit << QPointF(50,0);
QTest::newRow("f2-90") << QPointF(0,100) << 90 << fit << QPointF(100,100);
QTest::newRow("f3-90") << QPointF(200,100) << 90 << fit << QPointF(100,0);
QTest::newRow("f4-90") << QPointF(100,50) << 90 << fit << QPointF(75,50);
QTest::newRow("f5-90") << QPointF(40,80) << 90 << fit << QPointF(90,80);
QTest::newRow("c0-90") << QPointF(0,0) << 90 << crop << QPointF(0,200);
QTest::newRow("c1-90") << QPointF(200,0) << 90 << crop << QPointF(0,-100);
QTest::newRow("c2-90") << QPointF(0,100) << 90 << crop << QPointF(150,200);
QTest::newRow("c3-90") << QPointF(200,100) << 90 << crop << QPointF(150,-100);
QTest::newRow("c4-90") << QPointF(100,50) << 90 << crop << QPointF(75,50);
QTest::newRow("c5-90") << QPointF(40,80) << 90 << crop << QPointF(120,140);
// 180
QTest::newRow("s0-180") << QPointF(0,0) << 180 << stretch << QPointF(150,100);
QTest::newRow("s1-180") << QPointF(200,0) << 180 << stretch << QPointF(0,100);
QTest::newRow("s2-180") << QPointF(0,100) << 180 << stretch << QPointF(150,0);
QTest::newRow("s3-180") << QPointF(200,100) << 180 << stretch << QPointF(0,0);
QTest::newRow("s4-180") << QPointF(100,50) << 180 << stretch << QPointF(75,50);
QTest::newRow("s5-180") << QPointF(40,80) << 180 << stretch << QPointF(120,20);
QTest::newRow("f0-180") << QPointF(0,0) << 180 << fit << QPointF(150,87.5f);
QTest::newRow("f1-180") << QPointF(200,0) << 180 << fit << QPointF(0,87.5f);
QTest::newRow("f2-180") << QPointF(0,100) << 180 << fit << QPointF(150,12.5f);
QTest::newRow("f3-180") << QPointF(200,100) << 180 << fit << QPointF(0,12.5f);
QTest::newRow("f4-180") << QPointF(100,50) << 180 << fit << QPointF(75,50);
QTest::newRow("f5-180") << QPointF(40,80) << 180 << fit << QPointF(120,27.5f);
QTest::newRow("c0-180") << QPointF(0,0) << 180 << crop << QPointF(175,100);
QTest::newRow("c1-180") << QPointF(200,0) << 180 << crop << QPointF(-25,100);
QTest::newRow("c2-180") << QPointF(0,100) << 180 << crop << QPointF(175,0);
QTest::newRow("c3-180") << QPointF(200,100) << 180 << crop << QPointF(-25,0);
QTest::newRow("c4-180") << QPointF(100,50) << 180 << crop << QPointF(75,50);
QTest::newRow("c5-180") << QPointF(40,80) << 180 << crop << QPointF(135,20);
// 270
QTest::newRow("s0-270") << QPointF(0,0) << 270 << stretch << QPointF(150,0);
QTest::newRow("s1-270") << QPointF(200,0) << 270 << stretch << QPointF(150,100);
QTest::newRow("s2-270") << QPointF(0,100) << 270 << stretch << QPointF(0,0);
QTest::newRow("s3-270") << QPointF(200,100) << 270 << stretch << QPointF(0,100);
QTest::newRow("s4-270") << QPointF(100,50) << 270 << stretch << QPointF(75,50);
QTest::newRow("s5-270") << QPointF(40,80) << 270 << stretch << QPointF(30,20);
QTest::newRow("f0-270") << QPointF(0,0) << 270 << fit << QPointF(100,0);
QTest::newRow("f1-270") << QPointF(200,0) << 270 << fit << QPointF(100,100);
QTest::newRow("f2-270") << QPointF(0,100) << 270 << fit << QPointF(50,0);
QTest::newRow("f3-270") << QPointF(200,100) << 270 << fit << QPointF(50,100);
QTest::newRow("f4-270") << QPointF(100,50) << 270 << fit << QPointF(75,50);
QTest::newRow("f5-270") << QPointF(40,80) << 270 << fit << QPointF(60,20);
QTest::newRow("c0-270") << QPointF(0,0) << 270 << crop << QPointF(150,-100);
QTest::newRow("c1-270") << QPointF(200,0) << 270 << crop << QPointF(150,200);
QTest::newRow("c2-270") << QPointF(0,100) << 270 << crop << QPointF(0,-100);
QTest::newRow("c3-270") << QPointF(200,100) << 270 << crop << QPointF(0,200);
QTest::newRow("c4-270") << QPointF(100,50) << 270 << crop << QPointF(75,50);
QTest::newRow("c5-270") << QPointF(40,80) << 270 << crop << QPointF(30,-40);
}
/* Test all rectangle mapping */
void tst_QDeclarativeVideoOutput::mappingRect()
{
QFETCH(QRectF, rect);
QFETCH(int, orientation);
QFETCH(QDeclarativeVideoOutput::FillMode, fillMode);
QFETCH(QRectF, expected);
QVERIFY(m_mappingOutput);
m_mappingOutput->setProperty("orientation", QVariant(orientation));
m_mappingOutput->setProperty("fillMode", QVariant::fromValue(fillMode));
updateOutputGeometry(m_mappingOutput);
QRectF output = invokeR2R(m_mappingOutput, "mapRectToItem", rect);
QRectF reverse = invokeR2R(m_mappingOutput, "mapRectToSource", output);
QCOMPARE(output, expected);
QCOMPARE(reverse, rect);
// Now the normalized versions
// Source rectangle is 200x100
QRectF normal(rect.x() / 200, rect.y() / 100, rect.width() / 200, rect.height() / 100);
output = invokeR2R(m_mappingOutput, "mapNormalizedRectToItem", normal);
reverse = invokeR2R(m_mappingOutput, "mapRectToSourceNormalized", output);
QCOMPARE(output, expected);
QCOMPARE(reverse, normal);
}
// Data for mappingRect(): rectangles in source coordinates (200x100 video)
// together with the rectangle they should map to in item coordinates
// (the output item is 150x100), for each orientation and fill mode.
void tst_QDeclarativeVideoOutput::mappingRect_data()
{
    QTest::addColumn<QRectF>("rect");
    QTest::addColumn<int>("orientation");
    QTest::addColumn<QDeclarativeVideoOutput::FillMode>("fillMode");
    QTest::addColumn<QRectF>("expected");

    // First make sure the component has processed the frame
    QCOMPARE(m_mappingOutput->property("sourceRect").toRectF(), QRectF(0,0,200,100));

    QDeclarativeVideoOutput::FillMode stretch = QDeclarativeVideoOutput::Stretch;
    QDeclarativeVideoOutput::FillMode fit = QDeclarativeVideoOutput::PreserveAspectFit;
    QDeclarativeVideoOutput::FillMode crop = QDeclarativeVideoOutput::PreserveAspectCrop;

    // Full rectangle mapping (the whole 200x100 source frame)
    // Stretch: the frame always fills the 150x100 item exactly.
    QTest::newRow("s0") << QRectF(0,0, 200, 100) << 0 << stretch << QRectF(0,0,150,100);
    QTest::newRow("s90") << QRectF(0,0, 200, 100) << 90 << stretch << QRectF(0,0,150,100);
    QTest::newRow("s180") << QRectF(0,0, 200, 100) << 180 << stretch << QRectF(0,0,150,100);
    QTest::newRow("s270") << QRectF(0,0, 200, 100) << 270 << stretch << QRectF(0,0,150,100);

    // Fit: letterboxed inside the item (150x75 when default aspect,
    // 50x100 when rotated by 90/270 degrees).
    QTest::newRow("f0") << QRectF(0,0, 200, 100) << 0 << fit << QRectF(0,12.5f,150,75);
    QTest::newRow("f90") << QRectF(0,0, 200, 100) << 90 << fit << QRectF(50,0,50,100);
    QTest::newRow("f180") << QRectF(0,0, 200, 100) << 180 << fit << QRectF(0,12.5f,150,75);
    QTest::newRow("f270") << QRectF(0,0, 200, 100) << 270 << fit << QRectF(50,0,50,100);

    // Crop: the frame overflows the item, so mapped rects may lie
    // partially outside the 150x100 item rectangle.
    QTest::newRow("c0") << QRectF(0,0, 200, 100) << 0 << crop << QRectF(-25,0,200,100);
    QTest::newRow("c90") << QRectF(0,0, 200, 100) << 90 << crop << QRectF(0,-100,150,300);
    QTest::newRow("c180") << QRectF(0,0, 200, 100) << 180 << crop << QRectF(-25,0,200,100);
    QTest::newRow("c270") << QRectF(0,0, 200, 100) << 270 << crop << QRectF(0,-100,150,300);

    // Partial rectangle mapping
    // Stretch
    // 50-130 in x (0.25 - 0.65), 25-50 (0.25 - 0.5) in y (out of 200, 100) -> 150x100
    QTest::newRow("p-s0") << QRectF(50, 25, 80, 25) << 0 << stretch << QRectF(37.5f,25,60,25);
    QTest::newRow("p-s90") << QRectF(50, 25, 80, 25) << 90 << stretch << QRectF(37.5f,35,37.5f,40);
    QTest::newRow("p-s180") << QRectF(50, 25, 80, 25) << 180 << stretch << QRectF(52.5f,50,60,25);
    QTest::newRow("p-s270") << QRectF(50, 25, 80, 25) << 270 << stretch << QRectF(75,25,37.5f,40);

    // Fit
    QTest::newRow("p-f0") << QRectF(50, 25, 80, 25) << 0 << fit << QRectF(37.5f,31.25f,60,18.75f);
    QTest::newRow("p-f90") << QRectF(50, 25, 80, 25) << 90 << fit << QRectF(62.5f,35,12.5f,40);
    QTest::newRow("p-f180") << QRectF(50, 25, 80, 25) << 180 << fit << QRectF(52.5f,50,60,18.75f);
    QTest::newRow("p-f270") << QRectF(50, 25, 80, 25) << 270 << fit << QRectF(75,25,12.5f,40);

    // Crop
    QTest::newRow("p-c0") << QRectF(50, 25, 80, 25) << 0 << crop << QRectF(25,25,80,25);
    QTest::newRow("p-c90") << QRectF(50, 25, 80, 25) << 90 << crop << QRectF(37.5f,5,37.5f,120);
    QTest::newRow("p-c180") << QRectF(50, 25, 80, 25) << 180 << crop << QRectF(45,50,80,25);
    QTest::newRow("p-c270") << QRectF(50, 25, 80, 25) << 270 << crop << QRectF(75,-25,37.5f,120);
}
// Force the output item to recompute its content/source geometry.
// Since the object isn't visible, update() doesn't do anything,
// so we manually invoke the private _q_updateGeometry() slot instead.
void tst_QDeclarativeVideoOutput::updateOutputGeometry(QObject *output)
{
    QMetaObject::invokeMethod(output, "_q_updateGeometry");
}
void tst_QDeclarativeVideoOutput::contentRect()
{
QFETCH(int, orientation);
QFETCH(QDeclarativeVideoOutput::FillMode, fillMode);
QFETCH(QRectF, expected);
QVERIFY(m_mappingOutput);
m_mappingOutput->setProperty("orientation", QVariant(orientation));
m_mappingOutput->setProperty("fillMode", QVariant::fromValue(fillMode));
updateOutputGeometry(m_mappingOutput);
QRectF output = m_mappingOutput->property("contentRect").toRectF();
QCOMPARE(output, expected);
}
// Data for contentRect(): the expected content rectangle of a 200x100
// source frame rendered in a 150x100 item, per orientation and fill mode.
void tst_QDeclarativeVideoOutput::contentRect_data()
{
    QTest::addColumn<int>("orientation");
    QTest::addColumn<QDeclarativeVideoOutput::FillMode>("fillMode");
    QTest::addColumn<QRectF>("expected");

    // First make sure the component has processed the frame
    QCOMPARE(m_mappingOutput->property("sourceRect").toRectF(), QRectF(0,0,200,100));

    QDeclarativeVideoOutput::FillMode stretch = QDeclarativeVideoOutput::Stretch;
    QDeclarativeVideoOutput::FillMode fit = QDeclarativeVideoOutput::PreserveAspectFit;
    QDeclarativeVideoOutput::FillMode crop = QDeclarativeVideoOutput::PreserveAspectCrop;

    // Stretch just keeps the full render rect regardless of orientation
    QTest::newRow("s0") << 0 << stretch << QRectF(0,0,150,100);
    QTest::newRow("s90") << 90 << stretch << QRectF(0,0,150,100);
    QTest::newRow("s180") << 180 << stretch << QRectF(0,0,150,100);
    QTest::newRow("s270") << 270 << stretch << QRectF(0,0,150,100);

    // Fit depends on orientation
    // Source is 200x100, fitting in 150x100 -> 150x75 (letterboxed vertically)
    // or, when rotated, 100x200 -> 50x100 (pillarboxed horizontally)
    QTest::newRow("f0") << 0 << fit << QRectF(0,12.5f,150,75);
    QTest::newRow("f90") << 90 << fit << QRectF(50,0,50,100);
    QTest::newRow("f180") << 180 << fit << QRectF(0,12.5,150,75);
    QTest::newRow("f270") << 270 << fit << QRectF(50,0,50,100);

    // Crop also depends on orientation, may go outside render rect
    // 200x100 -> -25,0 200x100
    // 100x200 -> 0,-100 150x300
    QTest::newRow("c0") << 0 << crop << QRectF(-25,0,200,100);
    QTest::newRow("c90") << 90 << crop << QRectF(0,-100,150,300);
    QTest::newRow("c180") << 180 << crop << QRectF(-25,0,200,100);
    QTest::newRow("c270") << 270 << crop << QRectF(0,-100,150,300);
}
/*
 * Invoke an invokable QRectF -> QRectF method by name on \a object,
 * returning its result (default-constructed QRectF if invocation fails).
 */
QRectF tst_QDeclarativeVideoOutput::invokeR2R(QObject *object, const char *signature, const QRectF &rect)
{
    QRectF result;
    // invokeMethod() is a static member of QMetaObject; there is no need
    // to fetch the object's metaObject() just to call it.
    QMetaObject::invokeMethod(object, signature,
                              Q_RETURN_ARG(QRectF, result), Q_ARG(QRectF, rect));
    return result;
}
/*
 * Invoke an invokable QPointF -> QPointF method by name on \a object,
 * returning its result (default-constructed QPointF if invocation fails).
 */
QPointF tst_QDeclarativeVideoOutput::invokeP2P(QObject *object, const char *signature, const QPointF &point)
{
    QPointF result;
    // invokeMethod() is a static member of QMetaObject; there is no need
    // to fetch the object's metaObject() just to call it.
    QMetaObject::invokeMethod(object, signature,
                              Q_RETURN_ARG(QPointF, result), Q_ARG(QPointF, point));
    return result;
}
// Expands to the test's main() entry point.
QTEST_MAIN(tst_QDeclarativeVideoOutput)

// Pull in the moc-generated code for the test class declared in this file.
#include "tst_qdeclarativevideooutput.moc"

View File

@@ -28,12 +28,12 @@ SUBDIRS += \
qvideoencodercontrol \
qvideoframe \
qvideosurfaceformat \
qwavedecoder
qwavedecoder \
# Tests depending on private interfaces should only be built if
# these interfaces are exported.
contains (QT_CONFIG, private_tests) {
SUBDIRS += \
qdeclarativeaudio \
qmediaimageviewer \
qmediaimageviewer
}