summary refs log tree commit diff
path: root/telepathy-farstream
diff options
context:
space:
mode:
author    Simon McVittie <simon.mcvittie@collabora.co.uk>    2014-01-08 17:15:53 +0000
committer Simon McVittie <simon.mcvittie@collabora.co.uk>    2014-01-08 19:50:42 +0000
commit    93f18ce1c28735cc15bdd171322d8b6ce05ebd0b (patch)
tree      fec2ad84ea10f0aa25ab4e52fc74a3504655797c /telepathy-farstream
parent    c0f6432c8de302d83a670943deac44b3817e0a7b (diff)
Move examples into telepathy-farstream/
Diffstat (limited to 'telepathy-farstream')
-rw-r--r--  telepathy-farstream/Makefile.am                           2
-rw-r--r--  telepathy-farstream/examples/Makefile.am                 21
-rw-r--r--  telepathy-farstream/examples/call-handler.c             646
-rw-r--r--  telepathy-farstream/examples/python/Makefile.am           7
-rw-r--r--  telepathy-farstream/examples/python/README                5
-rw-r--r--  telepathy-farstream/examples/python/callchannel.py      180
-rw-r--r--  telepathy-farstream/examples/python/callhandler.py      116
-rw-r--r--  telepathy-farstream/examples/python/callui.py           285
-rw-r--r--  telepathy-farstream/examples/python/constants.py         66
-rw-r--r--  telepathy-farstream/examples/python/element-properties   62
-rw-r--r--  telepathy-farstream/examples/python/util.py              40
11 files changed, 1430 insertions, 0 deletions
diff --git a/telepathy-farstream/Makefile.am b/telepathy-farstream/Makefile.am
index 640fdf550..325169a8f 100644
--- a/telepathy-farstream/Makefile.am
+++ b/telepathy-farstream/Makefile.am
@@ -82,3 +82,5 @@ typelib_DATA = $(INTROSPECTION_GIRS:.gir=.typelib)
CLEANFILES = $(gir_DATA) $(typelib_DATA)
endif
+
+SUBDIRS = . examples
diff --git a/telepathy-farstream/examples/Makefile.am b/telepathy-farstream/examples/Makefile.am
new file mode 100644
index 000000000..fda8876de
--- /dev/null
+++ b/telepathy-farstream/examples/Makefile.am
@@ -0,0 +1,21 @@
+SUBDIRS=python
+
+noinst_PROGRAMS = call-handler
+
+LDADD = \
+ $(top_builddir)/telepathy-farstream/libtelepathy-farstream-1.la \
+ $(GLIB_LIBS) \
+ $(DBUS_LIBS) \
+ $(GST_LIBS) \
+ $(FARSTREAM_LIBS) \
+ $(TELEPATHY_LIBS)
+
+AM_CFLAGS = \
+ -I$(top_srcdir) \
+ -I$(top_builddir) \
+ $(ERROR_CFLAGS) \
+ $(GLIB_CFLAGS) \
+ $(DBUS_CFLAGS) \
+ $(GST_CFLAGS) \
+ $(FARSTREAM_CFLAGS) \
+ $(TELEPATHY_CFLAGS)
diff --git a/telepathy-farstream/examples/call-handler.c b/telepathy-farstream/examples/call-handler.c
new file mode 100644
index 000000000..d26aea411
--- /dev/null
+++ b/telepathy-farstream/examples/call-handler.c
@@ -0,0 +1,646 @@
+/*
+ * call-handler.c
+ * Copyright (C) 2011 Collabora Ltd.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "config.h"
+
+#include <gst/gst.h>
+#include <telepathy-glib/telepathy-glib.h>
+#include <telepathy-glib/telepathy-glib-dbus.h>
+#include <farstream/fs-element-added-notifier.h>
+#include <farstream/fs-utils.h>
+#include <telepathy-farstream/telepathy-farstream.h>
+
+typedef struct {
+ GstElement *pipeline;
+ guint buswatch;
+ TpChannel *proxy;
+ TfChannel *channel;
+ GList *notifiers;
+
+ guint input_volume;
+ guint output_volume;
+
+ gboolean has_audio_src;
+ gboolean has_video_src;
+
+ GstElement *video_input;
+ GstElement *video_capsfilter;
+
+ guint width;
+ guint height;
+ guint framerate;
+} ChannelContext;
+
+GMainLoop *loop;
+
+static gboolean
+bus_watch_cb (GstBus *bus,
+ GstMessage *message,
+ gpointer user_data)
+{
+ ChannelContext *context = user_data;
+
+ if (context->channel != NULL)
+ tf_channel_bus_message (context->channel, message);
+
+ if (GST_MESSAGE_TYPE (message) == GST_MESSAGE_ERROR)
+ {
+ GError *error = NULL;
+ gchar *debug = NULL;
+ gst_message_parse_error (message, &error, &debug);
+ g_printerr ("ERROR from element %s: %s\n",
+ GST_OBJECT_NAME (message->src), error->message);
+ g_printerr ("Debugging info: %s\n", (debug) ? debug : "none");
+ g_error_free (error);
+ g_free (debug);
+ }
+
+ return TRUE;
+}
+
+static void
+on_audio_output_volume_changed (TfContent *content,
+ GParamSpec *spec,
+ GstElement *volume)
+{
+ guint output_volume = 0;
+
+ g_object_get (content, "requested-output-volume", &output_volume, NULL);
+
+ if (output_volume == 0)
+ return;
+
+ g_object_set (volume, "volume", (double)output_volume / 255.0, NULL);
+}
+
+static void
+src_pad_added_cb (TfContent *content,
+ TpHandle handle,
+ FsStream *stream,
+ GstPad *pad,
+ FsCodec *codec,
+ gpointer user_data)
+{
+ ChannelContext *context = user_data;
+ gchar *cstr = fs_codec_to_string (codec);
+ FsMediaType mtype;
+ GstPad *sinkpad;
+ GstElement *element;
+ GstStateChangeReturn ret;
+
+ g_debug ("New src pad: %s", cstr);
+ g_object_get (content, "media-type", &mtype, NULL);
+
+ switch (mtype)
+ {
+ case FS_MEDIA_TYPE_AUDIO:
+ {
+ GstElement *volume = NULL;
+ gchar *tmp_str = g_strdup_printf ("audioconvert ! audioresample "
+ "! volume name=\"output_volume%s\" "
+ "! audioconvert ! autoaudiosink", cstr);
+ element = gst_parse_bin_from_description (tmp_str,
+ TRUE, NULL);
+ g_free (tmp_str);
+
+ tmp_str = g_strdup_printf ("output_volume%s", cstr);
+ volume = gst_bin_get_by_name (GST_BIN (element), tmp_str);
+ g_free (tmp_str);
+
+ tp_g_signal_connect_object (content, "notify::output-volume",
+ G_CALLBACK (on_audio_output_volume_changed),
+ volume, 0);
+
+ gst_object_unref (volume);
+
+ break;
+ }
+ case FS_MEDIA_TYPE_VIDEO:
+ element = gst_parse_bin_from_description (
+ "videoconvert ! videoscale ! autovideosink",
+ TRUE, NULL);
+ break;
+ default:
+ g_warning ("Unknown media type");
+ return;
+ }
+
+ gst_bin_add (GST_BIN (context->pipeline), element);
+ sinkpad = gst_element_get_static_pad (element, "sink");
+ ret = gst_element_set_state (element, GST_STATE_PLAYING);
+ if (ret == GST_STATE_CHANGE_FAILURE)
+ {
+ tp_channel_close_async (TP_CHANNEL (context->proxy), NULL, NULL);
+ g_warning ("Failed to start sink pipeline !?");
+ return;
+ }
+
+ if (GST_PAD_LINK_FAILED (gst_pad_link (pad, sinkpad)))
+ {
+ tp_channel_close_async (TP_CHANNEL (context->proxy), NULL, NULL);
+ g_warning ("Couldn't link sink pipeline !?");
+ return;
+ }
+
+ g_object_unref (sinkpad);
+}
+
+static void
+update_video_parameters (ChannelContext *context, gboolean restart)
+{
+ GstCaps *caps;
+ GstClock *clock;
+
+ if (restart)
+ {
+ /* Assuming the pipeline is in playing state */
+ gst_element_set_locked_state (context->video_input, TRUE);
+ gst_element_set_state (context->video_input, GST_STATE_NULL);
+ }
+
+ g_object_get (context->video_capsfilter, "caps", &caps, NULL);
+ caps = gst_caps_make_writable (caps);
+
+ gst_caps_set_simple (caps,
+ "framerate", GST_TYPE_FRACTION, context->framerate, 1,
+ "width", G_TYPE_INT, context->width,
+ "height", G_TYPE_INT, context->height,
+ NULL);
+
+ g_object_set (context->video_capsfilter, "caps", caps, NULL);
+
+ if (restart)
+ {
+ clock = gst_pipeline_get_clock (GST_PIPELINE (context->pipeline));
+ /* Need to reset the clock if we set the pipeline back to ready by hand */
+ if (clock != NULL)
+ {
+ gst_element_set_clock (context->video_input, clock);
+ g_object_unref (clock);
+ }
+
+ gst_element_set_locked_state (context->video_input, FALSE);
+ gst_element_sync_state_with_parent (context->video_input);
+ }
+}
+
+static void
+on_video_framerate_changed (TfContent *content,
+ GParamSpec *spec,
+ ChannelContext *context)
+{
+ guint framerate;
+
+ g_object_get (content, "framerate", &framerate, NULL);
+
+ if (framerate != 0)
+ context->framerate = framerate;
+
+ update_video_parameters (context, FALSE);
+}
+
+static void
+on_video_resolution_changed (TfContent *content,
+ guint width,
+ guint height,
+ ChannelContext *context)
+{
+ g_assert (width > 0 && height > 0);
+
+ context->width = width;
+ context->height = height;
+
+ update_video_parameters (context, TRUE);
+}
+
+static void
+on_audio_input_volume_changed (TfContent *content,
+ GParamSpec *spec,
+ ChannelContext *context)
+{
+ GstElement *volume;
+ guint input_volume = 0;
+
+ g_object_get (content, "requested-input-volume", &input_volume, NULL);
+
+ if (input_volume == 0)
+ return;
+
+ volume = gst_bin_get_by_name (GST_BIN (context->pipeline), "input_volume");
+ g_object_set (volume, "volume", (double)input_volume / 255.0, NULL);
+ gst_object_unref (volume);
+}
+
+static GstElement *
+setup_audio_source (ChannelContext *context, TfContent *content)
+{
+ GstElement *result;
+ GstElement *volume;
+ gint input_volume = 0;
+
+ result = gst_parse_bin_from_description (
+ "pulsesrc ! audio/x-raw, rate=8000 ! queue"
+ " ! audioconvert ! audioresample"
+ " ! volume name=input_volume ! audioconvert ",
+ TRUE, NULL);
+
+ /* FIXME Need to handle both requested/reported */
+ /* TODO Volume control should be handled in FsIo */
+ g_object_get (content,
+ "requested-input-volume", &input_volume,
+ NULL);
+
+ if (input_volume >= 0)
+ {
+ volume = gst_bin_get_by_name (GST_BIN (result), "input_volume");
+ g_debug ("Requested volume is: %i", input_volume);
+ g_object_set (volume, "volume", (double)input_volume / 255.0, NULL);
+ gst_object_unref (volume);
+ }
+
+ g_signal_connect (content, "notify::requested-input-volume",
+ G_CALLBACK (on_audio_input_volume_changed),
+ context);
+
+ return result;
+}
+
+static GstElement *
+setup_video_source (ChannelContext *context, TfContent *content)
+{
+ GstElement *result, *capsfilter;
+ GstCaps *caps;
+ guint framerate = 0, width = 0, height = 0;
+
+ result = gst_parse_bin_from_description_full (
+ "autovideosrc ! videorate drop-only=1 average-period=20000000000 ! videoscale ! videoconvert ! capsfilter name=c",
+ TRUE, NULL, GST_PARSE_FLAG_FATAL_ERRORS, NULL);
+
+ g_assert (result);
+ capsfilter = gst_bin_get_by_name (GST_BIN (result), "c");
+
+ g_object_get (content,
+ "framerate", &framerate,
+ "width", &width,
+ "height", &height,
+ NULL);
+
+ if (framerate == 0)
+ framerate = 15;
+
+ if (width == 0 || height == 0)
+ {
+ width = 320;
+ height = 240;
+ }
+
+ context->framerate = framerate;
+ context->width = width;
+ context->height = height;
+
+ caps = gst_caps_new_simple ("video/x-raw",
+ "width", G_TYPE_INT, width,
+ "height", G_TYPE_INT, height,
+ "framerate", GST_TYPE_FRACTION, framerate, 1,
+ NULL);
+
+ g_object_set (G_OBJECT (capsfilter), "caps", caps, NULL);
+
+ gst_caps_unref (caps);
+
+ context->video_input = result;
+ context->video_capsfilter = capsfilter;
+
+ g_signal_connect (content, "notify::framerate",
+ G_CALLBACK (on_video_framerate_changed),
+ context);
+
+ g_signal_connect (content, "resolution-changed",
+ G_CALLBACK (on_video_resolution_changed),
+ context);
+
+ return result;
+}
+
+static gboolean
+start_sending_cb (TfContent *content, gpointer user_data)
+{
+ ChannelContext *context = user_data;
+ GstPad *srcpad, *sinkpad;
+ FsMediaType mtype;
+ GstElement *element;
+ GstStateChangeReturn ret;
+ gboolean res = FALSE;
+
+ g_debug ("Start sending");
+
+ g_object_get (content,
+ "sink-pad", &sinkpad,
+ "media-type", &mtype,
+ NULL);
+
+ switch (mtype)
+ {
+ case FS_MEDIA_TYPE_AUDIO:
+ if (context->has_audio_src)
+ goto out;
+
+ element = setup_audio_source (context, content);
+ context->has_audio_src = TRUE;
+ break;
+ case FS_MEDIA_TYPE_VIDEO:
+ if (context->has_video_src)
+ goto out;
+
+ element = setup_video_source (context, content);
+ context->has_video_src = TRUE;
+ break;
+ default:
+ g_warning ("Unknown media type");
+ goto out;
+ }
+
+
+ gst_bin_add (GST_BIN (context->pipeline), element);
+ srcpad = gst_element_get_static_pad (element, "src");
+
+ if (GST_PAD_LINK_FAILED (gst_pad_link (srcpad, sinkpad)))
+ {
+ tp_channel_close_async (TP_CHANNEL (context->proxy), NULL, NULL);
+ g_warning ("Couldn't link source pipeline !?");
+ goto out2;
+ }
+
+ ret = gst_element_set_state (element, GST_STATE_PLAYING);
+ if (ret == GST_STATE_CHANGE_FAILURE)
+ {
+ tp_channel_close_async (TP_CHANNEL (context->proxy), NULL, NULL);
+ g_warning ("source pipeline failed to start!?");
+ goto out2;
+ }
+
+ res = TRUE;
+
+out2:
+ g_object_unref (srcpad);
+out:
+ g_object_unref (sinkpad);
+
+ return res;
+}
+
+static void
+content_added_cb (TfChannel *channel,
+ TfContent *content,
+ gpointer user_data)
+{
+ ChannelContext *context = user_data;
+
+ g_debug ("Content added");
+
+ g_signal_connect (content, "src-pad-added",
+ G_CALLBACK (src_pad_added_cb), context);
+ g_signal_connect (content, "start-sending",
+ G_CALLBACK (start_sending_cb), context);
+}
+
+static void
+conference_added_cb (TfChannel *channel,
+ GstElement *conference,
+ gpointer user_data)
+{
+ ChannelContext *context = user_data;
+ GKeyFile *keyfile;
+
+ g_debug ("Conference added");
+
+ /* Add notifier to set the various element properties as needed */
+ keyfile = fs_utils_get_default_element_properties (conference);
+ if (keyfile != NULL)
+ {
+ FsElementAddedNotifier *notifier;
+ g_debug ("Loaded default codecs for %s", GST_ELEMENT_NAME (conference));
+
+ notifier = fs_element_added_notifier_new ();
+ fs_element_added_notifier_set_properties_from_keyfile (notifier, keyfile);
+ fs_element_added_notifier_add (notifier, GST_BIN (context->pipeline));
+
+ context->notifiers = g_list_prepend (context->notifiers, notifier);
+ }
+
+
+ gst_bin_add (GST_BIN (context->pipeline), conference);
+ gst_element_set_state (conference, GST_STATE_PLAYING);
+}
+
+
+static void
+conference_removed_cb (TfChannel *channel,
+ GstElement *conference,
+ gpointer user_data)
+{
+ ChannelContext *context = user_data;
+
+ gst_element_set_locked_state (conference, TRUE);
+ gst_element_set_state (conference, GST_STATE_NULL);
+ gst_bin_remove (GST_BIN (context->pipeline), conference);
+}
+
+static gboolean
+dump_pipeline_cb (gpointer data)
+{
+ ChannelContext *context = data;
+
+ GST_DEBUG_BIN_TO_DOT_FILE_WITH_TS (GST_BIN (context->pipeline),
+ GST_DEBUG_GRAPH_SHOW_ALL,
+ "call-handler");
+
+ return TRUE;
+}
+
+static void
+new_tf_channel_cb (GObject *source,
+ GAsyncResult *result,
+ gpointer user_data)
+{
+ ChannelContext *context = user_data;
+ GError *error = NULL;
+
+ g_debug ("New TfChannel");
+
+ context->channel = tf_channel_new_finish (source, result, &error);
+
+ if (context->channel == NULL)
+ {
+ g_error ("Failed to create channel: %s", error->message);
+ g_clear_error (&error);
+ }
+
+ g_debug ("Adding timeout");
+ g_timeout_add_seconds (5, dump_pipeline_cb, context);
+
+ g_signal_connect (context->channel, "fs-conference-added",
+ G_CALLBACK (conference_added_cb), context);
+
+
+ g_signal_connect (context->channel, "fs-conference-removed",
+ G_CALLBACK (conference_removed_cb), context);
+
+ g_signal_connect (context->channel, "content-added",
+ G_CALLBACK (content_added_cb), context);
+}
+
+static void
+proxy_invalidated_cb (TpProxy *proxy,
+ guint domain,
+ gint code,
+ gchar *message,
+ gpointer user_data)
+{
+ ChannelContext *context = user_data;
+
+ g_debug ("Channel closed");
+ if (context->pipeline != NULL)
+ {
+ gst_element_set_state (context->pipeline, GST_STATE_NULL);
+ g_object_unref (context->pipeline);
+ }
+
+ if (context->channel != NULL)
+ g_object_unref (context->channel);
+
+ g_list_foreach (context->notifiers, (GFunc) g_object_unref, NULL);
+ g_list_free (context->notifiers);
+
+ g_object_unref (context->proxy);
+
+ g_slice_free (ChannelContext, context);
+
+ g_main_loop_quit (loop);
+}
+
+static void
+new_call_channel_cb (TpSimpleHandler *handler,
+ TpAccount *account,
+ TpConnection *connection,
+ GList *channels,
+ GList *requests_satisfied,
+ gint64 user_action_time,
+ TpHandleChannelsContext *handler_context,
+ gpointer user_data)
+{
+ ChannelContext *context;
+ TpChannel *proxy;
+ GstBus *bus;
+ GstElement *pipeline;
+ GstStateChangeReturn ret;
+
+ g_debug ("New channel");
+
+ proxy = channels->data;
+
+ pipeline = gst_pipeline_new (NULL);
+
+ ret = gst_element_set_state (pipeline, GST_STATE_PLAYING);
+
+ if (ret == GST_STATE_CHANGE_FAILURE)
+ {
+ tp_channel_close_async (TP_CHANNEL (proxy), NULL, NULL);
+ g_object_unref (pipeline);
+ g_warning ("Failed to start an empty pipeline !?");
+ return;
+ }
+
+ context = g_slice_new0 (ChannelContext);
+ context->pipeline = pipeline;
+
+ bus = gst_pipeline_get_bus (GST_PIPELINE (pipeline));
+ context->buswatch = gst_bus_add_watch (bus, bus_watch_cb, context);
+ g_object_unref (bus);
+
+ tf_channel_new_async (proxy, new_tf_channel_cb, context);
+
+ tp_handle_channels_context_accept (handler_context);
+
+ tp_call_channel_accept_async (TP_CALL_CHANNEL (proxy), NULL, NULL);
+
+ context->proxy = g_object_ref (proxy);
+ g_signal_connect (proxy, "invalidated",
+ G_CALLBACK (proxy_invalidated_cb),
+ context);
+}
+
+int
+main (int argc, char **argv)
+{
+ TpBaseClient *client;
+ TpAccountManager *am;
+
+ gst_init (&argc, &argv);
+
+ loop = g_main_loop_new (NULL, FALSE);
+
+ am = tp_account_manager_dup ();
+
+ client = tp_simple_handler_new_with_am (am,
+ FALSE,
+ FALSE,
+ "TpFsCallHandlerDemo",
+ TRUE,
+ new_call_channel_cb,
+ NULL,
+ NULL);
+
+ tp_base_client_take_handler_filter (client,
+ tp_asv_new (
+ TP_PROP_CHANNEL_CHANNEL_TYPE, G_TYPE_STRING,
+ TP_IFACE_CHANNEL_TYPE_CALL1,
+ TP_PROP_CHANNEL_TARGET_HANDLE_TYPE, G_TYPE_UINT,
+ TP_HANDLE_TYPE_CONTACT,
+ TP_PROP_CHANNEL_TYPE_CALL1_INITIAL_AUDIO, G_TYPE_BOOLEAN,
+ TRUE,
+ NULL));
+
+ tp_base_client_take_handler_filter (client,
+ tp_asv_new (
+ TP_PROP_CHANNEL_CHANNEL_TYPE, G_TYPE_STRING,
+ TP_IFACE_CHANNEL_TYPE_CALL1,
+ TP_PROP_CHANNEL_TARGET_HANDLE_TYPE, G_TYPE_UINT,
+ TP_HANDLE_TYPE_CONTACT,
+ TP_PROP_CHANNEL_TYPE_CALL1_INITIAL_VIDEO, G_TYPE_BOOLEAN,
+ TRUE,
+ NULL));
+
+ tp_base_client_add_handler_capabilities_varargs (client,
+ TP_IFACE_CHANNEL_TYPE_CALL1 "/video/h264",
+ TP_TOKEN_CHANNEL_TYPE_CALL1_SHM,
+ TP_TOKEN_CHANNEL_TYPE_CALL1_ICE,
+ TP_TOKEN_CHANNEL_TYPE_CALL1_GTALK_P2P,
+ NULL);
+
+ tp_base_client_register (client, NULL);
+
+ g_main_loop_run (loop);
+
+ g_object_unref (am);
+ g_object_unref (client);
+ g_main_loop_unref (loop);
+
+ return 0;
+}
diff --git a/telepathy-farstream/examples/python/Makefile.am b/telepathy-farstream/examples/python/Makefile.am
new file mode 100644
index 000000000..7b89027c2
--- /dev/null
+++ b/telepathy-farstream/examples/python/Makefile.am
@@ -0,0 +1,7 @@
+EXTRA_DIST = \
+ README \
+ callchannel.py \
+ callhandler.py \
+ callui.py \
+ constants.py \
+ util.py
diff --git a/telepathy-farstream/examples/python/README b/telepathy-farstream/examples/python/README
new file mode 100644
index 000000000..8007df6b5
--- /dev/null
+++ b/telepathy-farstream/examples/python/README
@@ -0,0 +1,5 @@
+Simple Python example using telepathy-farstream in the most minimal way possible.
+Two programs are included:
+
+callui.py: Doesn't do anything with tp-fs, but allows starting a call using the Call channel type
+callhandler.py: Simple handler that accepts incoming calls and handles the media
diff --git a/telepathy-farstream/examples/python/callchannel.py b/telepathy-farstream/examples/python/callchannel.py
new file mode 100644
index 000000000..f37c7243b
--- /dev/null
+++ b/telepathy-farstream/examples/python/callchannel.py
@@ -0,0 +1,180 @@
+#!/usr/bin/env python
+#
+# callchannel.py
+# Copyright (C) 2008-2010 Collabora Ltd.
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+
+import dbus
+import dbus.glib
+import gobject
+import sys
+from glib import GError
+
+import pygst
+pygst.require("0.10")
+import gst
+
+import tpfarstream
+import farstream
+from util import *
+import gc
+
+from telepathy.client.channel import Channel
+from telepathy.constants import (
+ CONNECTION_HANDLE_TYPE_NONE, CONNECTION_HANDLE_TYPE_CONTACT,
+ CONNECTION_STATUS_CONNECTED, CONNECTION_STATUS_DISCONNECTED,
+ MEDIA_STREAM_STATE_CONNECTED
+ )
+from telepathy.interfaces import (
+ CHANNEL_INTERFACE, CONN_INTERFACE,
+ CONNECTION_INTERFACE_REQUESTS,
+ CONNECTION_INTERFACE_CONTACT_CAPABILITIES,
+ CLIENT)
+
+from constants import *
+
+class CallChannel:
+ def __init__ (self, bus, connection, object_path, properties):
+ self.bus = bus
+ self.conn = connection
+ self.tfchannel = None
+
+ self.obj = self.bus.get_object (self.conn.service_name, object_path)
+ self.obj.connect_to_signal ("CallStateChanged",
+ self.state_changed_cb, dbus_interface=CHANNEL_TYPE_CALL)
+
+ self.pipeline = gst.Pipeline()
+ self.pipeline.get_bus().add_watch(self.async_handler)
+
+ self.notifier = notifier = farstream.ElementAddedNotifier()
+ notifier.set_properties_from_file("element-properties")
+ notifier.add(self.pipeline)
+
+ tpfarstream.tf_channel_new_async (connection.service_name,
+ connection.object_path, object_path, self.tpfs_created)
+
+ def state_changed_cb(self, state, flags, reason, details):
+ print "* StateChanged:\n State: %s (%d)\n Flags: %s" % (
+ call_state_to_s (state), state, call_flags_to_s (flags))
+
+ print "\tReason: actor: %d reason: %d dbus_reason: '%s'" % (
+ reason[0], reason[1], reason[2])
+
+ print '\tDetails:'
+ for key, value in details.iteritems():
+ print "\t %s: %s" % (key, value)
+ else:
+ print '\t None'
+
+ if state == CALL_STATE_ENDED:
+ self.close()
+
+ def accept (self):
+ self.obj.Accept(dbus_interface=CHANNEL_TYPE_CALL)
+
+ def close (self):
+ print "Closing the channel"
+ # close and cleanup
+ self.obj.Close(dbus_interface=CHANNEL_INTERFACE)
+
+ self.pipeline.set_state (gst.STATE_NULL)
+ self.pipeline = None
+
+ self.tfchannel = None
+ self.notifier = None
+
+ def async_handler (self, bus, message):
+ if self.tfchannel != None:
+ self.tfchannel.bus_message(message)
+ return True
+
+ self.pipeline = gst.Pipeline()
+
+ def tpfs_created (self, source, result):
+ tfchannel = self.tfchannel = source.new_finish(result)
+ tfchannel.connect ("fs-conference-added", self.conference_added)
+ tfchannel.connect ("content-added", self.content_added)
+
+
+ def src_pad_added (self, content, handle, stream, pad, codec):
+ type = content.get_property ("media-type")
+ if type == farstream.MEDIA_TYPE_AUDIO:
+ sink = gst.parse_bin_from_description("audioconvert ! audioresample ! audioconvert ! autoaudiosink", True)
+ elif type == farstream.MEDIA_TYPE_VIDEO:
+ sink = gst.parse_bin_from_description("ffmpegcolorspace ! videoscale ! autovideosink", True)
+
+ self.pipeline.add(sink)
+ pad.link(sink.get_pad("sink"))
+ sink.set_state(gst.STATE_PLAYING)
+
+ def get_codec_config (self, media_type):
+ if media_type == farstream.MEDIA_TYPE_VIDEO:
+ codecs = [ farstream.Codec(farstream.CODEC_ID_ANY, "H264",
+ farstream.MEDIA_TYPE_VIDEO, 0) ]
+ if self.conn.GetProtocol() == "sip" :
+ codecs += [ farstream.Codec(farstream.CODEC_ID_DISABLE, "THEORA",
+ farstream.MEDIA_TYPE_VIDEO, 0) ]
+ else:
+ codecs += [ farstream.Codec(farstream.CODEC_ID_ANY, "THEORA",
+ farstream.MEDIA_TYPE_VIDEO, 0) ]
+ codecs += [
+ farstream.Codec(farstream.CODEC_ID_ANY, "H263",
+ farstream.MEDIA_TYPE_VIDEO, 0),
+ farstream.Codec(farstream.CODEC_ID_DISABLE, "DV",
+ farstream.MEDIA_TYPE_VIDEO, 0),
+ farstream.Codec(farstream.CODEC_ID_ANY, "JPEG",
+ farstream.MEDIA_TYPE_VIDEO, 0),
+ farstream.Codec(farstream.CODEC_ID_ANY, "MPV",
+ farstream.MEDIA_TYPE_VIDEO, 0),
+ ]
+
+ else:
+ codecs = [
+ farstream.Codec(farstream.CODEC_ID_ANY, "SPEEX",
+ farstream.MEDIA_TYPE_AUDIO, 16000 ),
+ farstream.Codec(farstream.CODEC_ID_ANY, "SPEEX",
+ farstream.MEDIA_TYPE_AUDIO, 8000 )
+ ]
+ return codecs
+
+ def content_added(self, channel, content):
+ sinkpad = content.get_property ("sink-pad")
+
+ mtype = content.get_property ("media-type")
+ prefs = self.get_codec_config (mtype)
+ if prefs != None:
+ try:
+ content.set_codec_preferences(prefs)
+ except GError, e:
+ print e.message
+
+ content.connect ("src-pad-added", self.src_pad_added)
+
+ if mtype == farstream.MEDIA_TYPE_AUDIO:
+ src = gst.parse_bin_from_description("audiotestsrc is-live=1 ! " \
+ "queue", True)
+ elif mtype == farstream.MEDIA_TYPE_VIDEO:
+ src = gst.parse_bin_from_description("videotestsrc is-live=1 ! " \
+ "capsfilter caps=video/x-raw-yuv,width=320,height=240", True)
+
+ self.pipeline.add(src)
+ src.get_pad("src").link(sinkpad)
+ src.set_state(gst.STATE_PLAYING)
+
+ def conference_added (self, channel, conference):
+ self.pipeline.add(conference)
+ self.pipeline.set_state(gst.STATE_PLAYING)
+
diff --git a/telepathy-farstream/examples/python/callhandler.py b/telepathy-farstream/examples/python/callhandler.py
new file mode 100644
index 000000000..71af24cd9
--- /dev/null
+++ b/telepathy-farstream/examples/python/callhandler.py
@@ -0,0 +1,116 @@
+# callhandler.py
+# Copyright (C) 2008-2010 Collabora Ltd.
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+
+import gobject
+# Need gio so GAsyncInitialbe is known
+import gio
+
+import dbus
+from dbus.mainloop.glib import DBusGMainLoop
+DBusGMainLoop(set_as_default=True)
+
+from constants import *
+from telepathy.interfaces import CHANNEL_INTERFACE, CLIENT, CLIENT_HANDLER
+from telepathy.constants import CONNECTION_HANDLE_TYPE_CONTACT, CONNECTION_HANDLE_TYPE_ROOM
+import telepathy
+
+from callchannel import CallChannel
+
+class CallHandler(dbus.service.Object, telepathy.server.DBusProperties):
+ def __init__(self, bus, bus_name = None):
+ self.bus = bus
+ if bus_name == None:
+ self.bus_name = "org.freedesktop.Telepathy.Client.CallDemo" \
+ + bus.get_unique_name().replace(":", "_").replace(".","_")
+ else:
+ self.bus_name = bus_name
+ self.path = "/" + self.bus_name.replace(".", "/")
+ self._interfaces = set([CLIENT, CLIENT_HANDLER])
+ self._prop_getters = {}
+ self._prop_setters = {}
+
+ dbus.service.Object.__init__(self, bus, self.path)
+ telepathy.server.DBusProperties.__init__(self)
+
+ self._name = dbus.service.BusName (self.bus_name, bus)
+
+ self._implement_property_get (CLIENT,
+ { "Interfaces": self._get_interfaces } )
+ self._implement_property_get (CLIENT_HANDLER,
+ { "HandlerChannelFilter": self._get_filters } )
+ self._implement_property_get (CLIENT_HANDLER,
+ { "Capabilities": self._get_capabilities } )
+
+ def _get_interfaces(self):
+ return dbus.Array(self._interfaces, signature='s')
+
+ def _get_filters(self):
+ return dbus.Array ([
+ { CHANNEL_INTERFACE + ".ChannelType": CHANNEL_TYPE_CALL,
+ CHANNEL_INTERFACE + ".TargetHandleType":
+ CONNECTION_HANDLE_TYPE_CONTACT,
+ CALL_INITIAL_AUDIO: True,
+ },
+ { CHANNEL_INTERFACE + ".ChannelType": CHANNEL_TYPE_CALL,
+ CHANNEL_INTERFACE + ".TargetHandleType":
+ CONNECTION_HANDLE_TYPE_CONTACT,
+ CALL_INITIAL_VIDEO: True,
+ },
+ { CHANNEL_INTERFACE + ".ChannelType": CHANNEL_TYPE_CALL,
+ CHANNEL_INTERFACE + ".TargetHandleType":
+ CONNECTION_HANDLE_TYPE_ROOM,
+ CALL_INITIAL_AUDIO: True,
+ },
+ { CHANNEL_INTERFACE + ".ChannelType": CHANNEL_TYPE_CALL,
+ CHANNEL_INTERFACE + ".TargetHandleType":
+ CONNECTION_HANDLE_TYPE_ROOM,
+ CALL_INITIAL_VIDEO: True,
+ }
+ ],
+ signature='a{sv}')
+
+ def _get_capabilities(self):
+ return dbus.Array ([
+ CHANNEL_TYPE_CALL + '/gtalk-p2p',
+ CHANNEL_TYPE_CALL + '/ice-udp',
+ CHANNEL_TYPE_CALL + '/video/h264',
+ ], signature='s')
+
+ def do_handle_call_channel (self, requests, bus, conn, channel, properties):
+ cchannel = CallChannel(self.bus, conn, channel, properties)
+ cchannel.accept()
+
+ @dbus.service.method(dbus_interface=CLIENT_HANDLER,
+ in_signature='ooa(oa{sv})aota{sv}',
+ async_callbacks= ('_success', '_error'))
+ def HandleChannels(self, account, connection, channels,
+ requests, time, info, _success, _error):
+
+ conn = telepathy.client.Connection (connection[1:].replace('/','.'),
+ connection)
+ # Assume there can be only one
+ (channel, properties) = channels[0]
+
+ _success()
+ self.do_handle_call_channel (requests,
+ self.bus, conn, channel, properties);
+
+if __name__ == '__main__':
+ gobject.threads_init()
+ loop = gobject.MainLoop()
+ CallHandler(dbus.SessionBus())
+ loop.run()
diff --git a/telepathy-farstream/examples/python/callui.py b/telepathy-farstream/examples/python/callui.py
new file mode 100644
index 000000000..9e7558f6c
--- /dev/null
+++ b/telepathy-farstream/examples/python/callui.py
@@ -0,0 +1,285 @@
+#!/usr/bin/env python
+#
+# callui.py
+# Copyright (C) 2008-2010 Collabora Ltd.
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+
+import gobject
+gobject.threads_init()
+
+import pygtk
+import gtk
+
+gtk.gdk.threads_init()
+
+import dbus
+from dbus.mainloop.glib import DBusGMainLoop
+DBusGMainLoop(set_as_default=True)
+
+import sys
+import time
+
+from telepathy.interfaces import *
+from telepathy.constants import *
+
+from constants import *
+
+class CallChannelRequest:
+    """Asks the Channel Dispatcher to create and dispatch a Call channel.
+
+    Builds the requested-channel properties (target, audio/video flags),
+    calls CreateChannel, hooks the request's Failed/Succeeded signals and
+    then Proceed()s.
+    """
+    def __init__(self, bus, account_path, contact,
+                 preferred_handler="", audio=True, video=False,
+                 calltype=HANDLE_TYPE_CONTACT):
+        self.bus = bus
+        # The dispatcher's object path mirrors its bus name ('.' -> '/').
+        self.cd = bus.get_object(CHANNEL_DISPATCHER,
+            '/' + CHANNEL_DISPATCHER.replace('.', '/'))
+
+        props = {
+            CHANNEL_INTERFACE + ".ChannelType": CHANNEL_TYPE_CALL,
+            CHANNEL_INTERFACE + ".TargetHandleType": calltype,
+            CHANNEL_INTERFACE + ".TargetID": contact,
+        }
+
+        if audio:
+            props[CHANNEL_TYPE_CALL + ".InitialAudio"] = True
+        if video:
+            props[CHANNEL_TYPE_CALL + ".InitialVideo"] = True
+
+        # Third argument is the user action time; 0 means "no timestamp".
+        self.request_path = req_path = self.cd.CreateChannel(account_path,
+            props,
+            0,
+            preferred_handler,
+            dbus_interface=CHANNEL_DISPATCHER)
+
+        # Connect result signals before Proceed() so none can be missed.
+        self.req = self.bus.get_object(CHANNEL_DISPATCHER, req_path)
+        self.req.connect_to_signal("Failed", self.req_failed)
+        self.req.connect_to_signal("Succeeded", self.req_succeeded)
+        self.req.Proceed(dbus_interface=CHANNEL_REQUEST)
+
+    def req_failed(self, error, message):
+        # The dispatcher could not satisfy the request; just log it.
+        print "FAILURE: %s (%s)" % (error, message)
+
+    def req_succeeded(self):
+        # Channel was dispatched to a handler; nothing more to do here.
+        pass
+
+class Account:
+    """Thin wrapper around a Telepathy Account object on the bus."""
+
+    # Requestable channel class matching 1-to-1 Call channels; used to
+    # decide whether an account's connection can place calls.
+    CALL_CLASS = {
+        CHANNEL_INTERFACE + '.ChannelType': CHANNEL_TYPE_CALL,
+        CHANNEL_INTERFACE + '.TargetHandleType': HANDLE_TYPE_CONTACT
+    }
+
+    def __init__(self, bus, path):
+        self.bus = bus
+        self.path = path
+        self.obj = bus.get_object(ACCOUNT_MANAGER, path)
+        # One-shot snapshot of the Account properties; never refreshed.
+        self.properties = self.obj.GetAll(ACCOUNT,
+            dbus_interface=dbus.PROPERTIES_IFACE)
+
+    def get_path(self):
+        # D-Bus object path of this account.
+        return self.path
+
+    def name(self):
+        return self.properties["DisplayName"]
+
+    def has_connection(self):
+        # "/" is the null object path, i.e. the account is offline.
+        return self.properties["Connection"] != "/"
+
+    def get_contacts(self):
+        """Return contact IDs from the 'subscribe' list that support calls."""
+        path = self.properties["Connection"]
+        if path == "/":
+            return []
+
+        # Connection bus name is derived from its object path.
+        conn = self.bus.get_object(path[1:].replace("/", "."), path)
+        yours, channel, properties = conn.EnsureChannel(
+            {CHANNEL_INTERFACE + ".ChannelType": CHANNEL_TYPE_CONTACT_LIST,
+             CHANNEL_INTERFACE + ".TargetHandleType": HANDLE_TYPE_LIST,
+             CHANNEL_INTERFACE + ".TargetID": "subscribe"
+             },
+            dbus_interface=CONNECTION_INTERFACE_REQUESTS
+        )
+
+        subscribe = self.bus.get_object(conn.bus_name, channel)
+        members = subscribe.Get(CHANNEL_INTERFACE_GROUP, "Members",
+            dbus_interface=dbus.PROPERTIES_IFACE)
+
+        caps = conn.GetContactCapabilities(members,
+            dbus_interface=CONNECTION_INTERFACE_CONTACT_CAPABILITIES)
+        members = caps.keys()
+
+        # Drop members whose capabilities include no Call channel class
+        # (for/else: the else runs only when the inner loop found no match).
+        for k, v in caps.iteritems():
+            for c in v:
+                if c[0][CHANNEL_TYPE] == CHANNEL_TYPE_CALL:
+                    break
+            else:
+                members.remove(k)
+
+        # Empty interest list: we only need the always-present contact-id.
+        attributes = conn.GetContactAttributes(
+            dbus.Array(members, signature="u"),
+            dbus.Array([], signature="s"),
+            True)
+
+        # Reduce handle -> attribute-map down to the contact-id strings.
+        return map(lambda v: v[CONNECTION + "/contact-id"],
+            attributes.itervalues())
+
+    def supports_calls(self):
+        """True if the connection advertises a 1-to-1 Call channel class."""
+        path = self.properties["Connection"]
+        if path == "/":
+            return False
+
+        conn = self.bus.get_object(path[1:].replace("/", "."), path)
+        classes = conn.Get(CONNECTION_INTERFACE_REQUESTS,
+            'RequestableChannelClasses', dbus_interface=dbus.PROPERTIES_IFACE)
+
+        return len([c for c in classes if c[0] == self.CALL_CLASS]) > 0
+
+class UI(gtk.Window):
+    """Small GTK window to pick an account and contact and start a call."""
+
+    # Fixed window width; -1 lets the height follow the content.
+    WIDTH = 240
+    HEIGHT = -1
+
+    def __init__(self, bus):
+        gtk.Window.__init__(self)
+        self.connect('destroy', lambda x: gtk.main_quit())
+        self.set_resizable(False)
+        self.set_size_request(self.WIDTH, self.HEIGHT)
+
+        vbox = gtk.VBox(False, 3)
+        self.add(vbox)
+
+        # call type combo box
+        # Columns: display label, Telepathy handle type.
+        self.type_store = gtk.ListStore(
+            gobject.TYPE_STRING,
+            gobject.TYPE_UINT)
+
+        self.type_store.append(("1-to-1", CONNECTION_HANDLE_TYPE_CONTACT))
+        self.type_store.append(("Conference",
+            CONNECTION_HANDLE_TYPE_ROOM))
+
+        self.type_combo = combobox = gtk.ComboBox(self.type_store)
+        vbox.pack_start(combobox, False)
+
+        renderer = gtk.CellRendererText()
+        combobox.pack_start(renderer, True)
+        combobox.set_attributes(renderer, text=0)
+        combobox.set_active(0)
+
+        # account combo box
+        # Columns: display name, has-connection flag, Account wrapper object.
+        self.store = gtk.ListStore(gobject.TYPE_STRING,
+            gobject.TYPE_BOOLEAN,
+            gobject.TYPE_PYOBJECT)
+        self.store.set_sort_func(0,
+            (lambda m, i0, i1:
+                {True: -1, False: 1}[m.get(i0, 0) < m.get(i1, 0)]))
+        self.store.set_sort_column_id(0, gtk.SORT_ASCENDING)
+
+        # Only accounts with a live connection (column 1) are shown.
+        f = self.store.filter_new()
+        f.set_visible_func(self.filter_visible)
+        self.account_combo = combobox = gtk.ComboBox(f)
+        vbox.pack_start(combobox, False)
+
+        renderer = gtk.CellRendererText()
+        combobox.pack_start(renderer, True)
+        combobox.set_attributes(renderer, text=0)
+        combobox.connect('changed', self.account_selected)
+
+        # contact entry box
+        self.contact_store = gtk.ListStore(gobject.TYPE_STRING)
+
+        completion = gtk.EntryCompletion()
+        completion.set_model(self.contact_store)
+        completion.set_text_column(0)
+
+        self.contact_store.set_sort_func(0, self.contact_sort)
+        self.contact_store.set_sort_column_id(0, gtk.SORT_ASCENDING)
+
+        self.contact_combo = combobox = gtk.ComboBoxEntry(self.contact_store)
+        combobox.get_child().set_completion(completion)
+
+        vbox.pack_start(combobox, False)
+
+        bbox = gtk.HButtonBox()
+        bbox.set_layout(gtk.BUTTONBOX_END)
+        vbox.pack_start(bbox, True, False, 3)
+
+        call = gtk.Button("Audio call")
+        call.connect("clicked", self.start_call)
+        bbox.add(call)
+
+        call = gtk.Button("Video call")
+        call.connect("clicked",
+            lambda button: self.start_call(button, video=True))
+        bbox.add(call)
+
+        self.show_all()
+
+        self.bus = bus
+        # The Account Manager's object path mirrors its bus name.
+        self.account_mgr = bus.get_object(ACCOUNT_MANAGER,
+            '/' + ACCOUNT_MANAGER.replace('.', '/'))
+        self.get_accounts()
+
+    def start_call(self, button, audio=True, video=False):
+        # Collect the selections and fire off a channel request.
+        i = self.type_combo.get_active_iter()
+        (calltype, ) = self.type_combo.get_model().get(i, 1)
+
+        i = self.account_combo.get_active_iter()
+        (account, ) = self.account_combo.get_model().get(i, 2)
+
+        contact = self.contact_combo.get_active_text().strip()
+
+        print "* starting %s call" % ('video' if video else 'audio')
+        CallChannelRequest(self.bus, account.path, contact,
+            audio=audio, video=video, calltype=calltype)
+
+    def contact_sort(self, model, i0, i1):
+        # NOTE(review): this comparator never returns 1 — "greater" and
+        # "equal" both map to 0, so ordering of non-less pairs is left to
+        # the model. Confirm whether that asymmetry is intended.
+        if model.get(i0, 0)[0] < model.get(i1, 0)[0]:
+            return -1
+        else:
+            return 0
+
+    def filter_visible(self, model, titer):
+        # Row is visible when its has-connection flag (column 1) is set.
+        return model.get(titer, 1)[0]
+
+    def account_selected(self, combobox):
+        # Repopulate the contact completion list for the chosen account.
+        iter = combobox.get_active_iter()
+        if iter == None:
+            return None
+
+        (account,) = combobox.get_model().get(iter, 2)
+
+        self.contact_store.clear()
+
+        map(lambda x: self.contact_store.insert(0, (x,)),
+            account.get_contacts())
+
+    def bail(self, *args):
+        # Fatal-error path for async D-Bus calls: log and quit the UI.
+        print "BAILING"
+        print args
+        gtk.main_quit()
+
+    def got_accounts(self, accounts):
+        # Async reply handler: keep only accounts that can place calls.
+        for x in accounts:
+            a = Account(self.bus, x)
+            if a.supports_calls():
+                self.store.insert(0, (a.name(), a.has_connection(), a))
+        self.account_combo.set_active(0)
+
+    def get_accounts(self):
+        # Fetch the list of valid accounts asynchronously; errors bail out.
+        self.account_mgr.Get(ACCOUNT_MANAGER, "ValidAccounts",
+            dbus_interface=dbus.PROPERTIES_IFACE,
+            reply_handler=self.got_accounts,
+            error_handler=self.bail)
+
+if __name__ == '__main__':
+    # Build the call UI against the session bus and run the GTK main loop.
+    bus = dbus.SessionBus()
+
+    UI(bus)
+    gtk.main()
diff --git a/telepathy-farstream/examples/python/constants.py b/telepathy-farstream/examples/python/constants.py
new file mode 100644
index 000000000..43af8baf2
--- /dev/null
+++ b/telepathy-farstream/examples/python/constants.py
@@ -0,0 +1,66 @@
+# constants.py
+# Copyright (C) 2008-2010 Collabora Ltd.
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+
+from telepathy.interfaces import CHANNEL_INTERFACE
+
+CHANNEL = CHANNEL_INTERFACE
+
+CHANNEL_TYPE = CHANNEL + ".ChannelType"
+CHANNEL_TYPE_CALL = CHANNEL + ".Type.Call1"
+CALL_INITIAL_AUDIO = CHANNEL_TYPE_CALL + '.InitialAudio'
+CALL_INITIAL_VIDEO = CHANNEL_TYPE_CALL + '.InitialVideo'
+CALL_MUTABLE_CONTENTS = CHANNEL_TYPE_CALL + '.MutableContents'
+
+CALL_CONTENT = 'org.freedesktop.Telepathy.Call1.Content'
+CALL_CONTENT_IFACE_MEDIA = \
+ 'org.freedesktop.Telepathy.Call1.Content.Interface.Media'
+
+CALL_CONTENT_CODECOFFER = \
+ 'org.freedesktop.Telepathy.Call1.Content.CodecOffer'
+
+CALL_STREAM = 'org.freedesktop.Telepathy.Call1.Stream'
+CALL_STREAM_IFACE_MEDIA = \
+ 'org.freedesktop.Telepathy.Call1.Stream.Interface.Media'
+
+CALL_STREAM_ENDPOINT = 'org.freedesktop.Telepathy.Call1.Stream.Endpoint'
+
+STREAM_TRANSPORT_RAW_UDP = 1
+STREAM_TRANSPORT_ICE_UDP = 2
+STREAM_TRANSPORT_GTALK_P2P = 3
+STREAM_TRANSPORT_WLM_2009 = 4
+STREAM_TRANSPORT_SHM = 5
+STREAM_TRANSPORT_MULTICAST = 6
+STREAM_TRANSPOR_DUMMY = 0xff
+
+CALL_STATE_UNKNOWN = 0
+CALL_STATE_PENDING_INITIATOR = 1
+CALL_STATE_PENDING_RECEIVER = 2
+CALL_STATE_ACCEPTED = 3
+CALL_STATE_ENDED = 4
+
+CALL_FLAG_LOCALLY_RINGING = 1
+CALL_FLAG_QUEUED = 2
+CALL_FLAG_LOCALLY_HELD = 4
+CALL_FLAG_FORWARDED = 8
+CALL_FLAG_IN_PROGRESS = 16
+CALL_FLAG_CLEARING = 32
+
+CALL_STATE_CHANGE_REASON_UNKNOWN = 0
+CALL_STATE_CHANGE_REASON_REQUESTED = 1
+
+CONTENT_PACKETIZATION_RTP = 0
+CONTENT_PACKETIZATION_RAW = 1
diff --git a/telepathy-farstream/examples/python/element-properties b/telepathy-farstream/examples/python/element-properties
new file mode 100644
index 000000000..40f706d6e
--- /dev/null
+++ b/telepathy-farstream/examples/python/element-properties
@@ -0,0 +1,62 @@
+# Put the desired properties in the style of
+#
+# [element name]
+# prop1=val1
+
+[gstrtpbin]
+latency=100
+
+[x264enc]
+byte-stream=1
+bframes=0
+b-adapt=0
+cabac=0
+dct8x8=0
+bitrate=256
+# tuned for zero latency
+tune=0x4
+profile=1
+speed-preset=3
+sliced-threads=false
+
+[ffenc_h263]
+rtp-payload-size=1
+
+[theoraenc]
+bitrate=256
+
+[vp8enc]
+bitrate=256000
+max-latency=1
+speed=2
+error-resilient=true
+
+# Work around bug in the re-timestamp slaving method in
+# GStreamer (2 is skew)
+[alsasrc]
+slave-method=2
+
+[osssrc]
+slave-method=2
+
+[oss4src]
+slave-method=2
+
+[sunaudiosrc]
+slave-method=2
+
+[rtph264pay]
+config-interval=5
+
+[rtppcmupay]
+ptime-multiple=20000000
+
+[rtppcmapay]
+ptime-multiple=20000000
+
+[gstrtpjitterbuffer]
+do-lost=1
+
+[ewh264enc]
+profile=baseline
+quality=5
diff --git a/telepathy-farstream/examples/python/util.py b/telepathy-farstream/examples/python/util.py
new file mode 100644
index 000000000..bbad9c852
--- /dev/null
+++ b/telepathy-farstream/examples/python/util.py
@@ -0,0 +1,40 @@
+# util.py
+# Copyright (C) 2008-2010 Collabora Ltd.
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+
+from constants import *
+
+def call_state_to_s(state):
+ return {
+ CALL_STATE_UNKNOWN: 'Unknown',
+ CALL_STATE_PENDING_INITIATOR: 'Pending Initiator',
+ CALL_STATE_PENDING_RECEIVER: 'Pending Receiver',
+ CALL_STATE_ACCEPTED: 'Accepted',
+ CALL_STATE_ENDED: 'Ended'
+ }[state]
+
+def call_flags_to_s(flags):
+ flag_strs = {
+ CALL_FLAG_LOCALLY_RINGING: 'Locally Ringing',
+ CALL_FLAG_QUEUED: 'Queued',
+ CALL_FLAG_LOCALLY_HELD: 'Locally Held',
+ CALL_FLAG_FORWARDED: 'Forwarded',
+ CALL_FLAG_IN_PROGRESS: 'In Progress',
+ CALL_FLAG_CLEARING: 'Clearing'
+ }
+
+ return ' | '.join([ '%s (%d)' % (flag_strs[i], i)
+ for i in flag_strs.keys() if flags & i ]) or 'None'