<?xml version="1.0" encoding="UTF-8"?>
<Dialog team="m6" id="" name="" date="" time="" description="" subject="">
        <Body>
            <Topics/>
            <Turn nickname="(#user2#)" genid="2">
                <Utterance genid="3" ref="-1" time="01:31:00" date="10/12/2007" oldid="2" color="" topic="">test</Utterance>
                <Utterance genid="4" ref="3" time="01:31:18" date="10/12/2007" oldid="3" color="" topic="">reply</Utterance>
            </Turn>
            <Turn nickname="(#user2#)" genid="14">
                <Utterance genid="15" ref="-1" time="10:30:44" date="10/12/2007" oldid="13" color="" topic="">hello</Utterance>
            </Turn>
            <Turn nickname="(#user1#)" genid="16">
                <Utterance genid="17" ref="-1" time="10:31:02" date="10/12/2007" oldid="14" color="" topic="">hello</Utterance>
            </Turn>
            <Turn nickname="(#user3#)" genid="18">
                <Utterance genid="19" ref="-1" time="10:31:04" date="10/12/2007" oldid="15" color="" topic="">hi all :)</Utterance>
            </Turn>
            <Turn nickname="(#user0#)" genid="20">
                <Utterance genid="21" ref="-1" time="10:31:09" date="10/12/2007" oldid="16" color="" topic="">hello everybody</Utterance>
                <Utterance genid="22" ref="-1" time="10:32:27" date="10/12/2007" oldid="17" color="" topic="">I am here to support Naive Bayes classification method</Utterance>
            </Turn>
            <Turn nickname="(#user2#)" genid="23">
                <Utterance genid="24" ref="-1" time="10:33:02" date="10/12/2007" oldid="18" color="" topic="">My company, Synaptic Tech focuses mainly on Hidden Markov Models for classification</Utterance>
            </Turn>
            <Turn nickname="(#user1#)" genid="25">
                <Utterance genid="26" ref="-1" time="10:34:35" date="10/12/2007" oldid="19" color="" topic="">i am here to speak about the Maximum Entropy technique and it's use in building classifiers</Utterance>
            </Turn>
            <Turn nickname="(#user3#)" genid="27">
                <Utterance genid="28" ref="-1" time="10:34:41" date="10/12/2007" oldid="20" color="" topic="">I 'll try to convince you that Support Vector Machine is the best method for classification</Utterance>
            </Turn>
            <Turn nickname="(#user0#)" genid="29">
                <Utterance genid="30" ref="-1" time="10:34:56" date="10/12/2007" oldid="21" color="" topic="">ok, who wants to go first?</Utterance>
            </Turn>
            <Turn nickname="(#user3#)" genid="31">
                <Utterance genid="32" ref="-1" time="10:35:12" date="10/12/2007" oldid="22" color="" topic="">let's do it alphabetically</Utterance>
                <Utterance genid="33" ref="-1" time="10:35:22" date="10/12/2007" oldid="23" color="" topic="">:)</Utterance>
            </Turn>
            <Turn nickname="(#user0#)" genid="34">
                <Utterance genid="35" ref="-1" time="10:35:58" date="10/12/2007" oldid="24" color="" topic="">ok, you cowards :)</Utterance>
            </Turn>
            <Turn nickname="(#user2#)" genid="36">
                <Utterance genid="37" ref="22" time="10:36:02" date="10/12/2007" oldid="25" color="" topic="">HMM is based on the Bayes inference system, but has a much stronger model</Utterance>
                <Utterance genid="38" ref="-1" time="10:37:28" date="10/12/2007" oldid="26" color="" topic="">It uses the same feature independence assumption</Utterance>
            </Turn>
            <Turn nickname="(#user3#)" genid="39">
                <Utterance genid="40" ref="-1" time="10:37:55" date="10/12/2007" oldid="27" color="" topic="">and why is better for instance than naive bayesian</Utterance>
                <Utterance genid="41" ref="-1" time="10:38:04" date="10/12/2007" oldid="28" color="" topic="">?</Utterance>
            </Turn>
            <Turn nickname="(#user2#)" genid="42">
                <Utterance genid="43" ref="-1" time="10:38:33" date="10/12/2007" oldid="29" color="" topic="">but has a structure similar to a finite state automata which permits it to become a sequence classifier</Utterance>
                <Utterance genid="44" ref="40" time="10:39:18" date="10/12/2007" oldid="30" color="" topic="">you cannot for example apply naive bayes for part of speech tagging</Utterance>
                <Utterance genid="45" ref="-1" time="10:39:54" date="10/12/2007" oldid="31" color="" topic="">which depends largely on the order of the words in the sentence</Utterance>
            </Turn>
            <Turn nickname="(#user3#)" genid="46">
                <Utterance genid="47" ref="-1" time="10:40:01" date="10/12/2007" oldid="32" color="" topic="">good point</Utterance>
                <Utterance genid="48" ref="-1" time="10:41:12" date="10/12/2007" oldid="33" color="" topic="">my model SVM is very powerfull because scales very rapidly</Utterance>
                <Utterance genid="49" ref="-1" time="10:41:23" date="10/12/2007" oldid="34" color="" topic="">It simultaneously minimize the empirical classification error</Utterance>
                <Utterance genid="50" ref="-1" time="10:41:44" date="10/12/2007" oldid="35" color="" topic="">and maximize the geometric margin</Utterance>
                <Utterance genid="51" ref="-1" time="10:42:03" date="10/12/2007" oldid="36" color="" topic="">it is a linear classificator which means it very fast</Utterance>
            </Turn>
            <Turn nickname="(#user2#)" genid="52">
                <Utterance genid="53" ref="50" time="10:42:17" date="10/12/2007" oldid="37" color="" topic="">but let's not forgeting we are dealing with a linear classifier</Utterance>
            </Turn>
            <Turn nickname="(#user3#)" genid="54">
                <Utterance genid="55" ref="-1" time="10:42:40" date="10/12/2007" oldid="38" color="" topic="">yes, indeed</Utterance>
            </Turn>
            <Turn nickname="(#user0#)" genid="56">
                <Utterance genid="57" ref="-1" time="10:43:46" date="10/12/2007" oldid="39" color="" topic="">A Naive Bayes classifier is a probabilistic classifier that uses Bayes' theorem</Utterance>
                <Utterance genid="58" ref="-1" time="10:44:05" date="10/12/2007" oldid="40" color="" topic="">It is called naive because it makes independence assumptions</Utterance>
                <Utterance genid="59" ref="-1" time="10:44:44" date="10/12/2007" oldid="41" color="" topic="">the problem is to determine the probability of a class variable with a small number of outcomes, conditional on several feature variables</Utterance>
            </Turn>
            <Turn nickname="(#user2#)" genid="60">
                <Utterance genid="61" ref="58" time="10:45:21" date="10/12/2007" oldid="42" color="" topic="">one of the main drawbacks of naive Bayes is that it is sot capable of solving more complex classification problems</Utterance>
                <Utterance genid="62" ref="-1" time="10:46:06" date="10/12/2007" oldid="43" color="" topic="">still it shares many aspects with HMM</Utterance>
                <Utterance genid="63" ref="-1" time="10:46:42" date="10/12/2007" oldid="44" color="" topic="">i know that ME coupled with HMM can achieve better results</Utterance>
                <Utterance genid="64" ref="-1" time="10:46:53" date="10/12/2007" oldid="45" color="" topic="">what can you tell as about it (#user1#)?</Utterance>
            </Turn>
            <Turn nickname="(#user0#)" genid="65">
                <Utterance genid="66" ref="61" time="10:46:53" date="10/12/2007" oldid="46" color="" topic="">yes, indeed, but it gives pretty good results for large sets of input data</Utterance>
            </Turn>
            <Turn nickname="(#user1#)" genid="67">
                <Utterance genid="68" ref="-1" time="10:47:27" date="10/12/2007" oldid="47" color="" topic="">indeed, Maximum Entropy combined with HMM can produce</Utterance>
                <Utterance genid="69" ref="-1" time="10:47:51" date="10/12/2007" oldid="48" color="" topic="">good result for complex classification problems</Utterance>
                <Utterance genid="70" ref="-1" time="10:47:59" date="10/12/2007" oldid="49" color="" topic="">that is, problems that</Utterance>
            </Turn>
            <Turn nickname="(#user2#)" genid="71">
                <Utterance genid="72" ref="66" time="10:48:08" date="10/12/2007" oldid="50" color="" topic="">that's the same reason why we chose a linear model to an exponential one like in max entropy</Utterance>
            </Turn>
            <Turn nickname="(#user3#)" genid="73">
                <Utterance genid="74" ref="59" time="10:48:12" date="10/12/2007" oldid="51" color="" topic="">(#user0#) pretty good results is good but not enough :)</Utterance>
            </Turn>
            <Turn nickname="(#user1#)" genid="75">
                <Utterance genid="76" ref="-1" time="10:48:12" date="10/12/2007" oldid="52" color="" topic="">have input data as a sum of observations</Utterance>
                <Utterance genid="77" ref="-1" time="10:49:31" date="10/12/2007" oldid="53" color="" topic="">as (#user2#) said, Maximum Entropy belongs to the family of classifiers known as log-linear</Utterance>
            </Turn>
            <Turn nickname="(#user2#)" genid="78">
                <Utterance genid="79" ref="74" time="10:49:54" date="10/12/2007" oldid="54" color="" topic="">I see your point (#user3#) but your method SVM is prone to overfitting and thus bad generalization</Utterance>
            </Turn>
            <Turn nickname="(#user1#)" genid="80">
                <Utterance genid="81" ref="-1" time="10:50:14" date="10/12/2007" oldid="55" color="" topic="">ME works classifiers. MaxEnt works by extracting some set of features from the input, combining them linearly (meaning that we multiply each by a weight and then add them up)</Utterance>
            </Turn>
            <Turn nickname="(#user3#)" genid="82">
                <Utterance genid="83" ref="-1" time="10:50:51" date="10/12/2007" oldid="56" color="" topic="">(#user2#) not really, more than that for example, in case of e-learning, support vector machine methods could categorize the learning material and present to the student only what he needs to learn</Utterance>
                <Utterance genid="84" ref="-1" time="10:51:22" date="10/12/2007" oldid="57" color="" topic="">so is very pointed to what you need</Utterance>
                <Utterance genid="85" ref="-1" time="10:52:31" date="10/12/2007" oldid="58" color="" topic="">as a matter of fact is gaves better results in automatic learning even that neural networks</Utterance>
                <Utterance genid="86" ref="-1" time="10:52:54" date="10/12/2007" oldid="59" color="" topic="">it is true that has an online processing time bigger than neural networks</Utterance>
            </Turn>
            <Turn nickname="(#user2#)" genid="87">
                <Utterance genid="88" ref="83" time="10:53:04" date="10/12/2007" oldid="60" color="" topic="">I think Naive Bayes is better suited for that purpose as it models very well real time processes with a normal distribution</Utterance>
                <Utterance genid="89" ref="86" time="10:53:36" date="10/12/2007" oldid="61" color="" topic="">and bayes is still much faster than neural nets too</Utterance>
                <Utterance genid="90" ref="-1" time="10:54:00" date="10/12/2007" oldid="62" color="" topic="">but as I said it still has the disadvantage of non-sequence classification</Utterance>
            </Turn>
            <Turn nickname="(#user3#)" genid="91">
                <Utterance genid="92" ref="-1" time="10:54:16" date="10/12/2007" oldid="63" color="" topic="">yes but bayes has a very low accuraty in prediction</Utterance>
            </Turn>
            <Turn nickname="(#user2#)" genid="93">
                <Utterance genid="94" ref="-1" time="10:54:43" date="10/12/2007" oldid="64" color="" topic="">it just takes an observation and based on some features puts in a class</Utterance>
            </Turn>
            <Turn nickname="(#user3#)" genid="95">
                <Utterance genid="96" ref="-1" time="10:54:45" date="10/12/2007" oldid="65" color="" topic="">especially for a large dataset</Utterance>
            </Turn>
            <Turn nickname="(#user2#)" genid="97">
                <Utterance genid="98" ref="96" time="10:55:27" date="10/12/2007" oldid="66" color="" topic="">I'm not here to defend Bayes, (#user0#) should</Utterance>
                <Utterance genid="99" ref="-1" time="10:56:25" date="10/12/2007" oldid="67" color="" topic="">I'm just saying that HMM is more powerful that all of your methods :D</Utterance>
            </Turn>
            <Turn nickname="(#user0#)" genid="100">
                <Utterance genid="101" ref="92" time="10:56:55" date="10/12/2007" oldid="68" color="" topic="">I think Max entropy doesn't handle sparse input data either</Utterance>
            </Turn>
            <Turn nickname="(#user3#)" genid="102">
                <Utterance genid="103" ref="-1" time="10:57:12" date="10/12/2007" oldid="69" color="" topic="">(#user2#) you are just saying, it seems for me that you are not convinced</Utterance>
            </Turn>
            <Turn nickname="(#user1#)" genid="104">
                <Utterance genid="105" ref="-1" time="10:57:18" date="10/12/2007" oldid="70" color="" topic="">indeed, it has lower accuracy results</Utterance>
            </Turn>
            <Turn nickname="(#user3#)" genid="106">
                <Utterance genid="107" ref="-1" time="10:57:21" date="10/12/2007" oldid="71" color="" topic="">:))</Utterance>
            </Turn>
            <Turn nickname="(#user1#)" genid="108">
                <Utterance genid="109" ref="-1" time="10:57:24" date="10/12/2007" oldid="72" color="" topic="">is the input data is sparse</Utterance>
            </Turn>
            <Turn nickname="(#user2#)" genid="110">
                <Utterance genid="111" ref="-1" time="10:57:29" date="10/12/2007" oldid="73" color="" topic="">it has applications in speech, handwriting, gesture recognition, musical score following, partial discharges and bioinformatics</Utterance>
            </Turn>
            <Turn nickname="(#user1#)" genid="112">
                <Utterance genid="113" ref="-1" time="10:57:30" date="10/12/2007" oldid="74" color="" topic="">*if</Utterance>
                <Utterance genid="114" ref="-1" time="10:58:14" date="10/12/2007" oldid="75" color="" topic="">but it can be corrected if you use a kind of smoothing</Utterance>
            </Turn>
            <Turn nickname="(#user2#)" genid="115">
                <Utterance genid="116" ref="103" time="10:58:21" date="10/12/2007" oldid="76" color="" topic="">at least I am arguing for my method</Utterance>
            </Turn>
            <Turn nickname="(#user1#)" genid="117">
                <Utterance genid="118" ref="-1" time="10:58:23" date="10/12/2007" oldid="77" color="" topic="">of the weights called regularisation</Utterance>
            </Turn>
            <Turn nickname="(#user3#)" genid="119">
                <Utterance genid="120" ref="-1" time="10:58:33" date="10/12/2007" oldid="78" color="" topic="">my SVM also coud be used in cuatum physics prediction problems</Utterance>
            </Turn>
            <Turn nickname="(#user2#)" genid="121">
                <Utterance genid="122" ref="-1" time="10:58:40" date="10/12/2007" oldid="79" color="" topic="">i don't see you too convinced about your methods either</Utterance>
            </Turn>
            <Turn nickname="(#user1#)" genid="123">
                <Utterance genid="124" ref="-1" time="10:58:43" date="10/12/2007" oldid="80" color="" topic="">the goal of regularization is to penalize the large weights</Utterance>
                <Utterance genid="125" ref="-1" time="10:59:04" date="10/12/2007" oldid="81" color="" topic="">that the model deduces when the input data is not enough</Utterance>
            </Turn>
            <Turn nickname="(#user0#)" genid="126">
                <Utterance genid="127" ref="120" time="10:59:18" date="10/12/2007" oldid="82" color="" topic="">I don't think we need to move into quantum physics</Utterance>
            </Turn>
            <Turn nickname="(#user2#)" genid="128">
                <Utterance genid="129" ref="120" time="10:59:23" date="10/12/2007" oldid="83" color="" topic="">that's because the quantum probability function is a linear combination</Utterance>
            </Turn>
            <Turn nickname="(#user0#)" genid="130">
                <Utterance genid="131" ref="-1" time="10:59:33" date="10/12/2007" oldid="84" color="" topic="">why not talk about more down-to-earth classfication problems</Utterance>
                <Utterance genid="132" ref="-1" time="10:59:38" date="10/12/2007" oldid="85" color="" topic="">like document classification</Utterance>
                <Utterance genid="133" ref="-1" time="10:59:45" date="10/12/2007" oldid="86" color="" topic="">for example, spam or not-spam classification</Utterance>
                <Utterance genid="134" ref="-1" time="11:00:00" date="10/12/2007" oldid="87" color="" topic="">the Bayes theorem is the main classification method in that field</Utterance>
            </Turn>
            <Turn nickname="(#user2#)" genid="135">
                <Utterance genid="136" ref="-1" time="11:00:02" date="10/12/2007" oldid="88" color="" topic="">that's not always the case with real-life events</Utterance>
            </Turn>
            <Turn nickname="(#user3#)" genid="137">
                <Utterance genid="138" ref="-1" time="11:00:38" date="10/12/2007" oldid="89" color="" topic="">because you said something about document classification</Utterance>
            </Turn>
            <Turn nickname="(#user1#)" genid="139">
                <Utterance genid="140" ref="134" time="11:00:42" date="10/12/2007" oldid="90" color="" topic="">how do you apply naive bayes for spam classification?</Utterance>
            </Turn>
            <Turn nickname="(#user3#)" genid="141">
                <Utterance genid="142" ref="-1" time="11:00:44" date="10/12/2007" oldid="91" color="" topic="">i must disagree with you</Utterance>
            </Turn>
            <Turn nickname="(#user2#)" genid="143">
                <Utterance genid="144" ref="134" time="11:00:54" date="10/12/2007" oldid="92" color="" topic="">indeed, but that is a simple binary classification</Utterance>
            </Turn>
            <Turn nickname="(#user3#)" genid="145">
                <Utterance genid="146" ref="-1" time="11:01:00" date="10/12/2007" oldid="93" color="" topic="">i tested 4 classifiers for 100 000 documents</Utterance>
                <Utterance genid="147" ref="-1" time="11:01:06" date="10/12/2007" oldid="94" color="" topic="">for 1000 classes</Utterance>
            </Turn>
            <Turn nickname="(#user0#)" genid="148">
                <Utterance genid="149" ref="144" time="11:01:28" date="10/12/2007" oldid="95" color="" topic="">yes, that is the main strength of Naive Bayes</Utterance>
            </Turn>
            <Turn nickname="(#user3#)" genid="150">
                <Utterance genid="151" ref="-1" time="11:01:31" date="10/12/2007" oldid="96" color="" topic="">the 4 classifiers were HiddenMarkov, Naive Bayes, SVM and C45</Utterance>
            </Turn>
            <Turn nickname="(#user0#)" genid="152">
                <Utterance genid="153" ref="-1" time="11:01:38" date="10/12/2007" oldid="97" color="" topic="">because often the classification is only binary</Utterance>
            </Turn>
            <Turn nickname="(#user3#)" genid="154">
                <Utterance genid="155" ref="-1" time="11:01:49" date="10/12/2007" oldid="98" color="" topic="">and SVM generated the best results after C45</Utterance>
            </Turn>
            <Turn nickname="(#user0#)" genid="156">
                <Utterance genid="157" ref="-1" time="11:01:50" date="10/12/2007" oldid="99" color="" topic="">spam or not spam, english or foreign language and so on</Utterance>
            </Turn>
            <Turn nickname="(#user2#)" genid="158">
                <Utterance genid="159" ref="153" time="11:02:17" date="10/12/2007" oldid="100" color="" topic="">in my field we are dealing with NLP where we can have up 87 classes or tags as we call them</Utterance>
            </Turn>
            <Turn nickname="(#user3#)" genid="160">
                <Utterance genid="161" ref="-1" time="11:02:18" date="10/12/2007" oldid="101" color="" topic="">SVM at a first sight might seen complicated</Utterance>
                <Utterance genid="162" ref="-1" time="11:02:24" date="10/12/2007" oldid="102" color="" topic="">but it is not</Utterance>
            </Turn>
            <Turn nickname="(#user0#)" genid="163">
                <Utterance genid="164" ref="159" time="11:03:24" date="10/12/2007" oldid="103" color="" topic="">of course, that field is not among Naive Bayes strenghts</Utterance>
            </Turn>
            <Turn nickname="(#user2#)" genid="165">
                <Utterance genid="166" ref="162" time="11:03:28" date="10/12/2007" oldid="104" color="" topic="">excuse me, but in my opinion working with probabilities is much simpler than multidimensional vector algebra</Utterance>
            </Turn>
            <Turn nickname="(#user0#)" genid="167">
                <Utterance genid="168" ref="159" time="11:03:45" date="10/12/2007" oldid="105" color="" topic="">can you give us more details about those classes and the problem you solve?</Utterance>
            </Turn>
            <Turn nickname="(#user2#)" genid="169">
                <Utterance genid="170" ref="168" time="11:04:26" date="10/12/2007" oldid="106" color="" topic="">yes, I'm doing mainly morphological analysis</Utterance>
            </Turn>
            <Turn nickname="(#user3#)" genid="171">
                <Utterance genid="172" ref="-1" time="11:04:42" date="10/12/2007" oldid="107" color="" topic="">yes but if you work with probabilisties the accuracy will be lower</Utterance>
            </Turn>
            <Turn nickname="(#user2#)" genid="173">
                <Utterance genid="174" ref="-1" time="11:04:48" date="10/12/2007" oldid="108" color="" topic="">and particularly part of speech tagging</Utterance>
            </Turn>
            <Turn nickname="(#user3#)" genid="175">
                <Utterance genid="176" ref="-1" time="11:04:52" date="10/12/2007" oldid="109" color="" topic="">i prefer quality</Utterance>
                <Utterance genid="177" ref="-1" time="11:05:01" date="10/12/2007" oldid="110" color="" topic="">which means better accurary</Utterance>
                <Utterance genid="178" ref="-1" time="11:05:11" date="10/12/2007" oldid="111" color="" topic="">*accuracy</Utterance>
            </Turn>
            <Turn nickname="(#user2#)" genid="179">
                <Utterance genid="180" ref="-1" time="11:06:09" date="10/12/2007" oldid="112" color="" topic="">that means based on a corpus of lexical data which is previously tagged, we associate for each word in a new sentence a part of speech tag</Utterance>
            </Turn>
            <Turn nickname="(#user0#)" genid="181">
                <Utterance genid="182" ref="-1" time="11:06:34" date="10/12/2007" oldid="113" color="" topic="">What about Maximum Entropy? Which is its main weakness?</Utterance>
            </Turn>
            <Turn nickname="(#user3#)" genid="183">
                <Utterance genid="184" ref="-1" time="11:07:14" date="10/12/2007" oldid="114" color="" topic="">it is supposed to support the strength parts not the weekness part :))</Utterance>
            </Turn>
            <Turn nickname="(#user2#)" genid="185">
                <Utterance genid="186" ref="177" time="11:07:15" date="10/12/2007" oldid="115" color="" topic="">yes, but sometimes the upper levels that use a classifier need to know how sure an observable belongs to a certain class</Utterance>
            </Turn>
            <Turn nickname="(#user1#)" genid="187">
                <Utterance genid="188" ref="182" time="11:07:22" date="10/12/2007" oldid="116" color="" topic="">i think the main weakness of the model is that it can not model interactions between complex features</Utterance>
                <Utterance genid="189" ref="-1" time="11:07:30" date="10/12/2007" oldid="117" color="" topic="">it has to be done by hand</Utterance>
                <Utterance genid="190" ref="-1" time="11:07:39" date="10/12/2007" oldid="118" color="" topic="">unlike in other models</Utterance>
            </Turn>
            <Turn nickname="(#user2#)" genid="191">
                <Utterance genid="192" ref="-1" time="11:07:40" date="10/12/2007" oldid="119" color="" topic="">ant that's where probabilities show their strength</Utterance>
            </Turn>
            <Turn nickname="(#user1#)" genid="193">
                <Utterance genid="194" ref="-1" time="11:07:42" date="10/12/2007" oldid="120" color="" topic="">such as svm</Utterance>
            </Turn>
            <Turn nickname="(#user0#)" genid="195">
                <Utterance genid="196" ref="184" time="11:07:53" date="10/12/2007" oldid="121" color="" topic="">we have to know for each method where it is best, as there is no perfect method</Utterance>
            </Turn>
            <Turn nickname="(#user2#)" genid="197">
                <Utterance genid="198" ref="-1" time="11:08:29" date="10/12/2007" oldid="122" color="" topic="">as there is often much ambiguity and we cannot we always be certain if it belongs to class or to another</Utterance>
                <Utterance genid="199" ref="188" time="11:09:09" date="10/12/2007" oldid="123" color="" topic="">still ME is the one that works best in conjunction with HMM</Utterance>
            </Turn>
            <Turn nickname="(#user1#)" genid="200">
                <Utterance genid="201" ref="-1" time="11:09:47" date="10/12/2007" oldid="124" color="" topic="">on some data inputs, ME alone can outperform even naive bayes</Utterance>
            </Turn>
            <Turn nickname="(#user2#)" genid="202">
                <Utterance genid="203" ref="-1" time="11:09:57" date="10/12/2007" oldid="125" color="" topic="">and that's why I'm here to propose a collaboration project</Utterance>
            </Turn>
            <Turn nickname="(#user3#)" genid="204">
                <Utterance genid="205" ref="-1" time="11:10:39" date="10/12/2007" oldid="126" color="" topic="">my SVM is suitable for a project collaboration</Utterance>
                <Utterance genid="206" ref="-1" time="11:10:39" date="10/12/2007" oldid="127" color="" topic="">because is flexible</Utterance>
            </Turn>
            <Turn nickname="(#user2#)" genid="207">
                <Utterance genid="208" ref="201" time="11:10:45" date="10/12/2007" oldid="128" color="" topic="">precisely why I want you to help me develop my HMM part of speech tagging system</Utterance>
            </Turn>
            <Turn nickname="(#user3#)" genid="209">
                <Utterance genid="210" ref="-1" time="11:11:11" date="10/12/2007" oldid="129" color="" topic="">and the algorithms can be as simple or as complex as is needed to solve a supervised learning problem</Utterance>
            </Turn>
            <Turn nickname="(#user2#)" genid="211">
                <Utterance genid="212" ref="205" time="11:11:39" date="10/12/2007" oldid="130" color="" topic="">I heard Naive Bayes is suitable for semantic analysis; (#user3#) can you tell me where do you see your method fit in a NLP application?</Utterance>
                <Utterance genid="213" ref="-1" time="11:12:48" date="10/12/2007" oldid="131" color="" topic="">I think google is using some kind of SVM for resolving search queries</Utterance>
            </Turn>
            <Turn nickname="(#user3#)" genid="214">
                <Utterance genid="215" ref="-1" time="11:12:56" date="10/12/2007" oldid="132" color="" topic="">my SVM fits very well in e-learning techniques</Utterance>
                <Utterance genid="216" ref="-1" time="11:13:26" date="10/12/2007" oldid="133" color="" topic="">yes is used in personalized search i think</Utterance>
                <Utterance genid="217" ref="-1" time="11:13:26" date="10/12/2007" oldid="134" color="" topic="">is a beta version for google personalized</Utterance>
            </Turn>
            <Turn nickname="(#user2#)" genid="218">
                <Utterance genid="219" ref="-1" time="11:13:31" date="10/12/2007" oldid="135" color="" topic="">it would be a great achievement if we could use it for answering questions like "Which are the tallest mountains in the world?"</Utterance>
            </Turn>
            <Turn nickname="(#user3#)" genid="220">
                <Utterance genid="221" ref="-1" time="11:13:58" date="10/12/2007" oldid="136" color="" topic="">which i think is based on SVM e-learning technique</Utterance>
            </Turn>
            <Turn nickname="(#user2#)" genid="222">
                <Utterance genid="223" ref="216" time="11:14:22" date="10/12/2007" oldid="137" color="" topic="">I think it's time to start a competition for that project; what do you think about a joint venture?</Utterance>
            </Turn>
            <Turn nickname="(#user0#)" genid="224">
                <Utterance genid="225" ref="203" time="11:15:20" date="10/12/2007" oldid="138" color="" topic="">that sounds interesting</Utterance>
            </Turn>
            <Turn nickname="(#user1#)" genid="226">
                <Utterance genid="227" ref="-1" time="11:15:21" date="10/12/2007" oldid="139" color="" topic="">with even 40%</Utterance>
                <Utterance genid="228" ref="203" time="11:15:21" date="10/12/2007" oldid="140" color="" topic="">what do you have in mind?</Utterance>
            </Turn>
            <Turn nickname="(#user2#)" genid="229">
                <Utterance genid="230" ref="-1" time="11:15:22" date="10/12/2007" oldid="141" color="" topic="">we could use HMM and ME for the morphological parsing</Utterance>
            </Turn>
            <Turn nickname="(#user2#)" genid="232">
                <Utterance genid="233" ref="-1" time="11:16:02" date="10/12/2007" oldid="143" color="" topic="">naive bayes for semantics</Utterance>
                <Utterance genid="234" ref="-1" time="11:16:20" date="10/12/2007" oldid="144" color="" topic="">SVM for personalised search</Utterance>
                <Utterance genid="235" ref="-1" time="11:16:48" date="10/12/2007" oldid="145" color="" topic="">and maybe use one of the methods in pragmatic analysis</Utterance>
            </Turn>
            <Turn nickname="(#user3#)" genid="236">
                <Utterance genid="237" ref="-1" time="11:16:58" date="10/12/2007" oldid="146" color="" topic="">yes, sounds great</Utterance>
            </Turn>
            <Turn nickname="(#user2#)" genid="238">
                <Utterance genid="239" ref="-1" time="11:17:02" date="10/12/2007" oldid="147" color="" topic="">what do you think?</Utterance>
            </Turn>
            <Turn nickname="(#user0#)" genid="240">
                <Utterance genid="241" ref="239" time="11:17:25" date="10/12/2007" oldid="148" color="" topic="">It uses the best of each method, so it's a good idea</Utterance>
            </Turn>
            <Turn nickname="(#user2#)" genid="242">
                <Utterance genid="243" ref="228" time="11:18:09" date="10/12/2007" oldid="149" color="" topic="">(#user1#), are you still with us?</Utterance>
            </Turn>
            <Turn nickname="(#user1#)" genid="244">
                <Utterance genid="245" ref="-1" time="11:18:19" date="10/12/2007" oldid="150" color="" topic="">yes, i am back</Utterance>
            </Turn>
            <Turn nickname="(#user0#)" genid="246">
                <Utterance genid="247" ref="245" time="11:18:29" date="10/12/2007" oldid="151" color="" topic="">so, what do you think?</Utterance>
            </Turn>
            <Turn nickname="(#user1#)" genid="248">
                <Utterance genid="249" ref="-1" time="11:18:29" date="10/12/2007" oldid="152" color="" topic="">my network was down</Utterance>
            </Turn>
            <Turn nickname="(#user2#)" genid="250">
                <Utterance genid="251" ref="245" time="11:18:43" date="10/12/2007" oldid="153" color="" topic="">good</Utterance>
            </Turn>
            <Turn nickname="(#user1#)" genid="252">
                <Utterance genid="253" ref="230" time="11:19:14" date="10/12/2007" oldid="154" color="" topic="">i think HMM and ME could outperform all other methods when it comes to sequence analizers</Utterance>
            </Turn>
            <Turn nickname="(#user0#)" genid="254">
                <Utterance genid="255" ref="253" time="11:19:43" date="10/12/2007" oldid="155" color="" topic="">i disagree, I think each method needs to remain in its own field of use</Utterance>
            </Turn>
            <Turn nickname="(#user2#)" genid="256">
                <Utterance genid="257" ref="253" time="11:19:55" date="10/12/2007" oldid="156" color="" topic="">I cannot agree more</Utterance>
            </Turn>
            <Turn nickname="(#user3#)" genid="258">
                <Utterance genid="259" ref="-1" time="11:20:20" date="10/12/2007" oldid="157" color="" topic="">yes, we have to gain maximul with the most suitable classifier in each particular field</Utterance>
                <Utterance genid="260" ref="-1" time="11:20:39" date="10/12/2007" oldid="158" color="" topic="">*maximum</Utterance>
            </Turn>
            <Turn nickname="(#user0#)" genid="261">
                <Utterance genid="262" ref="233" time="11:20:41" date="10/12/2007" oldid="159" color="" topic="">because Naive Bayes is very simple, it is also fast when used for semantics</Utterance>
            </Turn>
            <Turn nickname="(#user2#)" genid="263">
                <Utterance genid="264" ref="253" time="11:21:23" date="10/12/2007" oldid="160" color="" topic="">sequence models have applications to tasks like speech recognition, sentence segmentation and grapheme-to-phoneme conversion, partial parsing/chunking ,and named entity recognition and information extraction</Utterance>
            </Turn>
            <Turn nickname="(#user2#)" genid="266">
                <Utterance genid="267" ref="-1" time="11:22:04" date="10/12/2007" oldid="162" color="" topic="">that's why sequence classifiers are so powerfull</Utterance>
            </Turn>
            <Turn nickname="(#user0#)" genid="268">
                <Utterance genid="269" ref="-1" time="11:22:31" date="10/12/2007" oldid="163" color="" topic="">we can use our combination of automatic learning technologies to design a sequence of very powerfull web applications</Utterance>
                <Utterance genid="270" ref="-1" time="11:22:48" date="10/12/2007" oldid="164" color="" topic="">personalised search is one example, but we can have much more</Utterance>
                <Utterance genid="271" ref="-1" time="11:23:11" date="10/12/2007" oldid="165" color="" topic="">automatic classification of e-mail based on content in folders - school email, work email...</Utterance>
            </Turn>
            <Turn nickname="(#user2#)" genid="272">
                <Utterance genid="273" ref="262" time="11:23:18" date="10/12/2007" oldid="166" color="" topic="">It's what I heard too, I still have my doubts regarding SVM</Utterance>
                <Utterance genid="274" ref="-1" time="11:23:26" date="10/12/2007" oldid="167" color="" topic="">but we'll just have to see what comes after more R&amp;D</Utterance>
                <Utterance genid="275" ref="-1" time="11:24:00" date="10/12/2007" oldid="168" color="" topic="">I think it's time to stop our technical conversation</Utterance>
                <Utterance genid="276" ref="-1" time="11:24:07" date="10/12/2007" oldid="169" color="" topic="">on focus on scheduling another conference for the bussiness aspect</Utterance>
            </Turn>
            <Turn nickname="(#user3#)" genid="277">
                <Utterance genid="278" ref="-1" time="11:24:29" date="10/12/2007" oldid="170" color="" topic="">yes, I agree</Utterance>
            </Turn>
            <Turn nickname="(#user1#)" genid="279">
                <Utterance genid="280" ref="-1" time="11:24:38" date="10/12/2007" oldid="171" color="" topic="">i also agree</Utterance>
            </Turn>
            <Turn nickname="(#user0#)" genid="281">
                <Utterance genid="282" ref="276" time="11:24:38" date="10/12/2007" oldid="172" color="" topic="">we'll check our schedules and contact your asistant</Utterance>
                <Utterance genid="283" ref="282" time="11:24:52" date="10/12/2007" oldid="173" color="" topic="">to schedule the next meetings</Utterance>
            </Turn>
            <Turn nickname="(#user3#)" genid="284">
                <Utterance genid="285" ref="-1" time="11:24:58" date="10/12/2007" oldid="174" color="" topic="">see you soon then :)</Utterance>
            </Turn>
            <Turn nickname="(#user2#)" genid="286">
                <Utterance genid="287" ref="280" time="11:25:05" date="10/12/2007" oldid="175" color="" topic="">we are already working as a team :P</Utterance>
            </Turn>
            <Turn nickname="(#user0#)" genid="288">
                <Utterance genid="289" ref="283" time="11:25:13" date="10/12/2007" oldid="176" color="" topic="">yes, we are :)</Utterance>
            </Turn>
            <Turn nickname="(#user2#)" genid="290">
                <Utterance genid="291" ref="282" time="11:25:34" date="10/12/2007" oldid="177" color="" topic="">yes, I'll have my secretary contact each of you</Utterance>
                <Utterance genid="292" ref="-1" time="11:25:59" date="10/12/2007" oldid="178" color="" topic="">ok then, nice talking to you</Utterance>
                <Utterance genid="293" ref="-1" time="11:26:02" date="10/12/2007" oldid="179" color="" topic="">good bye</Utterance>
            </Turn>
            <Turn nickname="(#user0#)" genid="294">
                <Utterance genid="295" ref="-1" time="11:26:10" date="10/12/2007" oldid="180" color="" topic="">good bye</Utterance>
            </Turn>
            <Turn nickname="(#user1#)" genid="296">
                <Utterance genid="297" ref="-1" time="11:26:14" date="10/12/2007" oldid="181" color="" topic="">goodbye</Utterance>
            </Turn>
            <Turn nickname="(#user3#)" genid="298">
                <Utterance genid="299" ref="-1" time="11:26:47" date="10/12/2007" oldid="182" color="" topic="">goodbye to all</Utterance>
            </Turn>
        </Body>
    </Dialog>