// Cumulative N(0,1) and Inverse Cumulative N(0,1)
#include <iostream>
#include <vector>
#include <cmath>
using namespace std;

double f(double);                          // N(0,1) density
double N(double);                          // N(0,1) cdf
double Ni(double);                         // N(0,1) inverse cdf
double Boole(double, double, int);         // Boole's numerical integration
double Bisection(double, double, double);  // Bisection algorithm

int main() {
    cout << "A few cumulative N(0,1) probabilities" << endl;
    cout << "N(1.96) = " << N(1.96) << endl;
    cout << "N(2.33) = " << N(2.33) << endl;
    cout << "N(-1.96) = " << N(-1.96) << endl;
    cout << "N(-2.33) = " << N(-2.33) << endl;
    cout << endl;
    cout << "A few inverse cumulative N(0,1) probabilities" << endl;
    cout << "Ni(0.99) = " << Ni(0.99) << endl;
    cout << "Ni(0.975) = " << Ni(0.975) << endl;
    cout << "Ni(0.05) = " << Ni(0.05) << endl;
    return 0;
}

// N(0,1) density: exp(-x^2/2) / sqrt(2*pi)
double f(double x) {
    const double pi = 4.0 * atan(1.0);
    return exp(-x * x * 0.5) / sqrt(2.0 * pi);
}

// N(0,1) cdf: integrate the density from -10 (effectively -infinity) to x.
double N(double x) {
    return Boole(-10.0, x, 240);
}

// Composite Boole's Rule integration of f over [StartPoint, EndPoint].
// n is the number of sub-intervals; each Boole panel covers 4 of them
// (5 points), so n should be a multiple of 4 (240 is, above).
double Boole(double StartPoint, double EndPoint, int n) {
    vector<double> X(n + 1, 0.0);
    vector<double> Y(n + 1, 0.0);
    const double delta_x = (EndPoint - StartPoint) / double(n);
    for (int i = 0; i <= n; i++) {
        X[i] = StartPoint + i * delta_x;
        Y[i] = f(X[i]);
    }
    double sum = 0.0;
    for (int t = 0; t <= (n - 1) / 4; t++) {
        const int ind = 4 * t;
        sum += (1.0 / 45.0) * (14 * Y[ind] + 64 * Y[ind + 1] + 24 * Y[ind + 2]
                               + 64 * Y[ind + 3] + 14 * Y[ind + 4]) * delta_x;
    }
    return sum;
}

// Inverse cumulative N(0,1): solve N(x) = prob for x in [-10, 10].
double Ni(double prob) {
    return Bisection(prob, -10.0, 10.0);
}

// Bisection Algorithm.
// BUG FIX: the original swapped the two bracket differences when they had
// the same sign and then fell through to "return midP" with midP never
// assigned (undefined behaviour); it also exited the loop with a goto.
// N is monotone increasing and [-10, 10] brackets every prob in (0, 1),
// so we simply halve the interval until |prob - N(midP)| < Tol.
double Bisection(double prob, double a, double b) {
    const int MaxIter = 500;
    const double Tol = 0.00001;
    double midP = 0.5 * (a + b);
    for (int i = 0; i <= MaxIter; i++) {
        midP = 0.5 * (a + b);
        const double midCdif = prob - N(midP);
        if (fabs(midCdif) < Tol) break;
        if (midCdif > 0)
            a = midP;   // N(midP) < prob: the root lies to the right
        else
            b = midP;   // the root lies to the left
    }
    return midP;
}
// http://www.tohtml.com/cpp

## 2013-04-21, 4:57 PM, Sunday

### Normal Distribution Calculations

## 2013-04-20, 6:39 PM, Saturday

### Multi-dimensional Matrix – Implementation

/* Three Dimensional (3 by 5 by 2) Matrix dynamic memeory allocation */ #include <iostream> #include <vector> #include <boost/numeric/ublas/matrix.hpp> #include "boost/multi_array.hpp" using namespace std; template <class Type> class Mymatrix { private: size_t mWidth, mHeight, mDepth; std::vector<Type> mArray; public: Mymatrix(const size_t inWidth, const size_t inHeight, const size_t inDepth) : mWidth(inWidth), mHeight(inHeight), mDepth(inDepth) { mArray.resize(mWidth * mHeight * mDepth); mArray = std::vector<Type>(mWidth * mHeight * mDepth,9.9); } Type Get(const size_t inX, const size_t inY, const size_t inZ) { return mArray[(inX * mHeight * mDepth) + (inY * mDepth) + inZ]; } void Set(const size_t inX, const size_t inY, const size_t inZ, Type val) { mArray[(inX * mHeight * mDepth) + (inY * mDepth) + inZ] = val; } }; int main (int argc, char* argv[]) { // Method 1: pointer to pointer to pointer double*** M1 = new double** [3]; for (int i=0;i<3;++i){ M1[i] = new double* [5]; for (int j=0;j<5;j++){ M1[i][j] = new double [2]; for (int k=0;k<2;k++) M1[i][j][k]=i*100+j*10+k; } } cout<<M1[2][2][0]<<endl; for (int i=0;i<3;++i){ for (int j=0;j<5;j++){ delete[] M1[i][j]; } delete[] M1[i]; } delete[] M1; // Method 2 - matrix notation in C double (*M2)[5][2]; M2 = new double[3][5][2]; cout<<M2[2][2][0]<<endl; delete[] M2; // Method 3 - Boost :: matrix for 2-dim typedef boost::numeric::ublas::matrix<double> Bmatrix; Bmatrix M3(3,5); for (int i=0;i<3;i++) for (int j=0;j<5;j++) M3(i,j)=i*100+j*10; cout<<M3(2,2)<<endl; // Method 4 - Boost :: multi_array typedef boost::multi_array<double, 3> Trmatrix; typedef Trmatrix::index index; Trmatrix M4(boost::extents[3][5][2]); for (int i=0;i<3;i++) for (int j=0;j<5;j++) for (int k=0;k<2;k++) M4[i][j][k]=i*100+j*10+k; cout<<M4[2][3][1]<<endl; // Method 5 - Self-defined class Mymatrix<double> M5(3,5,2); M5.Set(2,4,1, 352.22); cout<<M5.Get(2,4,1)<<endl; char tmp; cin>>tmp; return 0; } // http://www.tohtml.com/cpp/

### Matrix Multiplication – trivial

#include <sstream> #include <string> #include <fstream> #include <iostream> #include <vector> using namespace std; typedef vector< vector<int> > Intmatrix; void read(string filename, int& n, Intmatrix &A) { string line; ifstream infile; infile.open (filename.c_str()); int i=0, j=0, a; while (getline(infile, line) && !line.empty() && i<n) { istringstream iss(line); j = 0; while (iss >> a) { A[i][j] = a; j++; } i++; } infile.close(); } Intmatrix ijkalgorithm(Intmatrix A, Intmatrix B) { int n = A.size(); // initialise C with 0s vector<int> tmp(n, 0); Intmatrix C(n, tmp); for (int i = 0; i < n; i++) { for (int j = 0; j < n; j++) { for (int k = 0; k < n; k++) { C[i][j] += A[i][k] * B[k][j]; } } } return C; } void printMatrix(Intmatrix matrix) { Intmatrix::iterator it; vector<int>::iterator inner; for (it=matrix.begin(); it != matrix.end(); it++) { for (inner = it->begin(); inner != it->end(); inner++) { cout << *inner; if(inner+1 != it->end()) { cout << "\t"; } } cout << endl; } } int main (int argc, char* argv[]) { string filename = "C:\\Projects\\2000.in"; int n = 3; Intmatrix A(n,n), B(n,n), C(n,n); read (filename, n, A); read (filename, n, B); C = ijkalgorithm(A, B); printMatrix(A); printMatrix(B); printMatrix(C); char tmp; cin>>tmp; return 0; } // https://github.com/MartinThoma/matrix-multiplication // http://www.tohtml.com/cpp/

### Matrix multiplication – Boost

#include <sstream> #include <fstream> #include <iostream> #include <algorithm> #include <string> #include <vector> #include <boost/numeric/ublas/matrix.hpp> #include <boost/numeric/ublas/operation.hpp> using namespace std; using namespace boost::numeric::ublas; typedef matrix<int> Bmatrix; void read(string filename, int& n, Bmatrix &A) { string line; ifstream infile; infile.open (filename.c_str()); int i=0, j=0, a; while (getline(infile, line) && !line.empty() && i<n) { istringstream iss(line); j = 0; while (iss >> a) { A(i,j) = a; j++; } i++; } infile.close(); } void printMatrix(Bmatrix matrix) { for (int i=0; i < matrix.size1(); i++) { for (int j=0; j < matrix.size2(); j++) { cout << matrix(i, j); if(j+1 != matrix.size2()) { cout << "\t"; } } cout << endl; } cout<<endl; } int main (int argc, char* argv[]) { string filename = "C:\\Projects\\2000.in"; int n = 3; Bmatrix A(n,n), B(n,n), C(n,n); read (filename, n, A); read (filename, n, B); boost::numeric::ublas::axpy_prod(A, B, C); printMatrix(A); printMatrix(B); printMatrix(C); char tmp; cin>>tmp; return 0; } // http://martin-thoma.com/matrix-multiplication-python-java-cpp/ // http://www.tohtml.com/cpp/

## 2013-04-07, 9:56 PM, Sunday

### “Taking Short Cuts To Higher Returns With AQR’s Capital Antti Ilmanen Anderson Griggs” By Kendall Anderson

*On November 2-3 of 2011 the CFA Institute and CFA France sponsored the Fourth Annual European Investment Conference in Paris, France. Antti Ilmanen, Ph.D. was one of the presenters. The title of his Presentation was Understanding Expected Returns. This month’s letter is based on this presentation as it appeared in the June 2012 publication CFA Institute Conference Proceedings Quarterly.*

**One of the great advantages in the day to day routine of investment management is the freedom to study.** In most professions, time allocated to study is frowned on by the bosses of the organization. But, in the investment management business, time for study is encouraged, in hopes that financial rewards will follow.

**This same freedom to study, however, is also one of the great disadvantages to a typical day in investment management.** Because the volume of material available is so overwhelming, you can easily allow this freedom to take time and energy away from the daily work needed to fulfill your promises to your clients.

**Faced with the problem of time allocation, most of us in the profession of investment management will defer to short cuts**. We tend to pay attention to those articles that reinforce our beliefs and skip those articles that are contrary to that belief. It was this self reinforcing short cut that led me to a recent presentation by Antti Ilmanen, a managing director of AQR Capital Management (Europe) LLP at the Fourth Annual European Investment Conference, held last year in partnership with CFA France.

**AQR Capital Management was founded in January 1998 by a group of Goldman Sachs, & Company alumni and has been led by Cliff Asness**. Dr. Asness is well known in the world of investment management both for his academic work and his skillful management of AQR. For those outside the industry, Dr. Asness is better known for the home he is building—a 25,900 square foot house in Greenwich, Connecticut; complete with indoor swimming pool and tennis court. No doubt a library and office will take up a couple of square feet.

Dr. Asness could not have created enough individual wealth to build his mansion without the help of some very talented professionals who were willing to give up a bit of the financial rewards of owning a business for the freedom to spend more of their day in research, with the full blessing of a management team. I believe Antti Ilmanen is one such individual. Having earned a Ph.D. in Finance from Dr. Asness’ alma mater the University of Chicago, he spent a decade with Salomon Brothers before moving on to Brevan Howard, a well known hedge fund from the United Kingdom. His articles have earned him a Graham & Dodd scroll and the Bernstein Fabozzi/Jacobs Levy award. His book,**Expected Returns: An Investor’s Guide to Harvesting Market Rewards**, was published last year. If you are interested, this 500 page book is readily available for your reading pleasure from your favorite book outlet.

**Knowing that reading a 500 page book on finance is not at the top of your to do list, I thought I would share with you a few of Dr. Ilmanen’s findings** as seen through the biased eyes of yours truly, a confirmed cheapskate who only wants to invest in bargains. His work shows that five investment strategies, value, carry, trend or momentum, volatility, and liquidity have worked well over long periods of time to gain diversification and reduce the dependency of returns on the stock market itself. Having worked well over time gives us no guarantee that they will work in the future, however it is worth our effort to learn a little about them and see if they could work for us.

**Carry**

**Of the five, carry should not be a strategy used by individual investors**. In its simplest form, carry investing involves borrowing at a low rate and lending at a higher rate in hopes that the interest earned exceeds the interest cost of borrowing. Institutions have developed multiple approaches to use carry in just about every major asset class. This sophisticated strategy requires a very large commitment of funds and the need to be able to withstand periods of dramatic losses that could easily wipe out years of solid performance.

**Liquidity**

**Like carry, a strategy based on liquidity, or should I say illiquidity, is not for the majority of us. **Common sense tells us that if we give up the ability to convert our investments into cash immediately we should be paid extra. As Dr. Ilmanen’s work shows this has held true over many years. The problem, like carry, is that you must commit an exceptional amount of cash that you will not need in order to obtain the rewards of illiquidity. Recently a number of firms have promoted the use of non-tradable real estate investment trusts to individuals to capture this extra return. Not counting the fact that the majority of these REITs rely on the use of borrowed money that has to be repaid before you profit, the minimum investment required for many of these programs can reduce your overall portfolio diversification, thus increasing your risk. Because advertising rules are changing, you will soon be seeing and hearing of the great opportunities that venture capital, hedge funds, private equity and other forms of illiquid investment programs can provide you. Think twice before commiting.

**Trend and Momentum**

**A strategy based on trend and momentum is capable of use by sophisticated individual investors.** Following trends is a form of market timing strategy – one investment at a time. Momentum strategy is normally used in a long-short strategy. The easiest to implement is buying last years’ winners while simultaneously selling last years’ losers. The reason these strategies work is based on psychology. Most of us feel very comfortable investing after our portfolio has increased in value, not after the market has fallen. If we owned shares in a company and the price has declined we can hold on, we could sell or we could buy more. Most individuals will hold or sell after a decline. On the other hand, most investors want to buy more of a good thing and a good thing in common stocks, at least in our minds, is only if the shares have gone up in price. These feelings, those of feeling good and buying more shares, or feeling bad and selling our shares, last for about a year. Around this time is when fundamentals take over, quickly reversing any over or under valuation. Following trends or using a momentum strategy has been shown to work, but has been almost impossible for an individual investor to follow. It seems we hang on to our shares well beyond the time that the process works, allowing a full circle to complete in capturing zero return. Personally, I feel very few people can successfully follow a trading strategy for long enough time to reap the rewards. Ultimately fear or greed will enter our minds and take away any potential gain.

**Volatility**

**The final two strategies, value and volatility, should be understood and used if possible for your individual portfolios.** Dr. Ilmanen uses insurance and lotteries to illustrate his thoughts on volatility. Major volatility in the markets causes investors to overpay for insurance against downside volatility and at the other extreme, make a very risky investment in hopes of a substantial gain— both of which reduce expected returns. This is what he says about insurance against market volatility: *The best-known insurance strategies involve various methods of selling equity index volatility. Doing so earns good long-run returns but at the risk of huge losses when bad times occur.*

This is what he says about taking huge risks in hopes of huge payoffs: *It turns out that the most volatile assets within every asset class offer surprisingly poor long-term returns.*

If the cost of insurance has exceeded the benefits and taking huge risks has had very little payoff, then what about the middle ground, where we accept some levels of risk in hopes of a small but meaningful increase in returns? Dr. Ilmanen gives us the answer: “Meanwhile, low-volatility assets offer surprisingly good returns for taking a small risk. Defensive investments often provide the same or perhaps better risk adjusted returns than their more speculative peers.”

Most of you are thinking these statements are directly related to common stocks. But they also apply to bonds and CDs. His studies also give us this information: “…there is no reward for holding bonds with a maturity greater than five years and …that CCC rated bonds underperformed more highly rated credits.”

Our own approach to investing in bonds and CDs has been to earn an average return of a five year yield over our lifetime. The problem today is that the beginning rates on all fixed income obligations are so low that the payoff for extending maturities is not enough. When and if rates rise so that their current yield exceeds the expected inflation rate, then we will begin extending maturities in our balanced accounts.

**Value**

**Most of you know that I am a firm believer in value investing. Buying low just seems to make sense**. Being reassured by study after study that value investing pays, I was not surprised by Dr. Ilmanen’s inclusion of value as a strategy worthy of your consideration. Value investing is simple to understand. It is buying an asset (stock, bond or any other type you can think of) at a price less than you determine its real value is today. This broad definition does not work very well in any type of academic study. Since data on common stocks is easy to obtain, you will find that the majority of the academic studies on value investing is limited to this asset class. In addition, most academic studies use a calculation of market price to the accounting book value of a company or the market price to the recent earnings of a company to identify value stocks. Dr. Illmanen uses prices to book multiples. According to Dr. Ilmanen the reason for the outperformance of value over time is behavioral. Here is what he said:

*“Various behavioral interpretations, however, may offer more compelling explanations for the out-performance of value stocks. Here is the main narrative. Value investing works within markets and in many other contexts because of the overpricing of the hope for growth. If there is high growth in a stock or sector or a country, investors tend to extrapolate further subsequent growth, resulting in high valuations. Typically, this expectation is followed by disappointment in the growth rate and return”.*

**Common sense tells us to “buy low and sell high”. Why then, is it so difficult for each of us to accept that statement and put it to work for our benefits?** Common stocks, especially individual common stocks, do not become low priced without a reason. Knowing the reason, determining if it is real or temporary, and then estimating how long it will take to correct the problem, opens the door to many opportunities for error. Most of us can understand the problems facing a company. Most of us can judge whether that problem is temporary or real. The reason most of us fail to be rewarded is that we cannot or will not hang on long enough for the problems to be corrected and reflected in a higher market price.

**The Results**

I wanted to share with you Dr. Ilmanen’s results. He reported them as excess returns over and above the returns earned by U.S. Treasury Bills. To give them some meaning, he also provided the annualized volatility. The beginning date used for each return series is when the data became available; they range from 1900 for U.S. and World Stocks to 1990, the first year data on volatility was available.

**This table provides the long history of the market’s returns (ending December 2009)**

Static Risk Premium | Compounded Avg. Excess Return | Annualized Volatility |

World Stocks | 4.5% | 17.3% |

U.S. Stocks | 5.2% | 20.1% |

World Term Premium(bonds) vs. U.S. T-Bill | 0.7% | 8.4% |

U.S. Term Premium (7-10 year) vs. U.S. T-Bill | 1.4% | 6.7% |

U.S. Inv. Grade Corporate bonds vs. Treasury | 0.3% | 4.4% |

**This table provides the long history of the 5 strategies (ending December 2009)**

Dynamic Strategy Premiums | Compounded Avg. Excess Return | Annualized Volatility |

Carry-seeking composite | 6.9% | 9.8% |

Liquidity | 5.1% | 12.3% |

Trend-Following Composite | 9.6% | 10.2% |

Volatility selling (equity index) | 4.2% | 15.3% |

Value | 4.6% | 7.2% |

**Some Final Thoughts**

**We should all be aware of what has worked in the past. However, the only guarantee any of us can give is that things will change**. Most of you know that in order to obtain higher returns you will need to assume some risk. What I find the most interesting of Dr. Ilmanen’s work is the reassurance to me that higher risk does not equate to higher returns. Most of us are far better off in the middle of the risk/reward spectrum. Sticking with quality stocks in combination with short to intermediate term bonds and CDs may not provide the highest short term returns. But it could very easily provide you with the highest returns you could achieve over a lifetime.

Until next time,

Kendall J. Anderson, CFA

## 2013-04-01, 9:00 PM, Monday

### Risk of mistakenly using empirical correlation

#---------------------------------------------------------------------------
#| Comparing different copula;
#| Showing why empirical correlation will be risky if your assumption
#| of the join distribution (ex: Multi normal vs. t) is not correct.
#|
#| Created by: Nan Zhou
#| Time: 2012-03-05
#---------------------------------------------------------------------------
rm(list = ls())
#memory.size(max=4000)
require(copula)

n           <- 1000    # number of simulation of random sample
p           <- 0.95    # quantile line for the plot
df          <- 1       # d.f. of marginal distribution of t
rho         <- 0.75    # empirical correlation
dist.margin <- "norm"  # marginal distribution

#---------------------------------------------------------------------------
# Joint distributions with identical N(0,1) margins and the same rho:
# one built on a Gaussian copula, one on a t copula.
mvd.gaussian <- mvdc(copula = ellipCopula(family = "normal", param = rho),
                     margins = rep(dist.margin, 2),
                     paramMargins = list(list(mean = 0, sd = 1),
                                         list(mean = 0, sd = 1)))

myMvd2 <- mvdc(copula = ellipCopula(family = "t", param = rho, df = df),
               margins = rep(dist.margin, 2),
               paramMargins = list(list(mean = 0, sd = 1),
                                   list(mean = 0, sd = 1)))

#windows ( width = 150, height = 200)
#layout(matrix(c(1,2,3,3),ncol=2,byrow=TRUE),heights=c(1,2))
par(mfrow = c(3, 2))

# Density contours of the two joint distributions.
contour(mvd.gaussian, dmvdc, xlim = c(-3, 3), ylim = c(-3, 3))
title(paste("Gaussian Copula, Margin Dist =", dist.margin,
            ", rho =", rho, ", df =", df), cex.main = .8)
contour(myMvd2, dmvdc, xlim = c(-3, 3), ylim = c(-3, 3))
title(paste("t Copula, Margin Dist =", dist.margin,
            ", rho =", rho, ", df =", df), cex.main = .8)

# Simulated draws: Gaussian copula (blue) vs. t copula (red) for each
# (rho, df) combination, with the p-quantile marked on both axes.
for (rho in c(0.75, 0.25)) for (df in c(1, 5)) {
  seed <- 1
  lim.plot <- c(-3, 3)
  u <- rcopula(normalCopula(param = rho, dim = 2), n)
  v <- rcopula(tCopula(param = rho, dim = 2, df = df), n)
  plot(qnorm(u), xlab = 'x1', ylab = 'x2', col = 'blue', cex = 0.6,
       xlim = lim.plot, ylim = lim.plot)
  title(paste("rho=", rho, "df=", df))
  points(qnorm(v), xlab = 'x1', ylab = 'x2', col = 'red', cex = 0.6)
  q <- qnorm(p)
  abline(v = q)
  abline(h = q)
  text(-2, q, paste(p * 100, "% quantile"))
}

## 2013-03-20, 10:04 AM, Wednesday

### How many plays are expected to hear every song in your iTunes library?

Today, when I was getting bored with all the songs in my iPhone library, I suddenly thought of a quick but useful math puzzle: assuming the iPhone library has 100 songs and you use random shuffle (with re-sampling) to pick the next song, how many plays are expected before you have heard all of these songs?

This is very similar to the classical Coupon Collector problem. Here is a quick-and-dirty R calculation, with two plots on a regular scale and a log scale:

# Coupon-collector expectation: with random shuffle (sampling with
# replacement), the expected number of plays needed to hear all N.songs
# songs at least once is N.songs * (1 + 1/2 + ... + 1/N.songs).
# BUG FIX: the pasted code used typographic quotes (curly ' and "), which
# are syntax errors in R; they are replaced with plain ASCII quotes.
N.songs <- 100

# Expected number of plays for a library of N.songs songs (harmonic sum).
temp <- function(N.songs) {
  return(sum(N.songs / (1:N.songs)))
}

x <- 1:N.songs
y <- sapply(x, temp)

# Plot the expectation on a log x-axis and on a regular axis side by side.
par(mfrow = c(1, 2))
plot(x, y, type = 'l', col = 3, lty = 2, lwd = 2, log = 'x',
     xlab = 'Number of New Songs', ylab = "# Trials Needed")
plot(x, y, type = 'l', col = 3, lty = 2, lwd = 2,
     xlab = 'Number of New Songs', ylab = "# Trials Needed")

## 2013-03-16, 2:19 PM, Saturday

### Python First Try

# First pandas try: load a CSV, store it in a SQLite table, then keep the
# smallest value per label via sort + drop_duplicates.
# BUG FIX: the pasted code used typographic quotes (curly ' and "), which
# are syntax errors in Python; replaced with plain ASCII quotes.
import pandas as pd
import pandas.io.sql as pd_sql
import sqlite3 as sql
import os

path = "C:\\test"
os.chdir(path)

DF = pd.read_csv('spy2011.csv')
DF.head(10)

# Persist the frame into SQLite table "tbl".
con = sql.connect('data.db')
pd_sql.write_frame(DF, "tbl", con)
con.commit()

# NOTE(review): DataFrame.sort is the legacy pandas API (sort_values in
# modern pandas) -- kept as-is to match the pandas version used at the time.
df = pd.DataFrame({'label': ['A', 'B', 'C'] + ['B'] * 2 + ['A'] * 3,
                   'value': [4, 3, 6, 3, 1, 2, 4, 4]})
df2 = df.sort(['label', 'value'])
df3 = df2.drop_duplicates(['label'])  # first (smallest) row per label

## 2013-03-13, 10:31 PM, Wednesday

### Random Permutation 2 – Random sample m out of n

int[] pickMRandomly(int[] array, int m) { int[] subset = new int[m]; for (int j = 0; j < m; j++) { int index = random(j, n); // random number between j and n int temp = array[index]; array[index] = array[j]; array[j] = temp; subset[j] = temp; } return subset; }

### Random Permutation 1 – shuffle

public static void shuffleArray(int[] array) { Random generator = new Random(); for (int j = 0; j < array.length; j++) { int index = generator.nextInt(array.length-j)+j; // random number between j and n int temp = array[index]; array[index] = array[j]; array[j] = temp; } }

## 2013-01-20, 1:01 AM, Sunday

### Fibonacci Number: Recursive vs. Iterative

#include <iostream>
using namespace std;

// Recursive Fibonacci: F(1) = F(2) = 1; exponential time (two recursive
// calls per level).  Returns -1 for non-positive n.
int fibo2(int n) {
    if (n <= 0) return -1;
    if (n <= 2) return 1;
    return fibo2(n - 1) + fibo2(n - 2);
}

// Iterative Fibonacci: O(n) time, O(1) space.  Returns -1 for n <= 0.
// Note: int overflows past F(46); the demo below stays well under that.
int fibo1(int n) {
    if (n <= 0) return -1;
    int a = 1, b = 1, c = 2;
    for (int i = 3; i <= n; i++) {
        c = a + b;
        a = b;
        b = c;
    }
    return b;
}

int main() {
    // BUG FIX: corrected the misspelled "Fiboracci" in the banner.
    cout << "Fibonacci Number:\n";
    for (int n = 1; n < 40; n++) {
        cout << fibo1(n) << endl;
    }
    return 0;
}
//Highlighted at http://tohtml.com/cpp/
//Bred 3 + C++

## 2012-12-31, 4:00 PM, Monday

### [Cited] JPMorgan Chase: Too Big To Fail May Be Just Big Enough To Succeed

“JPM is #1 in Global Investment Banking fees. It is #1 in Fixed Income Markets income share. It is one of the biggest derivatives traders. One might at first think that the new rules for FinReg that are set to be implemented in 2013 would hurt JPM more than others. However, the opposite may be true. There are two derivatives laws that are particularly important.”

“The first law would require that most derivatives be traded on open electronic platforms, with prices visible to all participants before deals are done. This would over time lead to much lower prices for derivatives. It would mean lower prices (margins) for JPM in this area. However, the big traders say that large transactions if done openly would disrupt the markets. They are lobbying for exemptions. The big traders such as JPM will be the ones to get these exemptions. They are the ones that will get the higher prices for their large trade services.”

“Another area of weakness in the law is the following loophole. Traders are now required to post U.S. Treasuries as collateral for a bigger part of their trades. Many do not have these Treasuries handy. A new business has sprung up to service this. The big banks will lend the traders U.S. Treasuries or other qualified securities in exchange for lower rated securities (for a fee). This is being called collateral transformation. Experts say this is just a way of hiding risks this law was supposed to avert. In other words the law has created a lot of red tape, with little actual risk avoidance for this case (and many others). JPM should be able to benefit from providing this service.”

## 2012-12-18, 8:17 AM, Tuesday

## 2012-12-11, 6:50 PM, Tuesday

### Implementation of Binary Search Tree

#include <iostream>
#include <vector>
using namespace std;

// Binary search tree node; insertion sends data <= parent to the left,
// data > parent to the right.
struct Tree {
    char data;
    Tree *left, *right, *parent;
    Tree(char a) {
        data = a;
        left = NULL;
        right = NULL;
        parent = NULL;
    }
};

// Insert data into the subtree rooted at node and return the subtree root
// (a freshly allocated node when node is NULL).
Tree* insertTreeNode(Tree* node, char data) {
    if (node == NULL) {
        Tree* newNode = new Tree(data);
        return newNode;
    }
    if (data <= node->data) {
        node->left = insertTreeNode(node->left, data);
        node->left->parent = node;
    } else {
        node->right = insertTreeNode(node->right, data);
        node->right->parent = node;
    }
    return node;
}

// Helper: verify the BST property with ancestor-derived bounds.
// minVal/maxVal are NULL when unbounded.  Matching insertTreeNode, every
// node must be strictly greater than the min bound (values equal to an
// ancestor go left) and less than or equal to the max bound.
static bool isBSTInRange(Tree* node, const char* minVal, const char* maxVal) {
    if (node == NULL) return true;
    if (minVal && node->data <= *minVal) return false;
    if (maxVal && node->data > *maxVal) return false;
    return isBSTInRange(node->left, minVal, &node->data)
        && isBSTInRange(node->right, &node->data, maxVal);
}

// Is the whole tree a valid BST?
// BUG FIX: the original used an else-if chain, so whenever a left child
// existed the right child's ordering was never examined, and it only
// compared each node with its direct children, so a deeper descendant could
// violate the BST property unnoticed.  The bounds-based helper checks the
// full ordering invariant.
bool isBST(Tree* root) {
    return isBSTInRange(root, NULL, NULL);
}

int main() {
    char charArr[9] = {'A','B','C','D','E','F','G','H','I'};
    Tree* root = NULL;
    // for (int i=0;i<sizeof(charArr)/sizeof(char);++i)
    //     insertTreeNode(root,charArr[i]);
    root = insertTreeNode(root, 'B');
    root->left = insertTreeNode(root->left, 'A');
    /* is the tree BST? */
    cout << "Binary Search Tree?: " << (isBST(root) ? 'Y' : 'N') << endl;
    return 0;
}
//Highlighted at http://tohtml.com/cpp/
//Bred 3 + C++
//http://www.bogotobogo.com/cplusplus/cpptut.php

## 2012-12-10, 5:44 PM, Monday

### Implementation of Hashing Table

#include <iostream>
#include <fstream>
#include <string>
using namespace std;

const int TABLE_SIZE = 128;

// Chain node for separate chaining: one (key, value) pair per node.
struct TableList {
    string key;
    int value;
    struct TableList *next;
};

// Multiplicative string hash (K&R style, factor 31), reduced modulo the
// table size.  FIX: const reference parameter (no per-call string copy) and
// an unsigned index to match s.length().
unsigned hash_function(const string& s) {
    unsigned hash = 0;
    for (string::size_type i = 0; i < s.length(); i++)
        hash = s[i] + 31 * hash;
    return hash % TABLE_SIZE;
}

unsigned rehash(unsigned);  // NOTE(review): declared but never defined or used

// Fixed-size hash map from string keys to int values, collisions resolved
// by separate chaining.
class HashMap {
private:
    TableList **table;  // TABLE_SIZE bucket heads, NULL when empty
public:
    HashMap() {
        table = new TableList*[TABLE_SIZE];
        for (int i = 0; i < TABLE_SIZE; i++)
            table[i] = NULL;
    }
    // Print every non-empty bucket as: table[i] key(value)->key(value)...
    void showMap() {
        struct TableList *tp;
        for (int i = 0; i < TABLE_SIZE; i++) {
            tp = table[i];
            if (tp) {
                cout << "table[" << i << "] " << tp->key << "(" << tp->value << ")";
                tp = tp->next;
            } else
                continue;
            while (tp) {
                cout << "->" << tp->key << "(" << tp->value << ")";
                tp = tp->next;
            }
            cout << endl;
        }
    }
    // Return the node holding key s, or NULL when absent.
    struct TableList* lookup(const string& s) {
        struct TableList *tp;
        for (tp = table[hash_function(s)]; tp != NULL; tp = tp->next)
            if ((tp->key).compare(s) == 0) return tp;  // found
        return NULL;  // not found
    }
    // Insert key/value; when the key already exists, just refresh its value.
    void put(const string& key, int value) {
        struct TableList *tp;
        if (!(tp = lookup(key))) {
            // not found: push a new node on the front of its bucket chain
            tp = new TableList;
            tp->key = key;
            tp->value = value;
            unsigned hash = hash_function(key);
            tp->next = table[hash];
            table[hash] = tp;
        } else {
            // BUG FIX: the original re-pointed table[hash] at tp even when
            // tp sat in the middle of a chain, orphaning (and leaking) every
            // node ahead of it.  An existing key only needs its value
            // updated in place.
            tp->value = value;
        }
    }
    // BUG FIX: the original destructor deleted only the first node of each
    // bucket, leaking the rest of every chain.
    ~HashMap() {
        for (int i = 0; i < TABLE_SIZE; i++) {
            TableList *tp = table[i];
            while (tp != NULL) {
                TableList *nextNode = tp->next;
                delete tp;
                tp = nextNode;
            }
        }
        delete[] table;
    }
};

int main() {
    HashMap m;
    string line;
    ifstream dict_reader("C:/Temp/linux.words");
    //ifstream dict_reader("C:/Temp/shortlist");
    if (!dict_reader) {
        cout << "Error opening input file - dict " << endl;
        exit(1);
    }
    int count = 0;
    // Load only words starting with 'x'/'X' to keep the demo output small.
    while (getline(dict_reader, line)) {
        if ((line[0] >= 'x' && line[0] < 'y') || (line[0] >= 'X' && line[0] < 'Y')) {
            m.put(line, count++);
        }
    }
    m.showMap();
    return 0;
}
//Highlighted at http://tohtml.com/cpp/
//Bred 3 + C++
//http://www.bogotobogo.com/cplusplus/cpptut.php

## 2012-12-07, 6:01 PM, Friday

### Implementation of Linked List

// Implementation of a singly linked list with the usual operations:
// append, prepend, search, delete, destroy, reverse, copy, compare.
#include <iostream>
using namespace std;

struct Node {
    int data;
    Node* next;
};

// Only for the 1st node: set its payload and terminate the list.
void initNode(Node *head, int n)
{
    head->data = n;
    head->next = NULL;
}

// Append a new node holding n at the tail.
void addNode(struct Node *head, int n)
{
    Node *newNode = new Node;
    newNode->data = n;
    newNode->next = NULL;
    Node *cur = head;
    while (cur) {
        if (cur->next == NULL) {
            cur->next = newNode;
            return;
        }
        cur = cur->next;
    }
}

// Insert a new node holding n at the head.
void insertFront(struct Node **head, int n)
{
    Node *newNode = new Node;
    newNode->data = n;
    newNode->next = *head;
    *head = newNode;
}

// Print the list, space-separated.
// BUG FIXED: the old code streamed an empty "" between values,
// so the numbers ran together (e.g. "102030").
void display(struct Node* head)
{
    for (Node *list = head; list; list = list->next)
        cout << list->data << " ";
    cout << endl;
    cout << endl;
}

// Linear search: first node whose data == n, or NULL.
// BUG FIXED: the old code fell off the end without returning a value
// when the node was absent (undefined behavior).
struct Node *searchNode(struct Node* head, int n)
{
    while (head) {
        if (head->data == n)
            return head;
        head = head->next;
    }
    cout << "No Node " << n << " in list.\n";
    return NULL;
}

// Same search as a single loop condition.
// BUG FIXED: the old code used bitwise '&', which does not short-circuit,
// so head->data was evaluated even when head was NULL.
struct Node *searchNode2(struct Node* head, int n)
{
    while (head != NULL && head->data != n)
        head = head->next;
    return head;
}

// Unlink and delete ptrDel from the list; true on success.
bool deleteNode(struct Node **head, Node *ptrDel)
{
    if (ptrDel == NULL)  // e.g. searchNode() did not find the value
        return false;
    if (ptrDel == *head) {
        *head = (*head)->next;
        delete ptrDel;
        return true;
    }
    for (Node* cur = *head; cur; cur = cur->next) {
        if (cur->next == ptrDel) {
            cur->next = ptrDel->next;
            delete ptrDel;
            return true;
        }
    }
    return false;
}

// Delete every node and leave *node == NULL.
void deleteLinkedList(struct Node **node)
{
    while (*node) {
        struct Node* tmpNode = *node;
        *node = tmpNode->next;
        delete tmpNode;
    }
}

// Reverse the list in place and return the new head.
// BUG FIXED: the old code dereferenced the 2nd and 3rd nodes
// unconditionally and crashed on lists of length 0 or 1.
struct Node* reverse(struct Node** List)
{
    Node* prev = NULL;
    Node* cur = *List;
    while (cur) {
        Node* next = cur->next;
        cur->next = prev;
        prev = cur;
        cur = next;
    }
    *List = prev;
    return *List;
}

// Deep-copy the list rooted at node into *pNew (recursively).
void copyLinkedList(struct Node *node, struct Node **pNew)
{
    if (node != NULL) {
        *pNew = new Node;
        (*pNew)->data = node->data;
        (*pNew)->next = NULL;
        copyLinkedList(node->next, &((*pNew)->next));
    }
}

// Compare two lists element-wise: same(1), different(0).
// IMPROVED: rewritten without the old 'static int flag', which made
// the function non-reentrant.
int compareLinkedList(struct Node *node1, struct Node *node2)
{
    if (node1 == NULL && node2 == NULL)
        return 1;
    if (node1 == NULL || node2 == NULL)
        return 0;
    if (node1->data != node2->data)
        return 0;
    return compareLinkedList(node1->next, node2->next);
}

int main()
{
    Node* newHead = 0;
    Node* head = new Node;
    initNode(head, 10);
    display(head);
    addNode(head, 20);
    addNode(head, 30);
    display(head);
    insertFront(&head, 5);
    display(head);

    int numDel = 5;
    Node* ptrDelete = searchNode(head, numDel);
    if (deleteNode(&head, ptrDelete))
        cout << "Node " << numDel << " deleted!\n";
    display(head);

    cout << "The list is reversed\n";
    reverse(&head);
    display(head);

    cout << "The list is copied\n";
    copyLinkedList(head, &newHead);
    display(newHead);

    cout << "Comparing the two lists...\n";
    cout << "Are the two lists same?\n";
    if (compareLinkedList(head, newHead))
        cout << "Yes, they are same!\n";
    else
        cout << "No, they are different!\n";
    cout << endl;

    numDel = 20;
    ptrDelete = searchNode(newHead, numDel);
    if (deleteNode(&newHead, ptrDelete)) {
        cout << "Node " << numDel << " deleted!\n";
        cout << "The new list after the delete is\n";
        display(newHead);
    }
    cout << "Comparing the two lists again...\n";
    cout << "Are the two lists same?\n";
    if (compareLinkedList(head, newHead))
        cout << "Yes, they are same!\n";
    else
        cout << "No, they are different!\n";
    cout << endl;

    cout << "Deleting the copied list\n";
    deleteLinkedList(&newHead);
    cout << "Displaying after deleting:\n";
    display(newHead);

    // Free the original list too before exiting.
    deleteLinkedList(&head);
    cout << "END\n";
    return 0;
}
//Highlighted at http://tohtml.com/cpp/
//Bred 3 + C++
//http://www.bogotobogo.com/cplusplus/cpptut.php

## 2012-12-06, 11:24 PM, Thursday

### Replace a char by a string

// Replace all spaces in a string with "%20".
#include <iostream>
#include <string>
#include <cstring>  // strlen (was missing)
using namespace std;

// Method 1: build the result with std::string.
string repUsestring(string str)
{
    string newStr;
    for (string::size_type i = 0; i < str.length(); i++) {
        if (str[i] == ' ')
            newStr += "%20";
        else
            newStr += str[i];
    }
    return newStr;
}

// Method 2: C-style array. Caller owns (delete[]) the returned buffer.
char* repUsearray(const char* str)
{
    int length = strlen(str), newLength = length;
    // Each space grows the string by 2 ("%20" replaces 1 char with 3).
    for (int i = 0; i < length; i++) {
        if (str[i] == ' ')
            newLength += 2;
    }
    // BUG FIXED: allocate room for the terminating '\0' as well;
    // the old code wrote one byte past the end of the buffer.
    char* newStr = new char[newLength + 1];
    int j = 0;
    for (int i = 0; i < length; i++) {
        if (str[i] != ' ') {
            newStr[j++] = str[i];
        } else {
            newStr[j++] = '%';
            newStr[j++] = '2';
            newStr[j++] = '0';
        }
    }
    newStr[newLength] = '\0';  //ALWAYS REMEMBER TO ADD THE END OF ARRAY
    return newStr;
}

int main()
{
    string str1("test a test b test c ");
    // BUG FIXED: a string literal is const; binding it to 'char*' is
    // invalid in standard C++.
    const char* str2 = "ai ni ai wo ai ta ";
    cout << str1 << endl;
    cout << repUsestring(str1) << endl;
    cout << str2 << endl;
    char* replaced = repUsearray(str2);
    cout << replaced << endl;
    delete[] replaced;  // BUG FIXED: the old code leaked this buffer
    return 0;
}
//Highlighted at http://tohtml.com/cpp/
//Bred 3 + C++
//http://www.bogotobogo.com/cplusplus/cpptut.php

### Remove duplicate characters

// Remove duplicated characters from a C string.
#include <iostream>
#include <cstring>  // strlen, strcpy (was missing)
using namespace std;

// Method 1: in place, no (large) extra memory; O(n^2).
void rmDuplicated1(char str[])
{
    int nOld = strlen(str);
    if (nOld > 1) {
        int nNew = 0;
        for (int i = 0; i < nOld; ++i) {   // for each original element
            // Check whether this element was already kept.
            int j;
            for (j = 0; j < nNew; ++j) {
                if (str[j] == str[i])
                    break;
            }
            // First occurrence: keep it.
            if (j == nNew)
                str[nNew++] = str[i];
        }
        str[nNew] = 0;
    }
}

// Method 2: additional memory (256-entry seen-table); O(n).
// Returns str itself for trivial inputs, otherwise a fresh buffer
// the caller must delete[].
char* rmDuplicated2(char str[])
{
    int nOld = strlen(str);
    if (nOld < 2)
        return str;
    // BUG FIXED: +1 for the terminator; when no character repeats,
    // the old nOld-sized buffer overflowed on the trailing 0 write.
    char* newStr = new char[nOld + 1];
    bool check[256] = {0};
    int nNew = 0;
    for (int i = 0; i < nOld; ++i) {
        // Cast via unsigned char so bytes >= 128 don't index negatively.
        unsigned char c = str[i];
        if (!check[c]) {
            newStr[nNew++] = str[i];
            check[c] = true;
        }
    }
    newStr[nNew] = 0;
    return newStr;
}

int main()
{
    char* str = new char[10];
    strcpy(str, "aaaBBB");  // char* str = "Hello" doesn't work. Cz here str is const type.
    cout << str << endl;
    rmDuplicated1(str);
    cout << str << endl;
    // rmDuplicated2 may return either str itself or a new buffer,
    // so only delete the result when it is a separate allocation.
    char* dedup = rmDuplicated2(str);
    cout << dedup << endl;
    if (dedup != str)
        delete[] dedup;  // BUG FIXED: the old code leaked this buffer
    delete[] str;
    return 0;
}
//Highlighted at http://tohtml.com/cpp/
//Bred 3 + C++

## 2012-11-02, 4:49 PM, Friday

### Reverse a C-style String

// Reverse a C-Style String in place.
#include <iostream>
#include <cstring>  // strcpy/strlen (was missing; strcpy is used in main)
using namespace std;

// Two-pointer in-place reversal; safe on NULL and on "".
void reverse(char *str)
{
    // Guard added: on "" the old code formed the out-of-range pointer
    // str-1 before comparing (undefined behavior, even if harmless here).
    if (str == NULL || *str == '\0')
        return;
    char *end = str;
    while (*end)         // find the last character
        ++end;
    --end;
    while (str < end) {  // swap inward from both ends
        char tmp = *str;
        *str++ = *end;
        *end-- = tmp;
    }
}

int main()
{
    char* str = new char[6];
    strcpy(str, "Hello");  // char* str = "Hello" doesn't work. Cz here str is const type.
    cout << str << endl;
    reverse(str);
    cout << str << endl;
    delete[] str;
    return 0;
}
//Highlighted at http://tohtml.com/cpp/
//Bred 3 + C++

### STL Stack and Queue

// Read whitespace-separated words from stdin and echo them back.
// The active container is a std::queue (FIFO order); swap in the
// commented std::stack lines to print in reverse (LIFO) instead.
// http://www.cplusplus.com/reference/stl/stack/
//Highlighted at http://tohtml.com/cpp/
#include <iostream>
#include <stack>
#include <queue>
#include <string>
using namespace std;

int main()
{
    queue<string> allWords;   // stack<string> allWords; // for reverse order
    string word;

    // Consume every word on standard input.
    while (cin >> word)
        allWords.push(word);

    cout << "Number of words = " << allWords.size() << endl;

    // Drain the container, printing one word per line.
    while (!allWords.empty()) {
        cout << allWords.front() << endl;   // allWords.top() for the stack
        allWords.pop();
    }
    return 0;
}

## 2012-11-01, 9:58 PM, Thursday

### Stack/Queue Implementation in Cpp

// Hand-rolled linked-list Stack (LIFO) and Queue (FIFO) over int "Object"s.
#include <iostream>
#include <string>
using namespace std;

typedef int Object;

class Node {
public:
    Object data;
    Node* next;
    Node(Object item) {
        data = item;
        next = NULL;
    }
    ~Node() { delete next; }  // cascades down the rest of the chain
};

class Stack {
public:
    Node* head;
    // BUG FIXED: head was never initialized, so the first pop/push
    // read an indeterminate pointer (undefined behavior).
    Stack() : head(NULL) {}
    // Pop the top item; returns 0 on an empty stack.
    Object pop() {
        if (head != NULL) {
            Node* old = head;
            Object item = old->data;
            head = old->next;
            old->next = NULL;  // detach so ~Node doesn't delete the rest
            delete old;        // BUG FIXED: popped nodes used to leak
            return item;
        }
        return 0;  // NULL is just 0 for an int Object
    }
    void push(Object item) {
        Node* t = new Node(item);
        t->next = head;
        head = t;
    }
};

class Queue {
public:
    Node* head;
    Node* tail;
    // BUG FIXED: head/tail were never initialized (undefined behavior).
    Queue() : head(NULL), tail(NULL) {}
    void push(Object item) {
        Node* t = new Node(item);
        if (tail != NULL) {
            tail->next = t;
            tail = t;
        } else {
            head = tail = t;
        }
    }
    // Pop the front item; returns 0 on an empty queue.
    Object pop() {
        if (head != NULL) {
            Node* old = head;
            Object item = old->data;
            head = old->next;
            // BUG FIXED: when the queue empties, tail must be reset,
            // otherwise the next push chains off a stale node.
            if (head == NULL)
                tail = NULL;
            old->next = NULL;  // detach before deleting (see ~Node)
            delete old;        // BUG FIXED: popped nodes used to leak
            return item;
        }
        return 0;
    }
};

int main()
{
    Stack* st = new Stack();
    Queue* qu = new Queue();
    Object temp;
    while (cin >> temp) {
        st->push(temp);
        qu->push(temp);
    }
    cout << "Stack (FILO): ";   // BUG FIXED: was misspelled "Stake"
    while (st->head != NULL)
        cout << st->pop();
    cout << endl;
    cout << "Queue (FIFO): ";
    while (qu->head != NULL)
        cout << qu->pop();
    cout << endl;
    // Both containers are empty here (every node was deleted by pop),
    // so these just free the two container objects themselves.
    delete st;
    delete qu;
    return 0;
}
//Highlighted at http://tohtml.com/cpp/
//Bred 3 + C++

## 2012-10-21, 12:25 AM, Sunday

### Reading Notes

“The need for performance evaluation in the technical analysis” – this paper summarizes useful performance-evaluation methods for technical trading rules, along with techniques for checking parameter robustness and for parameter optimization.

ECN (also referred to as Alternative Trading Systems): http://en.wikipedia.org/wiki/Electronic_communication_network

Fee structure

Dark Pool

## 2012-10-18, 10:27 PM, Thursday

### Pairs Trading – VXX vs. VXZ

################################################################################################
# Quantitative Trading Strategies - Volatility Trading:
# Date Created: 2012-10-18
# Author: Nan Zhou, zhounan1983@gmail.com
#
# Use volatility futures, shortdate vs medium dated
#   VXX  iPath S&P 500 VIX Short-Term Futures ETN (VXX)
#   VXZ  iPath S&P 500 VIX Mid-Term Futures ETN (VXZ)
#
# Plan is to check for trade entry at the open, and exit the trade at the close:
#   IF signal < signalLowLim   then in contango and do a carry trade
#   IF signal > signalUpperLim then in backwardation so do a reverse carry
#   ELSE do nothing
################################################################################################

## Install all required packages:
rm(list=ls())
library("quantmod")
library("PerformanceAnalytics")
par(ask=T)
memory.size(max = 4000)
options(digits=8, digits.secs=4)

## PLEASE CHANGE THE WORKFOLD TO YOUR WORK FOLD FIRST ##
workfold <- "H:\\My References\\Research\\2012_10_Trading Codes"
# workfold <- "C:\\Dropbox\\Research\\2012-10 VIX Trading Strategies"
setwd(workfold)

################################################################################################
# INPUT OF PARAMETERS
################################################################################################
signalLowLim   <- 0.9
signalUpperLim <- 1.1
symbolLst <- c("VXX", "VXZ", "^GSPC")
# FIXED: two back-to-back assignments silently discarded "yahoo";
# keep one active and leave the alternative commented out.
# dataSrc <- "yahoo"
dataSrc <- "csv"

startDate <- as.Date("2010-01-01")             # Specify what date to get the prices from
hedgeTrainingStartDate <- startDate            # Start date for training the hedge ratio
hedgeTrainingEndDate <- as.Date("2011-01-01")  # End date for training the hedge ratio
tradingStartDate <- as.Date("2011-01-02")      # Date to run the strategy from
# tradingEndDate <- as.Date("2011-12-31")      # Date to run the strategy to
tradingEndDate <- Sys.Date()

#--------------------------------------------------------------
### SECTION 1 - Download Data & Calculate Returns ###
## Download the data:
pnlEnv <- new.env()  # Make a new environment for quantmod to store data in

# Load OHLC data either from per-symbol CSV files or via getSymbols().
# NOTE(review): getSymbols() takes 'to=', not 'end='; the extra 'end'
# argument falls into '...' and the default to=Sys.Date() applies
# (the same value here) -- confirm if a different end date is ever needed.
importOHLC <- function(symbolLst = NULL, env = .GlobalEnv, src = "yahoo",
                       format = "%m/%d/%Y", return.class = "xts",
                       from = Sys.Date()-365, end = Sys.Date()){
  if (src == "csv"){
    for (i in 1:length(symbolLst)){
      symbolLst <- toupper(gsub("\\^", "", symbolLst))  # "^GSPC" -> "GSPC"
      symbol <- symbolLst[i]
      namesData <- c("Open", "High", "Low", "Close", "Volume", "Adj.Close")
      fr <- read.csv(file = paste(symbol,'csv',sep='.'))
      fr <- xts(fr[, namesData],
                as.Date(as.character(fr[, "Date"]), format = format))
      colnames(fr) <- paste(symbol,
                            c("Open", "High", "Low", "Close", "Volume", "Adjusted"),
                            sep = ".")
      fr <- quantmod:::convert.time.series(fr = fr, return.class = return.class)
      fr <- window(fr, start = from, end = end, extend = TRUE)
      assign(symbol, fr, env)
    }
  } else{
    getSymbols(symbolLst, env = env, src = src, from = from, end = end)
  }
  return(symbolLst)
}

importOHLC(symbolLst, env = pnlEnv, src = "csv", format = "%m/%d/%Y",
           from = startDate, end = Sys.Date())

## Calculate open-to-close returns for VXX and VXZ:
vxxRet <- (Cl(pnlEnv$VXX)/Op(pnlEnv$VXX))-1
colnames(vxxRet) <- "Ret"
pnlEnv$VXX <- cbind(pnlEnv$VXX,vxxRet)
vxzRet <- (Cl(pnlEnv$VXZ)/Op(pnlEnv$VXZ))-1
colnames(vxzRet) <- "Ret"
pnlEnv$VXZ <- cbind(pnlEnv$VXZ,vxzRet)

#--------------------------------------------------------------
### SECTION 2 - Calculating the hedge ratio ###
## Want to work out a hedge ratio, so that we can remain Vega neutral
## (the futures contracts are trading VEGA)
trainVxx <- window(pnlEnv$VXX$Ret, start=hedgeTrainingStartDate, end=hedgeTrainingEndDate)
trainVxz <- window(pnlEnv$VXZ$Ret, start=hedgeTrainingStartDate, end=hedgeTrainingEndDate)
trainData = na.omit(cbind(trainVxx,trainVxz))
colnames(trainData) <- c("VXX","VXZ")

## Simply linearly regress the returns of Vxx with Vxz:
regression <- lm(trainData[,"VXZ"] ~ trainData[,"VXX"])  # Linear Regression
hedgeratio <- regression$coefficient[2]
plot(x=as.vector(trainData[,"VXX"]), y=as.vector(trainData[,"VXZ"]),
     main=paste("Hedge Regression: Y =", regression$coefficient[2], " * X + intercept"),
     xlab="Vxx Return", ylab="Vxz Return", pch=19)
abline(regression, col = 2 )

#--------------------------------------------------------------
### SECTION 3 - Generate trading signals ###
tSig <- Op(pnlEnv$VXX)/Op(pnlEnv$VXZ)
colnames(tSig) <- "Signal"
# -1 = carry trade (contango), +1 = reverse carry (backwardation), 0 = flat
vxxSignal <- apply(tSig, 1, function(x) {
  if(x<signalLowLim) { return (-1) }
  else { if(x>signalUpperLim) { return(1) } else { return (0) } }
})
vxzSignal <- -1 * vxxSignal

strategyRet <- ((vxxSignal * pnlEnv$VXX$Ret) + hedgeratio * (vxzSignal * pnlEnv$VXZ$Ret))
strategyRet <- window(strategyRet, start=tradingStartDate, end=tradingEndDate, extend = FALSE)
# Normalise the amount of money being invested on each trade so that we can
# compare to the S&P index later
strategyRet <- strategyRet * 1 / (1+abs(hedgeratio))
colnames(strategyRet) <- "StrategyReturns"

#--------------------------------------------------------------
### SECTION 4 - Performance Analysis ###   (FIXED: was mislabelled SECTION 5)
## Get the S&P 500 index data:
indexData <- pnlEnv
indexRet <- (Cl(indexData$GSPC)-lag(Cl(indexData$GSPC),1))/lag(Cl(indexData$GSPC),1)
colnames(indexRet) <- "IndexRet"
pnlVec <- cbind(as.zoo(strategyRet),as.zoo(indexRet))  # Convert to zoo object
colnames(pnlVec) <- c("Vol Carry Trade","S&P500")
pnlVec <- na.omit(pnlVec)

charts.PerformanceSummary(pnlVec, main="Performance of Volatility Carry Trade", geometric=FALSE)

# Lets calculate a table of monthly returns by year and strategy
cat("Calendar Returns - Note 13.5 means a return of 13.5%\n")
print(table.CalendarReturns(pnlVec))
# Calculate the sharpe ratio
cat("Sharpe Ratio")
print(SharpeRatio.annualized(pnlVec))
# Calculate other statistics
cat("Other Statistics")
print(table.CAPM(pnlVec[,"Vol Carry Trade"],pnlVec[,"S&P500"]))

chart.Boxplot(pnlVec)
layout(rbind(c(1,2),c(3,4)))
chart.Histogram(pnlVec, main = "Plain", methods = NULL)
chart.Histogram(pnlVec, main = "Density", breaks=40, methods = c("add.density", "add.normal"))
chart.Histogram(pnlVec, main = "Skew and Kurt", methods = c("add.centered", "add.rug"))
chart.Histogram(pnlVec, main = "Risk Measures", methods = c("add.risk"))
layout(c(1,1))

## 2012-06-14, 2:06 PM, Thursday

### Static members and functions

// A static data member is shared by all objects of the class.
// FIXED NOTE: static storage is zero-initialized before main() runs
// (the old comment wrongly said "when the first object is created").
// The definition/initialization cannot go inside the class body; it must
// appear exactly once outside the class.
// http://tohtml.com/cpp/
#include <iostream>
using namespace std;

class MyClass {
public:
    MyClass() { std::cout << "default constructor" << ++count << endl; }
    static int count;  // one shared counter across every instance
};

// Out-of-class definition (required exactly once per program).
int MyClass::count = 0;

int main(int argc, char** argv)
{
    MyClass* myObjArray = new MyClass[5];  // ctor runs 5 times -> count == 5
    cout << MyClass::count << endl;
    delete[] myObjArray;  // BUG FIXED: the array used to leak
    return 0;
}
// A static member function can be called even if no objects of the class exist,
// and the static functions are accessed using only the class name and the
// scope resolution operator ::.

## 2012-06-06, 6:33 PM, Wednesday

### ASCII Cstring/CharArray to float

#include <iostream> using namespace std; double atofA(char s[]) { int sign = 1, i = 0, left = 0, power = 10; double right = 0.0; // Determine the sign: if (s[0]=='-') { sign = -1; i++; } // Calculate the integer part - left: while(s[i] && s[i]!='.') { left = left*10 + (s[i]-'0'); i++; } // Calculate the float part - right: if (s[i]=='.') { i++; while(s[i]) { right = right + (double)(s[i]-'0')/(power); power*=10; i++; } } return sign*(left+right); } int main() { double d = atofA("-314.1592"); // double d = atofA("-.1592"); cout.precision(7); cout << d << endl; return 0; } // Syntax highlighted at: http://tohtml.com/cpp/

## 2012-05-31, 9:06 PM, Thursday

### Remove Specified Characters

// Remove every character listed in r[] from s[].
#include <iostream>
#include <cstring>  // strlen, strncpy (was missing)
using namespace std;

int main()
{
    char s[] = "Battle of the Vowels: Hawaii vs. Grozny";
    char r[] = "aeiou";

    // Lookup table of characters to remove.
    // BUG FIXED: the old 'new int[128]' was never zeroed, so untouched
    // entries held garbage and the filter behaved unpredictably.
    // (256 entries + unsigned char indexing also covers bytes >= 128.)
    bool flags[256] = {false};
    size_t rlen = strlen(r);   // hoisted: strlen used to run every iteration
    for (size_t i = 0; i < rlen; ++i)
        flags[(unsigned char)r[i]] = true;

    // Compact s in place, skipping flagged characters.
    size_t slen = strlen(s);
    size_t dst = 0;
    for (size_t src = 0; src < slen; ++src) {
        if (!flags[(unsigned char)s[src]])
            s[dst++] = s[src];
    }

    char* res = new char[dst + 1];
    strncpy(res, s, dst);
    res[dst] = '\0';  // BUG FIXED: '' is not a valid character literal
    cout << res << endl;
    delete[] res;
    return 0;
}
//Highlighted at http://tohtml.com/cpp/

### First Non-Repeated Character

// Find the first non-repeated character in a C string.
#include <iostream>
#include <string>
#include <cstring>  // strlen (was missing)
using namespace std;

// Notes:
//  - The first 128 code points (0-127) of Unicode correspond to the
//    letters and symbols on a standard U.S. keyboard.
//  - A C-style string is a char array terminated by '\0':
//      char x[] = "abc";    -> strlen(x) == 3 (terminator not counted)
//      char x[] = "abcdef"; -> strlen(x) == 6
//    (FIXED: the old comments claimed strlen("abcdef") == 3 and trailed
//    off mid-sentence.)

// True iff no character occurs twice in str.
bool isUniqueChars(string str)
{
    bool char_set[256] = {0};
    for (string::size_type i = 0; i < str.size(); i++) {
        // BUG FIXED: cast via unsigned char so bytes >= 128 don't produce
        // a negative array index.
        int val = (unsigned char)str[i];
        if (char_set[val])
            return false;
        char_set[val] = true;
    }
    return true;
}

int main()
{
    char str[20] = "abcd", xRepeat = ' ';
    bool iRepeat = false;
    int i = 0, len = strlen(str);

    // O(n^2) scan: for each char, look for another occurrence elsewhere.
    for (i = 0; i < len; i++) {
        iRepeat = false;
        for (int j = 0; j < len; j++) {
            if (j != i && str[j] == str[i]) {
                iRepeat = true;
                break;
            }
        }
        if (!iRepeat) {
            xRepeat = str[i];
            break;
        }
    }
    cout << "In the string of " << str << endl;
    cout << "the first nonrepeat char is the " << i+1 << " th char: "
         << xRepeat << endl << endl;
    cout << isUniqueChars(str) << endl;
    return 0;
}
//Highlighted at http://tohtml.com/cpp/
//Bred 3 + C++