Thursday, December 11, 2008

Integrating Quartz and Spring using persistent jobs

I've recently found myself having a lot of jobs running in the background of our Java applications, often requiring changes to the scheduling based on my boss' whims. Combine this with the fact that our system's API is now seeing heavy use, and having the triggers configured in the Spring application context (and thus requiring a restart in order to change them) just doesn't cut it any more. Thus, I now find myself needing persistent jobs. However, there's not a lot of documentation out there for dealing with Quartz' persistent job feature in Spring, so I'm going to hopefully provide some help. To start, you need to create the quartz tables in whatever database you're going to be using. There are scripts for creating these tables in the Quartz distribution available on their website for whatever database you may be using. For convenience, I've copied the MySQL InnoDB script here :


-- Quartz 1.6.x JDBC-JobStore schema for MySQL, InnoDB storage engine.
-- Note: ENGINE=InnoDB replaces the deprecated TYPE=InnoDB syntax
-- (TYPE= was deprecated in MySQL 4.x and removed entirely in MySQL 5.5).
-- Drop order is children-first so foreign-key references never block a drop.
DROP TABLE IF EXISTS QRTZ_JOB_LISTENERS;
DROP TABLE IF EXISTS QRTZ_TRIGGER_LISTENERS;
DROP TABLE IF EXISTS QRTZ_FIRED_TRIGGERS;
DROP TABLE IF EXISTS QRTZ_PAUSED_TRIGGER_GRPS;
DROP TABLE IF EXISTS QRTZ_SCHEDULER_STATE;
DROP TABLE IF EXISTS QRTZ_LOCKS;
DROP TABLE IF EXISTS QRTZ_SIMPLE_TRIGGERS;
DROP TABLE IF EXISTS QRTZ_CRON_TRIGGERS;
DROP TABLE IF EXISTS QRTZ_BLOB_TRIGGERS;
DROP TABLE IF EXISTS QRTZ_TRIGGERS;
DROP TABLE IF EXISTS QRTZ_JOB_DETAILS;
DROP TABLE IF EXISTS QRTZ_CALENDARS;

-- One row per persisted JobDetail. The IS_* flags are Quartz's own
-- single-character boolean convention ('Y'/'N').
CREATE TABLE QRTZ_JOB_DETAILS(
JOB_NAME VARCHAR(200) NOT NULL,
JOB_GROUP VARCHAR(200) NOT NULL,
DESCRIPTION VARCHAR(250) NULL,
JOB_CLASS_NAME VARCHAR(250) NOT NULL,
IS_DURABLE VARCHAR(1) NOT NULL,
IS_VOLATILE VARCHAR(1) NOT NULL,
IS_STATEFUL VARCHAR(1) NOT NULL,
REQUESTS_RECOVERY VARCHAR(1) NOT NULL,
JOB_DATA BLOB NULL,
PRIMARY KEY (JOB_NAME,JOB_GROUP))
ENGINE=InnoDB;

CREATE TABLE QRTZ_JOB_LISTENERS (
JOB_NAME VARCHAR(200) NOT NULL,
JOB_GROUP VARCHAR(200) NOT NULL,
JOB_LISTENER VARCHAR(200) NOT NULL,
PRIMARY KEY (JOB_NAME,JOB_GROUP,JOB_LISTENER),
INDEX (JOB_NAME, JOB_GROUP),
FOREIGN KEY (JOB_NAME,JOB_GROUP)
REFERENCES QRTZ_JOB_DETAILS(JOB_NAME,JOB_GROUP))
ENGINE=InnoDB;

-- Base trigger table; type-specific detail lives in the
-- QRTZ_SIMPLE_TRIGGERS / QRTZ_CRON_TRIGGERS / QRTZ_BLOB_TRIGGERS children.
-- Fire times are stored as epoch milliseconds (BIGINT), not DATETIME.
CREATE TABLE QRTZ_TRIGGERS (
TRIGGER_NAME VARCHAR(200) NOT NULL,
TRIGGER_GROUP VARCHAR(200) NOT NULL,
JOB_NAME VARCHAR(200) NOT NULL,
JOB_GROUP VARCHAR(200) NOT NULL,
IS_VOLATILE VARCHAR(1) NOT NULL,
DESCRIPTION VARCHAR(250) NULL,
NEXT_FIRE_TIME BIGINT(13) NULL,
PREV_FIRE_TIME BIGINT(13) NULL,
PRIORITY INTEGER NULL,
TRIGGER_STATE VARCHAR(16) NOT NULL,
TRIGGER_TYPE VARCHAR(8) NOT NULL,
START_TIME BIGINT(13) NOT NULL,
END_TIME BIGINT(13) NULL,
CALENDAR_NAME VARCHAR(200) NULL,
MISFIRE_INSTR SMALLINT(2) NULL,
JOB_DATA BLOB NULL,
PRIMARY KEY (TRIGGER_NAME,TRIGGER_GROUP),
INDEX (JOB_NAME, JOB_GROUP),
FOREIGN KEY (JOB_NAME,JOB_GROUP)
REFERENCES QRTZ_JOB_DETAILS(JOB_NAME,JOB_GROUP))
ENGINE=InnoDB;

CREATE TABLE QRTZ_SIMPLE_TRIGGERS (
TRIGGER_NAME VARCHAR(200) NOT NULL,
TRIGGER_GROUP VARCHAR(200) NOT NULL,
REPEAT_COUNT BIGINT(7) NOT NULL,
REPEAT_INTERVAL BIGINT(12) NOT NULL,
TIMES_TRIGGERED BIGINT(7) NOT NULL,
PRIMARY KEY (TRIGGER_NAME,TRIGGER_GROUP),
INDEX (TRIGGER_NAME, TRIGGER_GROUP),
FOREIGN KEY (TRIGGER_NAME,TRIGGER_GROUP)
REFERENCES QRTZ_TRIGGERS(TRIGGER_NAME,TRIGGER_GROUP))
ENGINE=InnoDB;

CREATE TABLE QRTZ_CRON_TRIGGERS (
TRIGGER_NAME VARCHAR(200) NOT NULL,
TRIGGER_GROUP VARCHAR(200) NOT NULL,
CRON_EXPRESSION VARCHAR(120) NOT NULL,
TIME_ZONE_ID VARCHAR(80),
PRIMARY KEY (TRIGGER_NAME,TRIGGER_GROUP),
INDEX (TRIGGER_NAME, TRIGGER_GROUP),
FOREIGN KEY (TRIGGER_NAME,TRIGGER_GROUP)
REFERENCES QRTZ_TRIGGERS(TRIGGER_NAME,TRIGGER_GROUP))
ENGINE=InnoDB;

CREATE TABLE QRTZ_BLOB_TRIGGERS (
TRIGGER_NAME VARCHAR(200) NOT NULL,
TRIGGER_GROUP VARCHAR(200) NOT NULL,
BLOB_DATA BLOB NULL,
PRIMARY KEY (TRIGGER_NAME,TRIGGER_GROUP),
INDEX (TRIGGER_NAME, TRIGGER_GROUP),
FOREIGN KEY (TRIGGER_NAME,TRIGGER_GROUP)
REFERENCES QRTZ_TRIGGERS(TRIGGER_NAME,TRIGGER_GROUP))
ENGINE=InnoDB;

CREATE TABLE QRTZ_TRIGGER_LISTENERS (
TRIGGER_NAME VARCHAR(200) NOT NULL,
TRIGGER_GROUP VARCHAR(200) NOT NULL,
TRIGGER_LISTENER VARCHAR(200) NOT NULL,
PRIMARY KEY (TRIGGER_NAME,TRIGGER_GROUP,TRIGGER_LISTENER),
INDEX (TRIGGER_NAME, TRIGGER_GROUP),
FOREIGN KEY (TRIGGER_NAME,TRIGGER_GROUP)
REFERENCES QRTZ_TRIGGERS(TRIGGER_NAME,TRIGGER_GROUP))
ENGINE=InnoDB;

-- Serialized org.quartz.Calendar instances, keyed by name.
CREATE TABLE QRTZ_CALENDARS (
CALENDAR_NAME VARCHAR(200) NOT NULL,
CALENDAR BLOB NOT NULL,
PRIMARY KEY (CALENDAR_NAME))
ENGINE=InnoDB;

CREATE TABLE QRTZ_PAUSED_TRIGGER_GRPS (
TRIGGER_GROUP VARCHAR(200) NOT NULL,
PRIMARY KEY (TRIGGER_GROUP))
ENGINE=InnoDB;

-- In-flight trigger executions; used for recovery and cluster coordination.
CREATE TABLE QRTZ_FIRED_TRIGGERS (
ENTRY_ID VARCHAR(95) NOT NULL,
TRIGGER_NAME VARCHAR(200) NOT NULL,
TRIGGER_GROUP VARCHAR(200) NOT NULL,
IS_VOLATILE VARCHAR(1) NOT NULL,
INSTANCE_NAME VARCHAR(200) NOT NULL,
FIRED_TIME BIGINT(13) NOT NULL,
PRIORITY INTEGER NOT NULL,
STATE VARCHAR(16) NOT NULL,
JOB_NAME VARCHAR(200) NULL,
JOB_GROUP VARCHAR(200) NULL,
IS_STATEFUL VARCHAR(1) NULL,
REQUESTS_RECOVERY VARCHAR(1) NULL,
PRIMARY KEY (ENTRY_ID))
ENGINE=InnoDB;

CREATE TABLE QRTZ_SCHEDULER_STATE (
INSTANCE_NAME VARCHAR(200) NOT NULL,
LAST_CHECKIN_TIME BIGINT(13) NOT NULL,
CHECKIN_INTERVAL BIGINT(13) NOT NULL,
PRIMARY KEY (INSTANCE_NAME))
ENGINE=InnoDB;

-- Row-level lock anchors used by Quartz to serialize access to the
-- tables above; the five rows inserted below are required by Quartz.
CREATE TABLE QRTZ_LOCKS (
LOCK_NAME VARCHAR(40) NOT NULL,
PRIMARY KEY (LOCK_NAME))
ENGINE=InnoDB;

INSERT INTO QRTZ_LOCKS (LOCK_NAME) VALUES ('TRIGGER_ACCESS');
INSERT INTO QRTZ_LOCKS (LOCK_NAME) VALUES ('JOB_ACCESS');
INSERT INTO QRTZ_LOCKS (LOCK_NAME) VALUES ('CALENDAR_ACCESS');
INSERT INTO QRTZ_LOCKS (LOCK_NAME) VALUES ('STATE_ACCESS');
INSERT INTO QRTZ_LOCKS (LOCK_NAME) VALUES ('MISFIRE_ACCESS');
COMMIT;


Once this is done, you'll need to integrate Quartz with Spring. If you're using Maven 2, getting Quartz into your project is as simple as adding the following snippet to the <dependencies> section of your pom.xml configuration file.


<dependency>
<groupId>opensymphony</groupId>
<artifactId>quartz</artifactId>
<version>1.6.0</version>
<scope>provided</scope>
</dependency>


Note that you're going to need Quartz version 1.6.0 to work with Spring 2.5.3 or greater, otherwise you can use Quartz 1.5.x. After you've got Quartz into your project, you'll need to define a Scheduler that accesses your database to schedule your jobs. Fortunately, Spring happens to come with a handy FactoryBean for defining Quartz schedulers. I've used the following :


<bean class="org.springframework.scheduling.quartz.SchedulerFactoryBean">
<property name="jobFactory">
<bean class="org.springframework.scheduling.quartz.SpringBeanJobFactory"/>
</property>
<property name="dataSource" ref="mainDataSource" />
<property name="transactionManager" ref="mainDataSourceTransactionManager" />
<property name="quartzProperties">
<util:properties location="/WEB-INF/config/quartz.properties"/>
</property>
<property name="applicationContextSchedulerContextKey" value="applicationContext"/>
<property name="waitForJobsToCompleteOnShutdown" value="true" />
</bean>


The above Spring beans configuration snippet requires some explanation, so let's go through it.

  • The property 'jobFactory' is a Spring implementation of the Quartz JobFactory interface. When the scheduler encounters a trigger in the database that's to be fired, it loads the corresponding JobDetails from the database, loads it into a JobDetail object, places that JobDetail object into a TriggerFiredBundle, and passes the TriggerFiredBundle to the JobFactory interface to obtain a Job that's to be run. Now, you might think that because of the name of Spring's JobFactory implementation, it's going to do something with a bean defined in the Spring ApplicationContext, right ? Wrong. What this implementation does is get the Job class provided by JobDetail.getClass(), instantiate a new instance of it with a default no-arg constructor, possibly populate it with properties, and then run the Job. Which sucks, and is kind of useless, because no Jobs I need can do so without getting something from Spring's ApplicationContext. More on this later.

  • The 'dataSource' property should be the javax.sql.DataSource used by your application to connect to whatever database you're using that stores the persistent Jobs in the tables used by Quartz.

  • The 'transactionManager' property should be set to a org.springframework.transaction.PlatformTransactionManager implementation that's used to demarcate transactions in the database. If you don't have one for your dataSource already defined, then it's fortunate that Spring comes with one that you can define and use, like so :

    <bean id="mainDataSourceTransactionManager" class="org.springframework.jdbc.datasource.DataSourceTransactionManager" p:dataSource-ref="mainDataSource" />


  • The 'quartzProperties' property should be configured with a java.util.Properties instance containing configuration values for Quartz. A sample is posted here for you :

    org.quartz.threadPool.class=org.quartz.simpl.SimpleThreadPool
    org.quartz.threadPool.threadCount=5
    org.quartz.threadPool.threadPriority=4

    org.quartz.jobStore.tablePrefix=QRTZ_
    org.quartz.jobStore.isClustered=false
    org.quartz.jobStore.driverDelegateClass=org.quartz.impl.jdbcjobstore.StdJDBCDelegate

    This sample contains a few simple properties that should be enough to get you started. There are numerous others that you should look into in the Quartz documentation. The first three properties are for configuring Quartz' thread-pooling capabilities. The latter three are for configuring the database (and access thereto-) that Quartz uses for storing persistent jobs. The table prefix is used to prefix Quartz' tables so that they don't conflict with any pre-existing tables in your database. The 'isClustered' property is used to determine whether Quartz is acting in a cluster. Clustered Quartz configurations are beyond the scope of this article. The 'driverDelegateClass' property is used to determine the class Quartz will use for dealing with your specific dialect of SQL (ie MySQL, MS-SQL, PostgreSQL, etc).

  • The 'applicationContextSchedulerContextKey' property sets a key that's used to access the Spring ApplicationContext in the JobExecutionContext given to a Job implementation at run time after the SpringBeanJobFactory has populated it for you. This is important as this property does not have a default value and will not put the ApplicationContext into the JobExecutionContext for you unless you specify one.

  • The 'waitForJobsToCompleteOnShutdown' property specifies, well, I think you can figure that one out.



At this point in time, you've got the tables which Quartz requires for persistent jobs defined (and presumably accessible) in your database, you've got the Quartz libraries present in your classpath, and you've got Quartz configured within Spring so that Quartz will run and check the database for jobs to be run. However, we're still missing a few more important pieces of information.


  1. JobDetails stored in the database describing jobs to be run

  2. Triggers defined in the database for describing when JobDetails will be executed



For the rest of this article, lets assume that you've set your Quartz table prefix as 'qrtz_'. In your database, lets look at the definition of the 'qrtz_job_details' table. It includes columns for defining job_name, job_group, description, job_class_name, is_durable, is_volatile, is_stateful, requests_recovery, and job_data.

The 'job_name' field is, as you've almost certainly guessed, the name of the job that you want to define. The job must also have a 'job_group' specified because triggers can be used to signal the execution of entire groups of jobs at once, not just single jobs. The two of these fields together form a unique key for the table. If you really don't care about grouping jobs, you can just use Quartz' default group name, which is 'DEFAULT'. You also need to set a 'job_class_name'. This value is a fully qualified Java class name that must have a default, no-arg constructor (ie it's a bean, according to the Java Beans specification). This class will get instantiated at run time by the SpringBeanJobFactory and run as a Quartz job. Note that this class, obviously, must implement the org.quartz.Job interface. A description of the job can be put in the 'description' field if you want to have a log-friendly message available to you. The is_durable, is_volatile, is_stateful, and requests_recovery fields are all (essentially) boolean fields that Quartz defines as being single characters. These fields can have the values 'Y' or 'N'. See the Quartz API for org.quartz.JobDetail for further elaboration on the meaning of these fields. The 'job_data' field is a BLOB object that's used to serialize the 'jobDataMap' (java.util.Map) associated with the JobDetail. In order to populate this field, you'll obviously have to write some JDBC code.

The 'qrtz_job_details' table is, obviously, where you store the information for the org.quartz.Jobs you want to run. Anytime you need to define a new job, you'll need to insert a new row into this table, properly configured of course and paying particular attention to the 'job_class_name' field.

Ok, so assuming you've got a couple rows in this table (ie defined a couple of jobs), now you'll need to schedule them so that, at some point, they'll actually run. This is where triggers come in. Now, in Quartz, there are a few different methods of triggering off Jobs, by far the most common being org.quartz.SimpleTriggers (qrtz_simple_triggers table) and org.quartz.CronTriggers (qrtz_cron_triggers table). SimpleTriggers have their use, but our company uses some pretty complex scheduling at times, so I'm going to do an example of using the cron triggers.

The 'qrtz_cron_triggers' table is pretty simple, it only has four fields : 'trigger_name', 'trigger_group', 'cron_expression' and 'time_zone_id', all of which should be pretty indicative of their content. The 'time_zone_id' field should be a valid id for a java.util.TimeZone, ie 'America/Denver'. See the documentation for that class for more on valid IDs.

Once you've set up a trigger with a valid name, group (ie DEFAULT), cron expression and TimeZone ID, you're pretty much ready to go. When Quartz starts up with your application, it'll consult the 'qrtz_triggers' table and ensure that entries exist for all of your corresponding cron / simple / blob triggers. It'll then proceed to fire off triggers and the Scheduler will pass off jobs to the SpringBeanJobFactory for execution. Now, if you haven't figured it out already, if you've got a lot of jobs, so far from what this article has shown you, you'll have to create a Quartz Job implementation for each job you want to run off, and even then, it'll have to be pretty simple, since you'll have to be able to instantiate it with a no-arg constructor, and so far you've got no way of injecting it with anything from Spring, aside from simple properties that are defined by the 'schedulerContextAsMap' property on the Spring SchedulerFactoryBean. So, what I did was create a class that uses its JobDetail#name as a convention for getting a bean from the application context, assumes it's an org.quartz.Job implementation, and executes it. The code is attached :


public class SpringBeanDelegatingJob implements Job {

    private static final Log LOGGER = LogFactory.getLog(SpringBeanDelegatingJob.class);

    /** SchedulerContext key under which SchedulerFactoryBean stores the Spring ApplicationContext. */
    public static final String APPLICATION_CONTEXT_KEY = "applicationContext";

    /**
     * Resolves a Spring-managed {@link Job} bean by convention — the JobDetail
     * name with any trailing "Detail" stripped — from the ApplicationContext
     * stored in the SchedulerContext, and delegates execution to that bean.
     *
     * @param arg0 the Quartz execution context for the fired trigger
     * @throws JobExecutionException if the SchedulerContext cannot be read, no
     *         ApplicationContext was published under {@link #APPLICATION_CONTEXT_KEY},
     *         or no Job bean with the derived name exists
     */
    @SuppressWarnings("unchecked")
    public void execute(JobExecutionContext arg0) throws JobExecutionException {

        JobDetail jobDetail = arg0.getJobDetail();

        // Convention: a JobDetail named "myJobDetail" delegates to the bean "myJob".
        String beanName = substringBefore(jobDetail.getName(), "Detail");

        if (LOGGER.isInfoEnabled()) {
            LOGGER.info("Running SpringBeanDelegatingJob - Job Name ["+jobDetail.getName()+"], Group Name ["+jobDetail.getGroup()+"]");
            LOGGER.info("Delegating to bean ["+beanName+"]");
        }

        ApplicationContext applicationContext = null;

        try {
            applicationContext = (ApplicationContext) arg0.getScheduler().getContext().get(APPLICATION_CONTEXT_KEY);
        } catch (SchedulerException e2) {
            throw new JobExecutionException("Unable to obtain the SchedulerContext from the Scheduler", e2);
        }

        // Fail fast with a clear message instead of an opaque NullPointerException
        // when 'applicationContextSchedulerContextKey' was never configured on the
        // SchedulerFactoryBean.
        if (applicationContext == null) {
            throw new JobExecutionException("No ApplicationContext found in the SchedulerContext under the key ["+APPLICATION_CONTEXT_KEY+"]");
        }

        Job bean = null;

        try {
            bean = (Job) applicationContext.getBean(beanName, Job.class);
        } catch (BeansException e1) {
            throw new JobExecutionException("Unable to retrieve target bean that is to be used as a job source", e1);
        }

        bean.execute(arg0);
    }

}


Now, this makes the persisted jobs a lot more capable : we can now run org.quartz.Job beans that are declared in our Spring ApplicationContext. What if we want to be able to run arbitrary methods on arbitrary beans as jobs ? Well, we can write an adapter class for that too, quite similar (though not as functional as) Spring's MethodInvokingJobDetailFactoryBean :


public class SpringBeanMethodInvokingJob implements InitializingBean, Job {

    private Object targetBean;   // bean whose method is invoked when the job fires
    private String targetMethod; // name of the public no-arg method to invoke

    //Constructors

    public SpringBeanMethodInvokingJob() {
        super();
    }

    //Behaviour Methods

    /**
     * Reflectively invokes the configured no-arg {@code targetMethod} on
     * {@code targetBean} as the body of this Quartz job.
     *
     * @param arg0 the Quartz execution context (not used)
     * @throws JobExecutionException if the method cannot be resolved or the
     *         invocation fails; the underlying reflection exception is attached
     *         as the cause so the real failure is not lost
     */
    public void execute(JobExecutionContext arg0) throws JobExecutionException {

        Method method = null;

        try {
            method = targetBean.getClass().getMethod(targetMethod);
        } catch (Exception e) {
            // Chain the cause: without it, the original NoSuchMethodException /
            // SecurityException detail disappears from the logs.
            throw new JobExecutionException("Unable to get targetMethod ["+targetMethod+
                    "] on bean with class ["+targetBean.getClass().getName()+"]", e);
        }

        try {
            method.invoke(targetBean);
        } catch (Exception e) {
            // Chain the cause for the same reason as above (e.g. the
            // InvocationTargetException wrapping the job's own failure).
            throw new JobExecutionException("Unable to invoke method ["+method.getName()+"] on bean ["+targetBean.toString()+"]", e);
        }
    }

    /**
     * Validates that both required properties were injected before the bean
     * is put into service.
     */
    public void afterPropertiesSet() throws Exception {
        Assert.notNull(targetBean, "'targetBean' cannot be null");
        Assert.isTrue(isNotBlank(targetMethod), "'targetMethod' cannot be blank");
    }

    //Property Accessors

    /** Sets the bean on which the target method will be invoked. Required. */
    @Required
    public final void setTargetBean(Object targetBean) {
        this.targetBean = targetBean;
    }

    /** Sets the name of the public no-arg method to invoke. Required. */
    @Required
    public final void setTargetMethod(String targetMethod) {
        this.targetMethod = targetMethod;
    }

}


Note that the class above implements org.quartz.Job, not org.quartz.JobDetail as is the product of MethodInvokingJobDetailFactoryBean. I hope this article has been useful for anybody reading it, feel free to comment on it.

Friday, December 05, 2008

The greatest MySQL companion ever

I just recently found out about the 'show processlist' command in MySQL that lets you get a resultset of all the processes MySQL is currently using and what query they're running. Today I was working in top monitoring the performance of our servers as I restarted our production server. MySQL popped up briefly on the top list of running processes and it occurred to me, "Wouldn't it be great if there were something like this for MySQL?". So while I was waiting, I popped open google, and ran a query for MySQL and top. To my surprise, up came mytop. It's literally top for MySQL and it's so ridiculously useful you wouldn't believe it. Even better, it's got a .deb made for it and it's in the Ubuntu repositories, so installing it is as simple as running :

apt-get install mytop


It's a good day today :)

Setting a user's password in MySQL

I've recently found the need to change a bunch of passwords in MySQL. The command to do it is as follows :

SET PASSWORD FOR 'myuser'@'%.wherever.com' = PASSWORD('newpass');


You can also read the documentation on the MySQL website.

Wednesday, December 03, 2008

Finding out what MySQL is doing

I was recently updating our production system when I noticed that our main MySQL database was going unusually hard. On a quad-core processor system, it was using up three processors simultaneously. This prompted me to find out what MySQL is doing, and after being unable to read the logs, I did a Google search and ran across this useful little command :

show processlist


This runs a query (can be run from either the command line or the query browser) and shows you a summary of all the processes that MySQL is currently using.

Wednesday, November 26, 2008

Migrating from Spring Webflow 1 to Webflow 2

...was relatively painless. At least in the very small app that I'm currently working on as a helper application for dealing with one of our partners' systems. The flow definition migration assistant that comes with Webflow 2 is a huge help. There are, however, a few things to take note of when it comes to differences between Webflow 1 and Webflow 2. They are significantly different beasts to say the least :



  • My configuration went from this (in Webflow 1) :



    <flow:registry id="flowRegistry">
    <flow:location path="/WEB-INF/flows/**/*-flow.xml"/>
    </flow:registry>

    <flow:executor id="flowExecutor" registry-ref="flowRegistry">
    <!--flow:execution-listeners>
    <flow:listener ref="webflowDebugListener"/>
    </flow:execution-listeners-->
    </flow:executor>

    <!--bean id="webflowDebugListener" class="org.springframework.webflow.execution.DebuggingListener"/-->

    <bean name="flowController" class="org.springframework.webflow.executor.mvc.FlowController">
    <property name="flowExecutor" ref="flowExecutor" />
    <property name="argumentHandler">
    <bean class="org.springframework.webflow.executor.support.RequestParameterFlowExecutorArgumentHandler" />
    </property>
    </bean>


    To this (in Webflow 2) :



    <webflow:flow-registry id="flowRegistry" flow-builder-services="flowBuilderServices">
    <webflow:flow-location-pattern value="/WEB-INF/flows/**/*-flow.xml"/>
    </webflow:flow-registry>

    <webflow:flow-executor id="flowExecutor" flow-registry="flowRegistry">
    <webflow:flow-execution-listeners>
    <!-- webflow:listener ref="webflowDebugListener"/ -->
    <webflow:listener ref="securityFlowExecutionListener"/>
    </webflow:flow-execution-listeners>
    </webflow:flow-executor>

    <bean id="securityFlowExecutionListener" class="org.springframework.webflow.security.SecurityFlowExecutionListener" />

    <webflow:flow-builder-services id="flowBuilderServices" view-factory-creator="viewFactoryCreator" conversion-service="webflowConversionService"/>

    <bean id="webflowConversionService" class="com.mypackage.modules.springframework.webflow2.ConversionServiceFactoryBean">
    <property name="editorMappings">
    <map>
    <entry key="java.util.Calendar"><idref bean="sqlDateCalendarEditor"/></entry>
    </map>
    </property>
    </bean>

    <bean id="viewFactoryCreator" class="org.springframework.webflow.mvc.builder.MvcViewFactoryCreator">
    <property name="viewResolvers">
    <list>
    <ref local="xmlViewResolver"/>
    <ref local="decoratedJstlViewResolver"/>
    <ref local="urlBasedViewResolver"/>
    </list>
    </property>
    </bean>

    <bean id="webflowDebugListener" class="org.springframework.webflow.execution.DebuggingListener"/>

    <bean name="flowController" class="org.springframework.webflow.mvc.servlet.FlowController">
    <property name="flowExecutor" ref="flowExecutor" />
    <property name="flowUrlHandler">
    <bean class="org.springframework.webflow.context.servlet.DefaultFlowUrlHandler" />
    </property>
    </bean>


    Now, you may be asking yourself, "Why would there be more configuration in Spring Webflow 2 ? Doesn't Spring generally improve on things like configuration syntax between major versions ?" The answer is, of course, yes. However, there are a number of new features that come with Webflow 2 that need to be configured, hence the extra beans for configuration. Note also, that the syntax has changed. I direct your attention to the added 'flow-' prefixes on element names of the previously existing Webflow elements.




  • Webflow 2 has much better, much closer integration with Spring Security, hence the need for SecurityFlowExecutionListener listed above. I haven't had a chance to play around with the new integration between Spring Security 2 and Spring Webflow 2, but I'm quickly getting there.


  • Thanks to the built in model binding and validation that comes with Spring Webflow 2, there's much less syntax for views that simply bind to a model object on transitions. Instead of having to add in FormActions and adding action elements to the view-state you want them applied to, you can now simply specify a model name for the 'model' attribute of the view state, and Webflow 2 will automatically bind the form object for you (which leads us into our next point). Of course, the Spring developers maintained their usual style of keeping things as configurable as possible, so you can not only choose not to bind the model object for particular state transitions, but you can disable validation as well, using the new 'bind' and 'validate' attributes of Webflow 2 'transition' elements.


  • Because Webflow 2 can now do binding and validation for us right in the flow definition without having to define FormAction beans in our Spring ApplicationContext, Webflow 2 comes with its own updates to the spring-binding framework, hence the need for the 'webflowConversionService' bean in the configuration above. Now, you're probably saying to yourself right now, "Well what the fuck am I supposed to do with the custom PropertyEditors I wrote for my objects when dealing with Spring / Webflow 1 ?". Don't worry, all is not lost. There's a PropertyEditorConverter class that adapts PropertyEditor implementations to the new Webflow 2 Converter interface. You can use this to interface your editors with Webflow 2, and in fact that's what I did.


  • Webflow 1 left it up to the Spring framework to resolve and render views. Some people found this inconvenient, so Webflow 2 now allows you to have Webflow directly resolve and render views itself. However, since I'm already using the Spring framework and don't need Webflow's abstraction away from the underlaying framework, I chose to keep the Webflow 1 style of resolution and view rendering, hence the need for the 'viewFactoryCreator' bean above.



Once all the new configuration was done and out of the way, it was simply a matter of upgrading my existing flows to the new Webflow 2 syntax, which was almost entirely taken care of by the migrator that comes with Webflow 2. Previously, I used the 'var' element heavily to make use of beans in my Spring ApplicationContext, but the 'var' element has since had its usage completely reworked for Webflow 2, so I'm going to have to refactor a couple of my flows and change my style of design with Webflow 2.

One other feature that I'm learning about as I write this post is the Webflow 2 PersistenceContext. It's a solution for the use case where you want to be able to load objects from a database, and only allow a single user to be able to access them at once, ie you wouldn't want multiple people logging into a database and being able to access data on a particular user at the same time and potentially overwrite each others' changes. I'll post more about that later.

Friday, November 21, 2008

Migrating from Acegi Security to Spring Security 2

In an effort to keep our applications current, I've been looking at upgrading from Acegi Security to Spring Security 2 in one of our smaller applications as a test, to see how well it'd go over for our larger applications. So far, the tests are pretty positive. The main advantage to the migration that I can see is improved Web Service security (which I'll definitely be needing), as well as a considerably simplified configuration syntax. The following is a small migration guide :

0. If you haven't already, update your project to use Spring 2.5.4 or greater. This is necessary because of changes to Spring that Spring Security (as well as other Spring subprojects) requires. If you're using Maven 2, this is dead easy, you need only update the <version> elements of the appropriate dependencies.

1. Change all package references starting with 'org.acegisecurity' to 'org.springframework.security' in your code. If you've architected your system right, this should mean very few changes.

2. In your POM, remove all mention of Acegi security and replace it with the following :

<dependency>
<groupId>org.springframework.security</groupId>
<artifactId>spring-security-core-tiger</artifactId>
<version>${spring.security.version}</version>

<exclusions>
<exclusion>
<groupId>org.springframework</groupId>
<artifactId>spring-core</artifactId>
</exclusion>

<exclusion>
<groupId>org.springframework</groupId>
<artifactId>spring-support</artifactId>
</exclusion>
</exclusions>

</dependency>
...
<dependency>
<groupId>org.springframework.security</groupId>
<artifactId>spring-security-taglibs</artifactId>
<version>${spring.security.version}</version>

<exclusions>
<exclusion>
<groupId>org.springframework</groupId>
<artifactId>spring-web</artifactId>
</exclusion>

</exclusions>
</dependency>


The exclusions are necessary to prevent Spring 2.0.x dependencies from being unnecessarily pulled in.

3. Go through your JSPs and remove all xmlns:authz="..." declarations and replace them with :


xmlns:security="http://www.springframework.org/security/tags"


4. Go through your applicationContext*.xml files and replace any constants you may have referring to Acegi security with their proper Spring Security 2 counterparts. This means replacing the package prefixes 'org.acegisecurity' with 'org.springframework.security' and in some cases replacing ACEGI with SPRING_SECURITY in some static constant names.

This migration guide was based on (and expanded from) the one provided by Matt Raible. If you get the time, I recommend you check out his site, he has a lot of good wisdom and examples to share.

On Hibernate UserTypes

Hibernate UserTypes are a great way for persisting complex classes which may require more than one column in a table in order to be properly represented in a database. Many people have used them successfully in order to be able to simplify storage of complex objects and thus simplify their applications at the same time. One simpler use case of them was to create Enum types before support for Java 5+ enums came along.
Now, you'd think that once you've got Java 5 support with Hibernate, you'd never have to use such a simple use case again because you've got the Enums themselves, along with the @Enumerated annotation you can place on fields and properties, right ? Wrong. I've recently started migrating my projects to Spring Security from Acegi Security, and I've found that in the process, they've made the GrantedAuthority interface extend Comparable. This conflicts with the fact that Java 5+ Enums already implemented the Comparable interface with a generic parameter, and as such your code won't compile if you have an Enum that also implements GrantedAuthority, such as the following :


public enum Role implements GrantedAuthority, ConfigAttribute
{
    ROLE_ADMINISTRATOR;

    /** Returns the Spring Security authority string — simply this constant's name. */
    public String getAuthority() {
        return this.name();
    }

    /** Returns the security configuration attribute — identical to the constant's name. */
    public String getAttribute() {
        return this.name();
    }

    /** Returns the i18n message key, e.g. "enum.Role.ROLE_ADMINISTRATOR". */
    public String getMessageKey() {
        return "enum.Role." + this.name();
    }
}


In order to remedy the conflict, I've had to convert my nice, pretty, simple Enum into a class, and simulate Java 5+'s enum behaviour, like so :


/**
 * A type-safe "enum" class standing in for a Java 5 enum, because Spring
 * Security's GrantedAuthority extends raw Comparable, which conflicts with
 * the Comparable&lt;E&gt; that real enums already implement.
 */
public final class Role implements GrantedAuthority, ConfigAttribute, Serializable, Cloneable {

    private static final long serialVersionUID = 1L;

    public static final Role ROLE_ADMINISTRATOR = new Role(0, "ROLE_ADMINISTRATOR");

    // All known roles; sorted by ordinal in the static initializer below.
    private static final Role[] VALUES = new Role[] {
        ROLE_ADMINISTRATOR
    };

    // Upper-case role name -> Role instance, backing valueOf(String).
    // Typed generically: with the original raw Map, get(key) returned Object,
    // which does not compile against valueOf's declared Role return type.
    private static final Map<String, Role> NAME_MAPPINGS = new HashMap<String, Role>();

    static {
        // A raw anonymous Comparator declaring compare(Role, Role) fails to
        // implement Comparator.compare(Object, Object) and does not compile;
        // the typed Comparator<Role> below is the correct form.
        Arrays.sort(VALUES, new Comparator<Role>() {
            public int compare(Role o1, Role o2) {
                return o1.ordinal - o2.ordinal;
            }
        });

        for (Role r : VALUES) {
            NAME_MAPPINGS.put(r.getName(), r);
        }
    }

    private int ordinal;
    private String name;

    //Constructors

    /*
     * DO NOT EVER USE THIS! It exists only for serialization purposes.
     */
    public Role() {
        super();
    }

    /** Copy constructor used by clone(). */
    private Role(Role r) {
        this(r.ordinal, r.name);
    }

    private Role(int ordinal, String name) {
        super();
        this.ordinal = ordinal;
        this.name = name;
    }

    //Behaviour Methods

    /**
     * Orders roles by ordinal, mirroring enum ordering semantics.
     *
     * @throws IllegalArgumentException if o is null or not a Role
     */
    public int compareTo(Object o) {

        if (o == null || o.getClass() != Role.class) {
            throw new IllegalArgumentException(
                "Comparison object may not be null, and must be a Role");
        }

        return this.ordinal - ((Role) o).ordinal;
    }

    /** Returns the Spring Security authority string (the role's name). */
    public String getAuthority() {
        return getName();
    }

    /** Returns the security configuration attribute (the role's name). */
    public String getAttribute() {
        return getName();
    }

    //Pseudo-properties

    /** Returns the i18n message key, e.g. "enum.Role.ROLE_ADMINISTRATOR". */
    public String getMessageKey() {
        return "enum.Role.".concat(getName());
    }

    //Property Accessors

    public final int getOrdinal() {
        return this.ordinal;
    }

    public final String getName() {
        return this.name;
    }

    //Helper Methods

    /** Null-safe hashCode helper. */
    public static final int hashCode(Role r) {
        return r == null ? 0 : r.hashCode();
    }

    /** Null-safe equality helper. */
    public static final boolean equals(Role x, Role y) {
        return x == null ? (y == null) : x.equals(y);
    }

    /** Null-safe clone helper. */
    public static final Role clone(Role r) {
        return r == null ? null : r.clone();
    }

    /** Returns all roles in ordinal order, mirroring Enum.values(). */
    public static final Role[] values() {
        return VALUES;
    }

    /**
     * Looks a role up by name (case-insensitive), mirroring Enum.valueOf().
     *
     * @throws IllegalArgumentException if no role by that name exists
     */
    public static final Role valueOf(String s) throws IllegalArgumentException {

        String key = defaultString(s).toUpperCase();

        if (NAME_MAPPINGS.containsKey(key)) {
            return NAME_MAPPINGS.get(key);
        } else {
            throw new IllegalArgumentException("No role by the name ["+s+"] exists");
        }
    }

    //Object Overrides

    @Override
    public String toString() {
        return new StringBuffer().append(this.name)
            .append("(").append(this.ordinal).append(")")
            .toString();
    }

    @Override
    public Role clone() {
        return new Role(this);
    }

    @Override
    public int hashCode() {
        final int prime = 31;
        int result = 1;
        result = prime * result + ((this.name == null) ? 0 : this.name.hashCode());
        result = prime * result + this.ordinal;
        return result;
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj)
            return true;
        if (obj == null)
            return false;
        if (getClass() != obj.getClass())
            return false;
        final Role other = (Role) obj;
        if (this.name == null) {
            if (other.name != null)
                return false;
        } else if (!this.name.equals(other.name))
            return false;
        if (this.ordinal != other.ordinal)
            return false;
        return true;
    }

}


Once this was done, I created a Hibernate UserType implementation for Role, so that Hibernate could persist Roles as Integers, just as was already being done for the Enum version of Role:


/**
 * Hibernate {@link UserType} that persists {@link Role} instances as a single
 * INTEGER column holding the role's ordinal, matching the behaviour of
 * {@code @Enumerated(EnumType.ORDINAL)} on a real enum.
 */
public class RoleUserType implements UserType {

    private static final int[] SQL_TYPES = new int[] { Types.INTEGER };

    /** Logical type name, referenced from the package-level @TypeDef. */
    public static final String HIBERNATE_TYPE_NAME = "RoleUserType";

    // Role is immutable, so no actual copying is needed.
    public Object deepCopy(Object value) throws HibernateException {
        return value;
    }

    public Object assemble(Serializable cached, Object owner) throws HibernateException {
        return (Role)cached;
    }

    public Serializable disassemble(Object value) throws HibernateException {
        return (Role)value;
    }

    public boolean equals(Object x, Object y) throws HibernateException {
        return Role.equals((Role)x, (Role)y);
    }

    public int hashCode(Object x) throws HibernateException {
        return Role.hashCode((Role)x);
    }

    public boolean isMutable() {
        return false;
    }

    /**
     * Reads the ordinal from the result set and maps it back to a Role.
     *
     * @throws HibernateException if the stored ordinal does not correspond
     *         to any known Role (corrupt or stale data), instead of letting
     *         a raw ArrayIndexOutOfBoundsException escape
     */
    public Object nullSafeGet(ResultSet resultSet, String[] names, Object owner) throws HibernateException, SQLException {
        int roleOrdinal = resultSet.getInt(names[0]);

        if (resultSet.wasNull()) {
            return null;
        }

        Role[] roles = Role.values();
        if (roleOrdinal < 0 || roleOrdinal >= roles.length) {
            throw new HibernateException("No Role exists with ordinal [" + roleOrdinal + "]");
        }

        return roles[roleOrdinal];
    }

    /** Writes the role's ordinal, or SQL NULL when the value is null. */
    public void nullSafeSet(PreparedStatement statement, Object value, int index)
            throws HibernateException, SQLException {
        if (value == null) {
            statement.setNull(index, Types.INTEGER);
        } else {
            statement.setInt(index, ((Role)value).getOrdinal());
        }
    }

    // Role is immutable, so merging can simply keep the original.
    public Object replace(Object original, Object target, Object owner) throws HibernateException {
        return original;
    }

    @SuppressWarnings("unchecked")
    public Class returnedClass() {
        return Role.class;
    }

    public int[] sqlTypes() {
        return SQL_TYPES;
    }

}


Once the Hibernate UserType implementation was written, it was just a matter of adding my model package to Hibernate's list of annotated packages (using Java 5 after all) :


// Package-level @TypeDef registers RoleUserType under its logical name so that
// entity fields can reference it with @Type(type = RoleUserType.HIBERNATE_TYPE_NAME).
// Note: in package-info.java the annotation precedes the package declaration,
// while the imports it uses follow it.
@TypeDef(name = RoleUserType.HIBERNATE_TYPE_NAME, typeClass = RoleUserType.class)
package com.mypackage.model;

import org.hibernate.annotations.TypeDef;
import eu.alenislimited.acshelper.support.hibernate.RoleUserType;


After defining the Hibernate UserType type, it was then just a matter of going around to all my fields that used Role, and replacing ...


@Enumerated(EnumType.ORDINAL)


... with ...


@Type(type = RoleUserType.HIBERNATE_TYPE_NAME)


Fortunately, doing the conversion was just extra work, and didn't require any special changes to any of my Spring configuration, or even my Hibernate configuration (beyond adding the @TypeDef to my model package and adding it to my Hibernate SessionFactory's list of annotatedPackages).

Tuesday, November 04, 2008

A first time gem creator's addendum

I recently started researching how to create Ruby Gems because I was working on a Ruby application that is a reference application for accessing our production system at work, and I wanted to make the specific access code for our production system a Gem in Ruby so that I could just hand it off to future clients who want to integrate with Ruby. Further to that end, I read about the Newgem gem, and so I decided to use it, since it had recently hit 1.0.1. You can read the documentation there, so I won't repeat it here, but there's a couple of things you should do, prior to following their instructions.

  1. Set a 'HOME' environment variable if you're using Windows (as I am)

  2. gem install cucumber newgem

  3. rake config_hoe


The first step is to install newgem, but to also ensure that cucumber is installed as well because it's an unlisted dependency. The second step is to ensure that there's a basic .hoerc file created in your home directory that's ready to go whenever you want to run any tasks on your gem while testing / packaging it. I'm guessing it's automatically created in Linux, but I had to manually set the 'HOME' environment variable and run the configuration task myself in order to have the proper configuration file generated for me to run the tasks for creating and testing my gem.

Wednesday, October 29, 2008

A quick note on ActiveResource in Ruby on Rails

I've learned a bunch of new things about Ruby on Rails in the past few days, specifically when it comes to RESTful web services.


  • ActiveResource uses the built in Ruby XML::Mapping library for its OXM (Object to XML mapping) layer

  • Apparently, no Ruby library out there properly handles namespaces. Not a one : REXML, ROXML, XML::Mapping

  • ActiveResource can be highly customized to adjust to other RESTful web service schemes, just not when it comes to the generated XML.



Unfortunately this makes it very hard to integrate (in ruby) with the RESTful web service that I've written for our company's production web site. Ironically, I wanted to make the service very Ruby friendly. I really wish there was more in depth documentation out there for Ruby APIs. It would have made things so much easier when I was designing our API, which runs in Java, using JAXB.

Monday, October 13, 2008

A quick note on Maven plugins

I've recently had to go abroad to various other countries for work, and I've found that porting my projects around to various workspaces hasn't been as easy as it otherwise should have been, and here's why : I use Maven, especially the plugins. I've used others, and I've even written my own. I've also learned that one should specify versions of all plugins used, because the behaviour of a plugin may change between versions and if you're unaware of that and you start getting strange behaviour from your project build, this can lead to hours of wasted time trying to figure out what the hell is going on. I'm referring specifically to the maven-war-plugin for building web application WAR files. I had previously left the version unspecified, and when I ported my workspace to my laptop, Maven chose to use a different version of the plugin than that of my desktop, and as a result was including files I didn't want in my WAR file. This led to classpath errors, and me spending several hours trying to find out where the problem was. So in short, always specify versions of the plugins you use to ensure consistent behaviour in your build, wherever you may be working on your project.

Friday, August 29, 2008

Reflection in Ruby

I've recently gotten started going on a Ruby on Rails project for my company, that's meant to be a simple app that we'll throw up and let run for any prospective customers. But I ran into a problem with copying property values between objects, since I'm not as familiar with the language as I'd like to be. The Object type defines a method called 'send' which lets you invoke a method with given arguments. However, I wanted to be able to assign property values via this method, and just sending the name of the property as the name of the method to invoke wasn't working. (Works just fine if you want to get the property). As it turns out, the method for invoking the setter of a property is brain-dead simple : use the name of the method as you'd define it if you were overriding the setter for the property. For example, if you have a property called 'name', you'd define the setter for it as follows :

# Setter for the 'name' property: assigns the given value to the
# backing @name instance variable.
def name=(value)
  @name = value
end

So if you wanted to set the value of this property reflectively, you'd do something like :

@person_instance.send "name=", "John Smith"

Saturday, August 16, 2008

A moment of developer clarity and zen

I had a moment today where I was just letting my mind wander. It was one of those moments where you have a brilliant idea that just seems so common sense that you can only ask yourself "Why the hell didn't I think of that before ?" This moment of clarity happened to be concerning my horribly slow laptop, with its horribly old ATA 100, 5400 rpm hard drive. Then it occurred to me : just like with video encoding that I had setup months ago, I could setup a RAM disk to speed up Eclipse by storing my workspace on the RAM disk to speed up access to all the (ridiculous number of) temp files that Eclipse uses. On Ubuntu Linux, it turns out this was ridiculously easy : http://ubuntuforums.org/showthread.php?t=182764

The really amazing part was just how much this sped things up : it took my project build time from 6 mins, 30 seconds down to 35 seconds. And those are real numbers (plus or minus a couple seconds, due to slight variations in repeated builds.)

Monday, August 11, 2008

More JAXB quirks

Today I ran across yet more quirks of JAXB 2.0. I'm getting really tired of running across stupid errors that are difficult to debug because the programmers who wrote JAXB are lazy. Today, I was receiving a java.lang.InternalError when I tried to compile a schema from beans I'd recently refactored. The error I received was something along the lines of "could not escape schema namespace". As it turns out, it was caused by the lack of an @XmlType annotation on the enums I had added to one of my XML serializable beans. I hope this helps anybody who has the same problem.

Wednesday, July 30, 2008

A quick note about JAXB

JAXB (the Java API for XML Binding) is a great piece of technology (2.x, not 1.x). However, if you're starting an application and your JAXBContext is throwing an exception saying that it's unaware of a given class, there's a quick fix. At this point, I'm guessing you've configured your JAXB context to use a package, and the class that the JAXBContext is unaware of is in the given package, and you're scratching your head as to why it can't seem to find a class that's in the proper package. Well, here's the most probable answer : if you're using package-configured JAXBContexts, you need to have factory methods for all top level elements, similar to the following :



/**
 * Create an instance of {@link PurchaseOrder }.
 *
 * Package-configured JAXBContexts require a factory method like this for
 * every top-level element; without it the context reports the class as
 * unknown.
 *
 * @return a new, empty {@link PurchaseOrder }
 */
public PurchaseOrder createPurchaseOrder() {
return new PurchaseOrder();
}



... assuming that mypackage.PurchaseOrder is the class the JAXBContext is complaining that it's unaware of. This little quirk took me a couple hours to debug because I had just regenerated some web services code using Apache CXF 2.1.1 from an updated WSDL document for an updated Web Service that our company uses. Little did I know, that in comparing the two ObjectFactory files in Eclipse, I had missed copying over certain important functions. Hopefully this helps you.

Friday, July 11, 2008

Pure FTP on Ubuntu


Pure FTP is a great FTP server and I love it because it's ridiculously fast, reliable and works well for our company. The only problem is that it's tricky to configure on Ubuntu because just like with numerous other programs, the people at Ubuntu, in their infinite wisdom, felt the need to fuck with the program's default way of doing things. Specifically, in Ubuntu, command line arguments for starting the FTP service are done through individual files for each command line option you want the service to be started with, with values for the options placed inside each of the files. This is a stupid way of doing things, but that's not the topic of this post.


The problem I've been having lately is that I recently deployed new projects within our production network, and these new projects required access to the FTP server, and aren't on the same box as the server, as is the sole other project that has been using the FTP server. I'd been getting problems trying to connect to the server from any LAN box, but not any external boxes nor the machine itself (localhost). The error I got back was "421 Service not available". I googled around for hours and found nothing useful, until I started realizing that other people were getting 421 errors when their PureFTP instance was misconfigured, but with different messages, and then it got me thinking that maybe my instance was somehow misconfigured.


I re-read the documentation for PureFTP and after an hour or so, it hit me that the server does reverse lookups to resolve fully qualified names, and such resolution doesn't work properly on our network (for good reasons that I'm not going to go into). After disabling reverse DNS resolution with the -H startup option (`echo "yes" >> DontResolve` in the configuration directory in Ubuntu), the problem went away.


I hope this helps anybody else who runs into the same problem.

Tuesday, July 08, 2008

More Tomcat quirks

I recently ran across a problem with receiving a LinkageError in one of my web applications. The problem occurred when Acegi security tried to use libraries that were not yet loaded and tried to load them itself. Strangely, the error only occurred while using my RESTful API with a Spring Framework controller. If I logged in through my web interface which uses Acegi and Spring MVC, then the linkage error didn't occur.

After numerous hours of Googling around, I found a small company's JIRA issue tracker that reported the same bug, but also that it only occurred when using the security manager. After disabling the security manager on my own Tomcat instance, I found that the problem went away. I'd very much like to have a better resolution, but it's going to have to do for the time being.

Monday, July 07, 2008

Tomcat quirks

In recent weeks, we've had problems with our company's servers going down because of power problems at our colocation provider. As a result, the fact that Tomcat hadn't been properly configured to boot at startup had really burned us. Consequentially, I've been spending the past few days troubleshooting why the servers wouldn't startup, and ensuring that they do come up properly at boot time in future. The following are some of the things I've run across.



  • The JAVA_HOME and JRE_HOME variables must be set. If they're not, then Tomcat won't start, at least on Ubuntu Gutsy Gibbon Server. This is a problem I encountered with the default installation of JIRA on one of our production servers.


  • When using EhCache with Hibernate, the tmp directories don't get deleted, regardless of whether or not Tomcat is shutdown properly. This will block the container from starting up again, so these temp files must be deleted. I encountered this problem with my own applications.

Wednesday, July 02, 2008

Finally! The trick to proper Java Date and Calendar formatting

It's well documented in the Java API as well as numerous forums and blogs, that the Java API is not properly implemented, and in some respects flat out broken, hence why Java 7 is going to have a brand spanking new Date/Time API based on the Joda Time library hosted at sourceforge.net. In implementing Dates / TimeZones in Java, the developers of Java saw fit to separate time zones from date implementations because, really, a date is just a number of milliseconds from Jan 1, 1970 at 00:00:00.000 GMT in the morning. This is a noble goal which I whole heartedly agree with : dates should be represented separately from their timezones because the printed representation of a date will change depending on time zone, whereas no matter where you are in the world, the number of milliseconds from the Epoch has not changed. However, the Java developers implemented this concept poorly. They chose to attach TimeZones to Calendars and then not have them affect any of the time fields, which is fine. But the TimeZone attached to a calendar is not used when using a DateFormat directly. Here's the key : the DateFormat object uses its own internal TimeZone during formatting (which it takes from the currently running JVM) by default. In order to format using the TimeZone attached to a Calendar, you have to explicitly set the TimeZone of the DateFormat to that of the Calendar instance you want to format.

Friday, June 13, 2008

An interesting 'accesskey's quirk

Since I learned about accesskey support in HTML, I've been including them where appropriate in my web applications to speed up my access and usage of said applications. However, when trying to use the accesskey for a password field, P, I ran across an interesting quirk. The accesskey modifier is different for various browsers. In order to use access keys in IE, the modifier is Alt, for Firefox it's Alt+Shift, for Opera you press Shift+Esc, then the accesskey. I primarily use Firefox (go open source), so when I pressed Alt+Shift+P to access a password field, it opened Windows Media Player. As it turns out, this is the hotkey for accessing WMP from the toolbar (ie the miniplayer). To prevent this hotkey combination from opening WMP, you'll have to disable the toolbar by going to the taskbar, Right-Click -> Toolbars and uncheck Windows Media Player (I don't like the mini-player toolbar that much anyway).

Wednesday, June 11, 2008

Spring Framework URL mapping quirks

My recent foray into developing using RESTful principles and resources has led me to an interesting quirk in the Spring Framework, which I use for our company's primary application. As it turns out, the URL mapping scheme requires that you set 'alwaysUseFullPath' to true so that the entire path of a URL that gets submitted to the framework gets inspected to determine the proper controller mapping. Otherwise, it can lead to some quirks, like not submitting the request to the proper controller. I had to learn this the hard way through several hours of debugging.

Thursday, June 05, 2008

Why you should always export your sources with your Java projects

With the advent of popular and widespread Maven 2 adoption, there's no reason you shouldn't be using it to build your open-source or closed source Java projects. With that in mind, it's dead easy to use Maven's 'source' plugin to automatically generate a separate JAR with your project's sources in it when you package and deploy your project. Posting the sources with your project not only allows other people to easily debug their code while using your project, but also serves as a backup in the event of a catastrophic failure of a computer hosting your code for which you did not have proper backup procedures already in place. In the event that you couldn't tell, that happened to me tonight.

JAXB schemagen - Finally got it working

For those not in the know, schemagen is a program that comes as part of the Java Web Services Developer Pack and it's used to generate XML schemas from JAXB annotations on Java beans. It so happens that the creators of the JAXB reference implementation also made a Maven 2 plugin for this program. I had previously attempted to use it to generate schemas in my initial attempts with JAXB, but had been presented with a slew of unfriendly exceptions being thrown at me whenever I ran the plugin. Not having the time back then to really play around with it and being considerably less experienced with JAXB, I had to shelve it and find other, less satisfying solutions to my problems with generating documentation.


Recently, I've had to come back to using JAXB because I'm making a RESTful Web Services API that leverages the power of both JAXB and Hibernate to do all my heavy lifting for me. This time around, I wasn't willing to tolerate a lack of schema to give to our users of the API, because it would mean a lot more work for them, and a lot more work for me. This time, I decided to be persistent and dig around the jaxb-schemagen plugin to make sure I could get it working. I'm proud to say that my perseverance paid off, and I now have my RESTful API schema being automatically generated for my application. Here are the problems and the solutions I ran into when I was trying to get going on this:



NullPointerException

I came across persistent NullPointerExceptions while trying to run the plugin at first. The stack trace indicated that the problem was with apt, the Java annotation processing tool. I later discovered that in order to properly map your classes into XSD schema types, you must specify an @XmlType annotation on *all* of the classes you wish to map (at least with JAXB 2.0, which is the version I'm currently using)

ClassCastException

After I resolved the first problem, I came across another pesky exception that likewise did not provide any useful debugging information. The plugin output indicated that it was finding annotations that it didn't know how to deal with, specifically the Java Persistence API annotations that I was using for Hibernate. After much digging around on Google and some detective work, I discovered that any annotations encountered by the jaxb-schemagen plugin had to be on the Maven runtime classpath. I double checked my POM to find that I had specified the scope of the JPA / Hibernate annotations as 'provided', which instructs Maven not to load them into either the compile or runtime classpaths. The initial reason I had for specifying the dependency scope as 'provided' was so that they would not needlessly get included with my project WAR files. I changed their scope to 'compile' (which also includes 'runtime') and voila, apt could now properly detect the unknown annotations, and get past them to deal with the JAXB annotations it needs to create the XML schema.



If anybody else encounters similar errors with JAXB-schemagen, I hope they find this page and find it useful.

Tuesday, June 03, 2008

Fixing package retrieval quirks in Ubuntu Gutsy Gibbon Server

In the default installation of Ubuntu Server (Gutsy Gibbon), the default source of packages is the CD-ROM drive. That's fine if you make the assumption that there will always be somebody physically present in order to maintain the servers, but for most server's these days, that's a fallacious assumption. The really bad part about this default, is that there's no way I know of at the time of this writing to override this on the update command. In our company, we have our servers hosted off-site like so many other companies, so everything must be done remotely. Therefore, the CD-ROM as a source for Ubuntu packages is a bad idea. To remedy this, you have to go into the apt-get configuration stored at
/etc/apt/sources.list
, and you'll have to find a line similar to :

deb cdrom:[Ubuntu-Server 7.10 _Gutsy Gibbon_ - Release i386 (20071016)]/ gutsy main restricted


Comment out this line, try your update again and you should be good to go.

Tuesday, May 27, 2008

Fixing Messenger when you've accidentally screwed it up

If you're like me, you like to have your Windows Live Messenger conversation windows be as simple and clean as possible, with no toolbars, and no contact pictures. To that end, I removed the toolbars from my messenger contact windows so that I wouldn't have to see them. However, this became a problem one day when I dragged a picture that I intended to send to someone into a conversation window, and instead of sending it to them, Messenger permanently changed the background of my chat windows with that person to that picture. To resolve this, I had to open a chat window with person X, press the Alt key to get the old-school windows standard toolbar to pop-up at the top, and then go to Tools -> Show toolbars -> Standard. Then from the standard toolbar, you can click on the 'Show Menu' button on the top right of the conversation window from the standard toolbar, and go to Tools -> Backgrounds ... . This will take you to a screen where you can select a blank default background. However, dragging the picture may have changed the default background colour for the window, as it did in my case. To remedy this, you have to change the background colour to match the rest of the Messenger colour scheme, and you can do so by clicking on the paintbrush icon on the standard toolbar, and selecting a default background colour from there to make things consistent with the rest of the Messenger colour scheme.

Monday, May 26, 2008

How to recursively delete a folder and its contents in PowerShell

The command :
Remove-Item -recurse -force [directory name]


The really sad thing is that I googled this and there are no sites that explicitly state how to do this. The comparable command in bash :
rm -rf [directory name]


Yet one more reason I hate Microsoft and PowerShell in particular.

RESTful web services with JAXB 2.0

In case you're not familiar, JAXB (Java API for Xml Binding) is an API developed by Sun for binding between XML and Java beans. The first incarnation, 1.x, was interface+implementation based like XMLBeans from the Apache Project. The second incarnation, 2.x, is annotation+pojo based. It's a very useful API, especially when combining it with Hibernate (and the Java Persistence API) to make restful web services. But, that's not really the point of this post. The point is that I've been using JAXB 2.0 for web services much in the past, but this is the first time where I've had to map the "class hierarchy per-table" pattern used by Hibernate into XML. As it turns out, JAXB 2 has very good support for this pattern. You can read more on this at the java.net project page for JAXB.

Thursday, May 08, 2008

Logging for tests using the Spring Framework

It's been a while since I've posted to the blog 'cause I've been so busy, and it seems fitting that this be a good way to resume posting, as this issue has pissed me off quite a bit and been a major thorn in my side for the longest time.


The Spring Framework has some pretty good support for creating test classes for your application, however it by default does not properly initialize log4j logging when doing tests, and I found out today why. When running your application in a Servlet container, you'd configure Spring logging in web.xml. However, when running in a standalone context, Spring has no way of knowing how you want logging configured, so it leaves it up to log4j to configure itself. On that front, you have to realize what log4j's default configuration strategy is : reading a 'log4j.properties' file from the root of the classpath. Once this hits you (and it took me a while), getting logging running for your test cases becomes a simple matter of placing a valid 'log4j.properties' config file in the root of your test classpath, and logging starts working properly, so now you can read those pesky hibernate generated queries off your test log .

Monday, March 31, 2008

Determining which version of Ubuntu you're running

At our company, we use Ubuntu to run our servers. Sure, there's other distributions with potentially better performance, but none so easy to setup, and that was the deciding factor in choosing which distro was going to run our systems because I had to set it up, and since I'm both developer and system administrator (*very* small company), I really don't have time to fuck around and I need to make things as easy on myself as possible. To that end, I've decided to start writing tutorials on everything I learn how to do, and I needed to make sure I note which version of Ubuntu / Linux I'm using. To that end, in order to determine which version of Ubuntu you're running, run this :
cat /etc/issue

I learned of this little trick from here.

*Edit (2008-10-29)*
A much better command for determining the current Ubuntu version you're running :
lsb_release -a

Sunday, March 30, 2008

More random MySQL glitches

I ran into the following exception recently when moving an application from one server to another :

java.lang.NullPointerException
at com.mysql.jdbc.StringUtils.indexOfIgnoreCaseRespectQuotes(StringUtils.java:959)
at com.mysql.jdbc.DatabaseMetaData.getCallStmtParameterTypes(DatabaseMetaData.java:1296)
at com.mysql.jdbc.DatabaseMetaData.getProcedureColumns(DatabaseMetaData.java:3670)
at com.mysql.jdbc.CallableStatement.determineParameterTypes(CallableStatement.java:702)
at com.mysql.jdbc.CallableStatement.(CallableStatement.java:513)
at com.mysql.jdbc.Connection.parseCallableStatement(Connection.java:4422)
at com.mysql.jdbc.Connection.prepareCall(Connection.java:4496)
at com.mysql.jdbc.Connection.prepareCall(Connection.java:4470)
at org.apache.commons.dbcp.DelegatingConnection.prepareCall(DelegatingConnection.java:275)
at org.apache.commons.dbcp.PoolingDataSource$PoolGuardConnectionWrapper.prepareCall(PoolingDataSource.java:292)


As it turns out, there was a permissions issue whereby the function was created with a line specifying the definer and this causes the 'SHOW CREATE FUNCTION' call to fail and return null unless the user calling the statement has permissions for select on the 'mysql' database. Some recommended solutions (that I didn't get around to trying) were changing the call to 'prepareStatement' as opposed to 'prepareCall'. The issue is logged as MySQL Bug #24065. Lately it just seems as if I keep hitting every bug out there, and can't get anything done. I hope this helps somebody.

Commons VFS (FTP) problems

I recently had a problem with Commons VFS whereby I could log into an FTP server and list the contents of the initial directory, but no contents were shown, nor could I otherwise resolve any of the contents or subdirectories. As it turns out, there's a problem with FTPClient which is part of the Commons Net package, and the basis for the FTP module of Commons VFS. After logging in, if the VFS client tries to set passive mode and the server doesn't respond that it allows passive mode (ie PureFTP specifically), then the client just continues as if there's no problem instead of throwing an Exception. The result was getting an error message like "The file root [ftp://myuser:*****@localhost/my_sub_folder] is not a folder". Since I wasn't able to get PureFTP successfully configured to allow passive mode, I was able to resolve the problem by forcing the code to use active mode instead. Fortunately this wasn't a problem since the FTP server in question was on the same machine as the code trying to access it (ie not across the boundary of a router).

Monday, March 03, 2008

Software developers are arrogant assholes

Like the title of this post says, software developers are arrogant assholes. I can say that because I am one. The reason I say this is that I've been Googling like a mofo for articles related to RESTful development and RESTful web services servers using Ruby on Rails, and I must say that it's a real pain in the ass. The vast majority of the articles I'm able to find are in blog format, and are (as is typical with blog-style articles / tutorials) very vague, and don't provide enough detail for a learner to get going. Essentially, they make the assumption that you already know something about Ruby on Rails, and it's a rather hypocritical assumption for a tutorial / article to make. I will admit that I've been somewhat guilty of doing this same thing in my own blog whenever I've provided code examples, so it stops now. Any future "tutorials" on this blog are going to be as complete as possible, time constraints at work be damned. Vagueness cannot be allowed to continue.

Tuesday, February 26, 2008

Setting up your own domain in (Ubuntu) Linux

...is easier than you actually think. I managed to get a working domain name for our company's servers going using tutorials and information collected from here, here, here and here.
I'd love to be able to write a coherent guide based off of the information from these pages, but I'm just too busy at work to do it at the moment.

Monday, February 25, 2008

A Quick note on SFTP jails

SFTP jails are uber useful and I've blogged about them before, but one crucial piece of information that I've been missing has been where it all started. This thread on the Ubuntu forums was the base for all of the work that I've done and it has made it super easy to setup jails here at work, which is a blessing due to the frequency with which we move files and make changes. I'm finding more and more that I owe an increasing amount of thanks to the people and posts on forums, both Ubuntu and others, so thanks.

Thursday, February 14, 2008

Understanding Linux ACLs


Ok, so for a while, I've had an SFTP jail setup within our company for our clients to connect and dump batch files to our systems. It later became necessary to have an administrator user for our staff to be able to go in and read any of the files from any of the clients without having to use a bunch of different logins to login as each individual client. This makes sense as doing so would be far too cumbersome. This is where ACLs came in. I learned what I had to so that I could get going, but that wasn't very much (fortunately at the time, unfortunately later). I've since learned a couple of interesting things:


There are two types of ACLs in Linux :

  1. The access ACL which controls access to files and directories

  2. The default ACL which applies to directories only and acts like a template for newly created files and subdirectories within that directory.



The former I knew about right from the beginning (pretty obvious), but I didn't really know the proper name for what I was manipulating. The latter I learned about today, and had previously assumed that Access and Default ACLs were one and the same (I didn't know about the distinction because it wasn't mentioned in the fucking man page).

Wednesday, February 13, 2008

Today's karmic balance

It's pretty rare that I have something to blog about within a couple days of each other, never mind on the same day, so I suppose that this post is the karmic balance of the other for today. I just found out yesterday that because one of our processing partners had their douchebag of a contractor ditch on them, I now have to maintain two systems and integrate them both together. That in and of itself is kinda stupid. But at least this'll be my first production interaction with Ruby on Rails, and I'm actually really looking forward to the experience. I've been meaning to get myself up to speed with Rails, but between feeling tired after I get home and working so damn much that I don't have the time when I get home, I haven't really had the chance. It's going to be an interesting coming couple of weeks.

Use DTDs, they're there for a reason

<rant>
As the title of this post states, if you're a web developer, use a fucking DTD. They exist for a reason, that reason being to allow web browsers to have a reasonable expectation of the content and layout of your website so they can deliver the desired experience to your user. If you're a web developer and you don't know what a DTD is, then you're in serious fucking trouble. In the event that you don't know, DTD stands for Document Type Definition, and it's a single line of XML that goes at the very beginning of HTML documents and almost at the very beginning of XML documents (if they're validated by DTDs). An example of a DTD :

<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">


That DTD is for a strictly validated XHTML 1.0 compliant HTML document. This means that it's strongly validated and web browsers can expect everything after it to be well-formed quality. That's right bitches, I use a good version of HTML and I'm standards compliant like a mofo.
</rant>

Sorry, but I just had to get that out. Lately, I'm being inundated with pages from our partners that don't use DTDs, don't validate to anything, and are just generally horrible markup. Now, normally, a person wouldn't need to care, but when your partners are lame and you have to scrape their pages for data, the lack of developing to a DTD becomes a giant pain in the ass.

Tuesday, February 12, 2008

A letter to Apple

Yes, these things are cliche, but whatever, screw it.

Dear Apple,

In case you haven't figured it out from my buying patterns, I've stopped buying music from you that isn't DRM-free. This is because, as Steve Jobs has admitted himself, DRM is archaic, and frankly doesn't work. What your company doesn't seem to realize is that if you want to continue your high level of sales, (especially to technophiles / audiophiles such as myself) you're going to need to get going on casting off the DRM that exists on your considerable library of music. I steadfastly refuse to purchase any music that is not DRM-free. I want to be able to play it anywhere on whatever device I may happen to own, and I'm not going to cave into unreasonable demands of an already clearly greedy music industry that refuses to evolve and adapt to technological reality. If I can't purchase music through large corporations such as yourself, I'm going to look elsewhere : smaller sites, independent labels, and independent artists who publish their music for free. So, until you get your head out of your ass : screw you, you're not getting one thin dime from my pocket.

Wednesday, January 30, 2008

Beginning with SQL Server

Given changes in the environment in which our business does its business, I've had to start learning to use SQL Server and various other Microsoft technologies lately. A quick note on connecting to a foreign SQL Server instance (ie another company's ...) :


Create an alias for the database so that Enterprise Manager has something to work with

Open up the Client Network Utility which should be included in the Start menu programs group for your SQL Server installation.
Start -> Programs -> Microsoft SQL Server -> Client Network Utility -> Alias (tab) -> Add... (button).
Under 'Server alias' enter an easy to remember name for the connection. Under 'Network libraries' select the TCP/IP option. Under 'Connection parameters' enter the DNS name or IP address under 'Server name' of the server you wish to connect to. Change the option for 'Dynamically determine port' if the server you're attempting to connect to doesn't run under the standard port of 1433 for SQL Server. Hit 'Ok' to save your settings, then 'Ok' to close out of the Client Network Utility.

Create a new SQL Server Registration to add your SQL Server instance to the Enterprise Manager

Open Enterprise Manager. It should be in the same programs group in the start menu as the Client Network Utility. Once open, create a new SQL Server Group if you haven't already got one (or haven't got one that you want to add your new SQL Server instance to). Then add a new SQL Server Registration. You should be prompted by a Wizard at this point. (If you're not, then you've probably already disabled the Wizard and don't need to be reading this tutorial.) In the wizard, click on 'Next' to proceed to the SQL Server selection screen. Under the list of available servers, you should see the alias you just created in the previous step. Select it and click on 'Add >' to add it to the list of added servers, and then click on 'Next'. After this, you should be prompted for credentials, and the rest is pretty self explanatory.

Thursday, December 20, 2007

Cool MySQL tricks

It's been a while since I've written a blog post, and I figured this was a good topic to write about. For the most part, I don't use SQL. There's numerous ORM (Object-Relational Mapping) tools and frameworks out there (ie Hibernate) that abstract away all the boiler-plate CRUD code. When I do have to use SQL (be it on the command line or with a query browser), I'm not afraid to dive right in, and it's interesting to learn new things about SQL because there's so much to learn and it's such a ridiculously powerful query language.

I had to update the configuration in one of my databases, but the configuration required inserting numerous new lines into a table for user roles. I could've written a stored procedure to do it, but given my (admittedly somewhat limited) knowledge of SQL, I figured this would take too much time and I didn't want to do it. (This is what I like to call constructive laziness). I kept Googling and found MySQL's INSERT INTO ... SELECT syntax, which allows you to conveniently insert the results of one query into another table. I'm sure that most other database implementations out there have a similar convenience syntax, but I happen to work with MySQL so there you go. It took what would've been a numerous line script with looping and separate SELECT and INSERT statements down to a one liner. If you use MySQL (especially with LAMP / AJAX) I recommend that you read about it.

Monday, December 03, 2007

Deleting .svn folders

Subversion is a great tool, but unfortunately it has to store metadata somewhere, and like all other code repository tools, it stores them in metadata directories ... for each directory in whatever project you're working with. I recently found that I have to copy parts of my project documentation out for some of our clients to access our APIs, and I wanted to quickly delete all the SVN metadata and trim certain other files where necessary. I figured the quickest way to do this would be a simple command. Windows PowerShell provides a good (though not great, and certainly not as good as bash) way of doing it :


Get-ChildItem -Recurse -force |where {$_.PSIsContainer -AND $_.Name -match "svn"} | foreach ($_) {remove-item -force -recurse $_.fullname}


Two Google queries yielded the above command from this blog. Happy reading.

Sunday, December 02, 2007

Resolving logging issues with Tomcat

An idea for resolving the logging issues in Tomcat just occurred to me. I should probably be putting the logging jars in the individual lib directories for each of the webapps. I'll have to give this a try and post my results.

Thursday, November 29, 2007

Onward to Eclipse Europa ... and then right back

Ok, so after Eclipse Europa (3.3) has been out for quite some time, I decided to give it a try. I was shocked to discover a lot of the changes that had happened :
it seems that the Eclipse foundation has become Red Hat's bitch. There's a ton of new Red Hat sponsored / produced editors (none of which are very good.) There's a ton of useless JBoss integration. The editor overall is slower. All my hotkeys (save one or two, literally) were standard with installation in Eclipse 3.2, and are now broken in Eclipse 3.3. I'm severely disappointed after using Europa for three weeks. The only good part about it is that access to my SVN repository was considerably faster. And it seems I'm not the only one who feels this way about Europa. Get it together Eclipse.

Oh, and the thing that brought me to writing this post in the first place : the XML schema editor is flat out broken. What the fuck ?!

Thursday, November 15, 2007

Spring, Tomcat and memory

As my application has been growing more and more in functionality, it has also been growing in its memory footprint. To that end, I've started getting OutOfMemoryErrors because the heap has started overflowing. That's really not a problem if you have access to the external Tomcat server, you can just increase the heap size using "-Xms128M -Xmx512M" (or other applicable sizes) on the command line, or do this via the GUI if you're running Tomcat in Windows. However, it's not as obvious if you're using Tomcat for debugging within the Web Tools Platform plugins for Eclipse. I started running into this problem recently and it ground my development to a halt until I was able to fix the problem for WTP in Eclipse. WTP passes its arguments to the Tomcat server via the Launch Configuration. To get to it, right click on the Tomcat server in the Servers view -> 'Open' -> 'Open launch configuration' -> 'Arguments' tab -> 'VM arguments:' text box, then add the "-Xms128M -Xmx512M" segment to the end of the parameter list.

Saturday, November 03, 2007

Actual plugin development for Maven 2

Ok, so after Googling around and hours of patient command line testing (yeah, I know, it's horrible, you don't have to tell me) I managed to complete my ToLDya plugin for generating TLD files. Hopefully this plugin will be a great aid to people developing JSP tag libraries other than myself. But along the way, I learned a lot about plugin development in Maven, and I'm quite certain that I've got a shitload more to learn.


  1. Starting out with a barebones Mojo from the Maven provided archetype literally doesn't get you much. It gets you just enough to plug into the Maven framework so that maven can actually run your Mojo, but that's about it.

  2. The AbstractMojo provided by Maven is pathetic. It gives you a logger, and that's about it. By default, it does not give you many of the things a plugin is quite likely to want (more about that later)

  3. Despite the fact that Maven 2 was supposed to be the "lessons learned" version of Maven, I don't think that the Maven developers learned much at all. Maven is grossly behind the times, still relying on XDoclet annotations and pre-JDK 5.0 compatibility. Yes, there is something to be said for keeping things backward compatible (especially in a corporate environment, I know), but at some point you have to move on and do better, in this case : getting up to date with the latest JDK (1.6.03 at the time of this writing).

  4. If you need anything (ie from Maven) while writing a Maven plugin, you have to specifically request that it be injected for you. (See the Maven documentation, this is the one area where they're actually good about documenting things and helping out developers)

  5. If your plugin needs to access any of the classes in the project on which it's run, you need to load them yourself with your own classloader. Maven will not give you one (which is pretty ridiculous to my mind).



Here are some important bits of knowledge for doing anything with a Maven plugin:

  • If you need access to anything from the project (ie any information stored in the POM), you'll need to include the following dependency :


    <dependency>
    <groupId>org.apache.maven</groupId>
    <artifactId>maven-project</artifactId>
    <version>${maven.version}</version>
    </dependency>

    In my current POM, the property 'maven.version' is set to 2.0.7. You'll then need to have a property in your plugin Mojo called 'property' (or whatever else you find suitable) and annotate it like so (from within a Javadoc comment of course):



    * @parameter expression="${project}"
    * @required


  • As mentioned previously, if you want to load any of the classes that are in the project on which your plugin is to execute, you have to load them yourself. The same goes for any of the project's dependencies. In order to do this, you'll have to get the list of dependencies (compile, test, runtime) from the MavenProject object ('property', remember?) The MavenProject object has a property called 'runtimeClasspathElements'. This gives you a list of strings that are fully qualified file system paths to the classes in the ${project.build.outputDirectory} as well as all of the dependency JARs on which the client project depends. You'll then have to load them yourself. I did so with a URLClassLoader (part of the JDK). I used the following function for creating the classloader :


    private static URLClassLoader getDependencyClassloader(List dependencies) throws MalformedURLException {
    List classpathUrls = new Vector();

    URL url = null;
    for(int index = dependencies.size() - 1; index >= 0; index--) {
    url = new File(dependencies.get(index)).toURI().toURL();

    classpathUrls.add(
    url.toExternalForm().endsWith(".jar") ?
    url :
    new URL(url.getProtocol(),url.getHost(),url.getPort(),url.getFile() + "/") //add the '/' o indicate a directory );
    }
    URLClassLoader ucl = new URLClassLoader(classpathUrls.toArray(new RL[] {}), Thread.currentThread().getContextClassLoader());

    return ucl;
    }

    Once you have this class loader, you can use it in the long version of Class.forName() to load any classes you may need, as well as perform any loading logic you need with the .getResources() functions on the classloader.





I'll be doing more plugin development in the coming months I'm sure, so I'll try to post what I learn here, but that's about it for now.

Thursday, November 01, 2007

Plugin development with Maven, obsessed

Ok, I let my obsessions get the better of me and I couldn't drop it : I kept searching the interwebs until I had found a solution for my problem. Surprise, surprise, the maven-plugin-testing-harness is a broken ass piece of shit. Enough editorializing though, here's how things went :

1) I had to make sure all of this sh*t was in my POM :
<dependency>
<groupId>org.apache.maven</groupId>
<artifactId>maven-core</artifactId>
<version>2.0.7</version>
</dependency>
<dependency>
<groupId>org.apache.maven</groupId>
<artifactId>maven-project</artifactId>
<version>2.0.7</version>
</dependency>
<dependency>
<groupId>org.apache.maven</groupId>
<artifactId>maven-plugin-descriptor</artifactId>
<version>2.0.7</version>
</dependency>
<dependency>
<groupId>org.apache.maven</groupId>
<artifactId>maven-plugin-api</artifactId>
<version>2.0</version>
</dependency>
<dependency>
<groupId>org.apache.maven.shared</groupId>
<artifactId>maven-plugin-testing-harness</artifactId>
<version>1.0</version>
<scope>test</scope>
</dependency>

2) I then had to change from using 'lookupMojo' to instantiating my own mojo and using 'configureMojo' on the test harness. With the former, you'd have to put the /META-INF/maven/plugin.xml file somewhere on the classpath so that the MavenPluginDiscoverer module can detect it.

Why didn't they just tell you helpful things like these on the wikis for the plugins ? God dammit, I'm quickly hating Maven more and more. And to boot, it doesn't properly evaluate the required expressions and inject stubs for them, but maybe I've just missed something (easy, given how poor the documentation for the project is).

The anniversary

I didn't even notice it until today that it's the one year anniversary of this blog. Actually, the anniversary was two days ago, but close enough. Considering my general dislike of (non-development) blogs, that's actually quite impressive. I didn't think it'd last this long. Yay blog.

Maven ... OMG

My recent foray into developing Maven plugins has led me to a horrible discovery : the Maven developers ... suck. They're just not professional. There's no javadoc for the core classes. There's no javadoc for the base plugin classes and interfaces. And the AbstractMojo class which is supposed to be a good starting base class for all other plugins is, to say the least, horribly designed : it doesn't contain access to the MavenProject that's currently being operated on, you have to add it in manually. Ironically (or appropriately, take your pick) they made that a plugin too. The supposed "tutorial" on the Maven main site doesn't show any mention of this. The plugin "cookbook" on the site has been "coming soon" for so long that you have to wonder if it's ever going to get written. And, on top of all this, there's no generics and no direct support for JDK 5 annotations and enums. (See previous post). I've now reached a point of severe disappointment with the Maven project, and I'm fast approaching the point where I'm genuinely sorry that I've made this much of a time investment in it.

Wednesday, October 31, 2007

Maven 2 quirks and the "bandwagon"

I recently found myself developing (and using) a lot of small tag libraries to make my life easier when it comes to developing my web applications. The only thing that really gets me is that when I write these tag libraries, I have to manually update all of the TLD (tag library descriptor) files that are necessary for using the tags in JSPs. This irks me and it occurs to me that there's no reason I can't have Maven generate the TLDs for me based on annotations I place on the tag classes. The only problem with this is that no such plugin exists, so I figured that as a foray into Maven plugin development, I'd try making such a plugin as my first try. It is then that I ran across a rather large and bothersome quirk with Maven : it's not entirely ready for Java 5 and up. In fact, rather than using proper Java 5 annotations, it uses XDoclet comment annotations to provide metadata for building plugins, but that's not the quirk. The real quirk is that it can't handle having Java 5 annotations in the same project as the plugin. It can handle Java 5 enums just fine, but not annotations, which seems rather strange to me.

This is all part of the bigger problem of companies not keeping themselves up to date with the latest Java technology and staying with Java 1.4.2 and earlier. Sun has done a great job of maintaining backward compatibility, and there's (almost) no reason that companies shouldn't be upgrading the JVMs on their servers to the latest and greatest versions. The only real reason I can think of is that somewhere in their code, they've used variables named 'enum' which becomes a Java keyword in Java 5 and up (aka Java 1.5, 1.6, etc), and this can be mitigated by refactoring the code. Hell, Eclipse makes that job quick and easy, especially when you really know your keyboard shortcuts. I'm fortunate enough to be able to use Java 6 (and very quickly upgrade to Java 7 as soon as it's released and stable). As much as this is going to make me a snob, I'm getting really tired of being dragged down by other people's need for backward compatibility (and hence also tool developers' appeasement of these people which then affects me). Seriously people, get your act together.

Wednesday, October 24, 2007

Java enums are even cooler than I ever knew !

A quick note on Java enums : I discovered today that not only can enums have methods (which is exceedingly useful to begin with), but that you can individually override methods on a per-enum-value basis! That's so cool. I read it on this blog.

Sunday, October 21, 2007

An interesting Spring Framework quirk

Recently, I discovered the joy of the PropertyEditorRegistrar interface in the Springframework, for conveniently registering property editors to bind between objects and text when rendering forms. This joy led me to discover an interesting quirk : if you subclass a Form Controller and register property editors that way, the object of a field will be bound to the BindStatus.value property, but if you use a PropertyEditorRegistrar, that value gets edited and the spring representation of it bound to BindStatus.value. This is a small distinction that makes a huge difference when writing (and rendering) your pages.

Wednesday, October 17, 2007

RSS and Me

I love RSS, I think it's a great way to read the news and generally stay up to date on any sites you read and any (legit) torrents you may want to download, or podcasts if that's your thing. I've been kicking around the idea of having an RSS notifier for our system at work, so that I wouldn't have to be sending out extraneous emails all over the place. According to this guy, creating an RSS feed is stupid simple. Maybe when I get the chance I'll give it a try.

*EDIT* : Ok, me being me, I was fascinated with the idea of dealing with something new and couldn't let it go. I followed one breadcrumb after another and found, fortunately for me, that Spring already has an (Abstract)RSS view class in the Spring Modules library that uses the Java Rome RSS library in the background. I can't wait to slog through the crap I have to deal with at the moment so that I can fool around with RSS and have RSS feeds supplying the notifications for our production system. This is going to save so many headaches.

*EDIT 2* : Heh, this guy's page is awesome. It shows that you can use security with RSS feeds, which will be perfect for my company.

Monday, October 15, 2007

Moar Hibernate !!!

Yet again, I'm posting because of fucking hibernate. One of the old quirks I had run across was that if you had a setter for a collection, ie :

public void setItems(List items)
{
this.items = items;
}

...this would replace the hibernate-backed collection that already existed (if one did) and could replace it with a non-hibernate implementation such as java.util.Vector, and any cached items would not get properly dealt with, and the collection would not get properly persisted, even to the point of throwing an exception. This resulted in me having to change my setters to this :

public void setItems(List items)
{
if(this.items == null)
{
this.items = items;
} else
{
this.items.clear();
this.items.addAll(items);
}
}

The only problem with this is that depending on the scenario, hibernate gets the collection, and then sets exactly the same list object back into the persistent entity we're dealing with, which would result in clearing exactly the same list we're trying to assign. The remedy :

// Final version of the setter: preserves the existing (possibly
// Hibernate-managed) list instance, and guards against the caller handing
// back the same list object — without the identity check, clear() would
// destroy the data before addAll() could copy it.
public void setItems(List items)
{
    // No collection yet: simply adopt the supplied reference.
    if (this.items == null)
    {
        this.items = items;
        return;
    }
    // Only copy contents when a *different* list instance was supplied.
    if (this.items != items)
    {
        this.items.clear();
        this.items.addAll(items);
    }
}

That really should have been there anyway, but hibernate inspired it. Fuck you hibernate.

Saturday, October 13, 2007

Spring and PropertyEditors, important details

As mentioned in the documentation, the Spring Framework uses property editors in two places:
1) When parsing ApplicationContexts from XML configuration files
2) When binding beans from HTTP requests.

It's all well and good if the only classes you need to bind are already covered by the editors built in with the Spring Framework, but if you need property editors for other kinds of classes, here's how to get Spring to include them, for both situations :
1) Use a CustomEditorConfigurer like so :

<!-- Registers custom PropertyEditors with the ApplicationContext so that
     string values in the XML configuration can be converted into the mapped
     types. Map keys are the fully-qualified target class names; map values
     are the editor beans that perform the String-to-object conversion. -->
<bean class="org.springframework.beans.factory.config.CustomEditorConfigurer">
<property name="customEditors">
<map>
<entry key="java.text.DateFormat">
<bean class="com.mypackage.CustomDateFormatEditor"/>
</entry>
<entry key="java.util.TimeZone">
<bean class="com.mypackage.TimeZoneEditor"/>
</entry>
<entry key="java.text.MessageFormat">
<bean class="com.mypackage.CustomMessageFormatEditor"/>
</entry>
<entry key="java.text.NumberFormat">
<bean class="com.mypackage.CustomNumberFormatEditor"/>
</entry>
</map>
</property>
</bean>

* Note * : The editors can be singletons because they're going to be the only instances needed by the application context to parse string values.

2) Implement and instantiate (or otherwise use) an implementation of PropertyEditorRegistrar. This will be used with your form controllers to register property editors for individual fields or whole types.

Thursday, October 04, 2007

Ubuntu Linux is finally in the right place

I love Linux. Now, at this point, you're probably thinking that I'm just one of these fanboy nerds that loves to sit on a computer all day and hack on code. You're partially right : I do love to sit on computers and hack on code, but certainly not all day and certainly not at the expense of other fun activities like going to hockey games and being with friends. That said, I like Linux because it's good for doing development and it's ridiculously stable. It's even quite performant as well, which is
quite nice. I like it to the point that I'd want to put it on all of the computers here in the office, and as per the title of this post, I think that Ubuntu Linux is at the point where that's a practical possibility. In the past few days, I've installed Ubuntu 7.04 (Feisty Fawn) on two machines here in the office, set them up to do networked printing, networked file sharing, and everything else that may be needed by office workers. Hell, they can even use the built in Terminal Services client to remote into our server here in the office if they need to do any centralized work. They even have all their networked drives mounted for them, and they can see the Windows domain on which this place runs. The only thing remaining is for the systems to pass the Boss Test : sit the big boss of the company down on one, and if he can print his stuff, access his stuff and access our server exactly the same as he could in Windows, then it stays.

There's a great tutorial on setting up Samba with Windows Shares in Ubuntu here. Printing was
ridiculously easy to set up : I went to System -> Administration -> Printing , it autodetected the printers on our LAN and setting them up was merely a matter of following on screen directions.

Wednesday, September 26, 2007

MySQL character sets, the end

Many times in the past I've tried to get our foreign MySQL server to properly store and handle unicode character sets. Despite the fact that I repeatedly set 'default-character-set=utf8' all over the config files and set the server and client character sets and collations in my.cnf, it still wouldn't handle them properly. Here's the kicker : to make absolutely sure that the server uses only server settings for handling character set (and sets 'fuck you' to whatever the client requests), you have to use the '--skip-character-set-client-handshake' argument when starting the MySQL daemon (server). This is what finally got it going for me. I hope this post helps somebody out someday. (Given my rate of forgetting things, it's likely to be me)

Tuesday, September 11, 2007

<rage>MOAR HIBERNATE! :@</rage>

God damn, I'm getting so fucking tired of Hibernate and its quirks. Here's a new one regarding Criteria queries :

When you create a criteria query, you almost invariably have to specify a result transformer of Criterion.DISTINCT_ROOT_ENTITY (that's a quirk, but not the topic of this post). When you have an entity on which you want to build a criteria query and you want to limit the number of search results (ie distinct root entities), things get really tricky. Specifying 'setMaxResults' on the query affects the number of rows returned from the database that are actually inspected. Therefore, if there are any joins on your entity that have collections, this will cause a fetch with an outer join strategy to generate an excessive number of rows and affect the results when using a maxResults setting. In the case of using embedded properties, specifying a fetch mode of SELECT will not override these (this is a glitch in hibernate). You'll have to specify the fetch mode manually in the metadata (be it XML or Annotations) permanently for the embedded class. I fucking hate Hibernate sometimes, I really do.

Friday, September 07, 2007

Spring AOP rage ...subsiding

Spring AOP is breaking my heart. Spring overall is a great framework and I love working with it. The idea of using aspects to interweave code and keep concerns separated and code clean is a wonderful thought to somebody who loves to architect software, such as myself. But I swear, it feels like I'm hitting every bug in the book when it comes to using AOP in Spring. Certain pointcuts don't get matched properly, regexp based pointcuts get loaded when they shouldn't, the list goes on. I really want to use AOP to design the next big phase of my project that's coming up, but they're making it really hard to justify the decision to do so. You're making my heart cry, Spring.

Thursday, September 06, 2007

Google Disappointment

...do it. You just might find a page with me ... pointing at Google on my screen.

Today I was messing around with some of the CSS styling in my API documentation for one of our company's merchant partners, and I found that some of the elements weren't quite right. I then remembered an article I had read on Digg regarding CSS reset stylesheets, and how Google and Yahoo both use them in their free APIs to provide consistent styling results across browsers, so I went to investigate using one of said reset stylesheets to help me out. I figured I'd Google it first (no pun intended), and upon not finding anything relevant quickly, figured Google would be smart enough to use their own reset stylesheets in their own pages. I pulled up the source on one of my query pages, only to discover a developer's horror : inline styling all over the place, and google didn't even use their own stylesheets anywhere in their main site! The code was horrible spaghetti. I ran the page through the W3C web site validator, and it didn't pass a single standard, ie HTML 4.01 / XHTML (any flavour). As one of the big Web 2.0 sites, I would have expected you to have higher regard for international web standards. I'm very disappointed in you Google.

Wednesday, September 05, 2007

A quick note on versioning

I've come to really admire the way the open source community has been versioning their products over the last couple years, especially in the java open source community, which I've found to be very bright and vibrant. Generally, the projects adhere to the following conventions (where 'x' is an integer) :

Version :
x.x.x - Version with major version, minor version, patch increment
0.x.x - Beta software, not to be considered ready for production use (generally, some projects have very odd development cycles and version conventions)
1.x.x - Version 1 (good for looking at, but you may want to wait for version 2, especially Apache projects *cough*maven*cough*struts*cough*)
x.x.x-Mx - Milestone beta version - has certain promised features according to the milestone version, but not the final version with that number
x.x.x-RCx - Release candidate beta version - has all the promised features according to the release plan for that version for the project, but is not considered to have been sufficiently tested

If anybody feels differently about my descriptions, please, by all means, correct me.

Weak Java

I rarely (if ever) use switch statements, mainly because if their use is required, generally a very poor design decision has been made. But sometimes they're the right choice. That said, I was forced to use a switch statement in some of my code today, and found that Java switch statements can only switch primitive ints and enums in the java language. Not even longs or shorts. That is the weakest shit I've ever encountered. Are you fucking retarded Sun ?

Sorry, I just had to bitch about it, because it's just that stupid.

Monday, August 27, 2007

I love Eclipse now more than ever

Eclipse has always been a very useful platform, with tons of little developer driven quirks that make the programmer's job so much easier. I found this out especially lately when I've been trying to get into developing with C# in Visual Studio 2005, and I've been missing features that are just there in Eclipse, but you have to buy a plugin for with VS2k5. And here's the great thing about that whole situation : you can develop C# in Eclipse on Windows with a free plugin. But that's really all just a side note. The main point of this post is this : I just now observed an exceedingly useful feature that's been in Eclipse for several versions. Ever press Ctrl + Shift + T to open the quick loader for classes ? It has a text bar at the top so you can type in the simple name of the class you're looking for and it'll filter out the results. In the filtering options up top, there's the usual ? and * filters, but beside that there's also a spot that says "TZ - TimeZone" which I've never really noticed before, and I don't know what made me notice it today. I typed in TZ, and much to my surprise, a whole list of classes remained in the window, filtered, and they all had TZ capital letters in them, following the general naming conventions in Java. Intrigued by this, I typed in MPREF (ManualPaymentRequestEntryFlow) which is a class in my project that I just finished working on, and sure enough, it filtered the list to have that in it (it was the only class not filtered out) . I thought that was just the greatest filter I'd ever seen. And to build on it, if you use the content assist hotkey (Ctrl + Space) with the same scheme, it automatically fills in those classes for you. How cool is that !?

Wednesday, August 22, 2007

Moar (sic) hibernate !

Yet more hibernate fun facts that I wish I'd had before I started any projects : Hibernate has the ability to persist instances of java.util.Locale, java.util.Currency, and java.util.TimeZone. A sprinkling of a few bits of these pieces of code and their corresponding hibernate annotations could have saved me an assload of time spent on doing my own design, development and testing.

On a small side note, I found I've actually managed to come up with a much better and more appropriate solution for dealing with currencies than the java.util.Currency class, as it's woefully inadequate to the task of dealing with currencies in an enterprise system. I refer specifically to the way it deals with floating point arithmetic in its number of decimal places, rather than adhering more closely to the ISO 4217 specification whereby currencies should have a base and an exponent when calculating a multiplier to go between base currency units and major currency units.

Rampaging bull software design

If you're into the software scene, then you most likely know that some of the big buzz terms in the last few years have been RAD (Rapid Application Development) and Agile development (keeping your code clean and maintainable, and incrementing your design and development process in very small steps). With ever changing requirements and very little chance to get the big picture ahead of time given the small amounts of information I get from my bosses, I have no choice but to follow an agile development process. As best as I can tell, I've been doing so ever since I've started here.

Unfortunately, this style of development has caught up with me : I'm finding that even though my code is fairly clean and very easily maintainable, there are memory leaks (I'm using Java, so this means that something's holding onto references and preventing the automatic garbage collector from doing its job). This means that I now get to go back with a profiler, analyze the running of my software, and find out where the leaks are and correct them.

This is going to be interesting.

Friday, August 17, 2007

Moar (sic) Spring Batch !

I received a pleasant surprise in my inbox today (which doesn't happen often), and that surprise was an email from the Spring Batch team notifying me that they'd just released Spring Batch 1.0-M2 to the subversion repo and made it public. This was great because it gave me a chance to download the code and play around with it, look at the samples, and generally get an idea of what's coming for Spring Batch.

In case you weren't aware, I've been looking forward to the release of Spring Batch for quite a while because it can help me a great deal at work, where I'm already using the Spring Framework for my web applications. Unfortunately, the M2 release doesn't appear nearly ready for a production environment, but it's a start. I want more ! faster ! now !

Thursday, August 09, 2007

Why you should write good software

Today at the company where I work, our remotely hosted server went down suddenly. Nobody knew why, and then the shit started to hit the fan, with emails flying back and forth, and phones ringing off the hook in search of an answer to that one timeless question, WTF ? Of course, everything filtered towards me eventually because I'm considered responsible for the server (even though I'm nowhere near it and if I don't have a network connection to it, I can't do shit). After several phone calls back and forth between our former partners (who happen to be in the same building as our ISP and still maintain our server), and our ISP, and our firewall technician (who, oddly, is outsourced by our ISP and doesn't technically work for them), it was discovered that the machine was alive, but not responding to any network traffic, so the quickest and simplest solution was to just get somebody to reboot the machine. Eventually, one of our former partners' employees went down to the cage and rebooted the server. Later, in an email explaining what had happened, that same employee (who happened to be their sysadmin) explained that their DVD backup had frozen the whole box and that this situation had never happened before. (This during our peak period for the day).

Now, I can believe that (I've had weird shit like that happen to me, so I can't really bitch at the guy.) But it brings to mind the need for good software and why good software should be written : if you write software, and it fucks up, then the managers of those using your software don't see it as your fuckup, they see it as their employees' fuckup, and hold them responsible. I hate the thought of being one of those employees (and very nearly was one today). This has brought forth some small amount of Karmic inspiration to put more time into testing and finding those odd failure modes for my own software that you wouldn't think can happen, but can because you eventually hit an odd and unexpected set of circumstances. Just something to think about if you're a serious developer and you're reading this.

Tuesday, July 31, 2007

Linux : the more you know ...

I'm somewhat embarrassed to admit this, but I've just discovered Linux ACLs, which are apparently in every kernel 2.6+, and have plugins for 2.4-. I guess a really linux savvy person should know these, especially since they're incredibly useful and give so much more power over the default User - Group - World permissions built into the file system.

Friday, July 27, 2007

JSP Tags

They're oh so useful for encapsulating small bits of reusable logic, but, here's the kicker : when they're compiled, only one instance of a tag is generated for a given page, so you have to design them to be stateful and ensure that you override the 'release' method when inheriting from TagSupport or BodyTagSupport. It took me a couple of hours to figure this out, and sadly, it was for the second time. I guarantee there won't be a third after this.

Wednesday, July 04, 2007

Moar (sic) Hibernate

OK, so once again in my (seemingly never-ending) struggle with Hibernate, I have stuff to report that's mainly being posted here for my own reading so that I can reference this shit later. This time, I've decided to abandon Xdoclet and just go with Annotations. They're so much easier and I don't have to deal with Xdoclet's quirks any more (like ridiculous parsing exceptions between versions).

If you want to use annotations to do a bidirectional OneToMany association with list-based semantics, here's the annotations you use :

Parent.java (equals, hashCode, getters/setters removed for clarity):
// Parent side of a bidirectional one-to-many association with list semantics.
// (equals, hashCode and getters/setters were omitted in the original post
// for clarity.)
@Entity
@Table(name = "parent")
public class Parent implements Serializable {

private static final long serialVersionUID = -1989884660562516228L;

// Auto-increment surrogate primary key.
@Id
@Column(name = "id")
@GeneratedValue(strategy = GenerationType.IDENTITY)
private long id;

@Column(name = "name")
private String name;

// @IndexColumn("idx") persists each Child's position in this list.
// The Hibernate-specific @Cascade adds DELETE_ORPHAN so that children
// removed from this collection are also deleted from the database.
@OneToMany(mappedBy = "parent", fetch = FetchType.EAGER, cascade = {CascadeType.ALL})
@Cascade({org.hibernate.annotations.CascadeType.ALL,org.hibernate.annotations.CascadeType.DELETE_ORPHAN})
@IndexColumn(name = "idx", nullable = false)
private List children;
}


Child.java (equals, hashCode, getters/setters removed for clarity):
// Child side of the bidirectional association. The 'index' pseudo-property
// exposes the column backing the parent's @IndexColumn ("idx") so Hibernate
// can read and write the list position; the setter is deliberately a no-op
// and the getter derives the position from the parent's collection.
// (equals, hashCode and getters/setters were omitted in the original post
// for clarity.)
@Entity
@Table(name = "child")
public class Child implements Serializable {
private static final long serialVersionUID = -6414526385302360120L;

// Auto-increment surrogate primary key.
@Id
@Column(name = "id")
@GeneratedValue(strategy = GenerationType.IDENTITY)
private Long id;

@Column(name = "name")
private String name;

// Owning side of the association; maps the parent_id foreign key.
@ManyToOne
@JoinColumn(name = "parent_id")
private Parent parent;

// Column name MUST match the name given in the parent's @IndexColumn.
@Column(name = "idx")
private int index;

public Child() {
super();
}

// Position is derived from the parent's collection, not from the field.
public int getIndex() {
return this.parent.getChildren().indexOf(this);
}

// Intentionally empty: Hibernate manages the index via the parent's list.
public void setIndex(int index) {
}
}

Note that you need the 'index' pseudo-property in the child class in order for hibernate to properly persist the ordering of the elements in the collection. The setter for the index property should do nothing, the getter should determine the object's placement in its parent, and you'll need the field with annotations in order to get hibernate to read everything properly. The name of the column in the annotation for the index property MUST be the same as that specified in the parent in the @IndexColumn property. They aren't very clear about this in the Hibernate Documentation (once again.) They mention a similar structure for the .hbm.xml mappings in a faq on the main site for hibernate (not the hibernate annotations faq), but they don't explicitly mention this anywhere for Annotations. You can read the mention of it for .hbm.xml files here. I'm sure I'll have to post more about annotation configurations later, but that's all for now. And if you're wondering, the misspelling in the title of this post is an inside joke.

PS. Note that in addition to the OneToMany and IndexColumn annotations on the child collection in the parent, you also need to have a @Cascade annotation in order to properly remove orphans from the database when deleting children from the child collection in the parent. You can also find the table that gave me my final clues on Darren Hicks' blog